gdbserver/linux-low: turn 'regs_info' into a method
gdbserver/linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 /* Update the target description of all processes; a new GDB has
104 connected, and it may or may not support xml target descriptions. */
105 void update_xmltarget ();
106
107 const regs_info *get_regs_info () override;
108
109 protected:
110
111 void low_arch_setup () override;
112 };
113
114 /* The singleton target ops object. */
115
116 static x86_target the_x86_target;
117
118 /* Per-process arch-specific data we want to keep. */
119
120 struct arch_process_info
121 {
122 struct x86_debug_reg_state debug_reg_state;
123 };
124
125 #ifdef __x86_64__
126
127 /* Mapping between the general-purpose registers in `struct user'
128 format and GDB's register array layout.
129 Note that the transfer layout uses 64-bit regs. */
130 static /*const*/ int i386_regmap[] =
131 {
132 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
133 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
134 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
135 DS * 8, ES * 8, FS * 8, GS * 8
136 };
137
138 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
139
140 /* So the code below doesn't have to care whether it's i386 or amd64. */
141 #define ORIG_EAX ORIG_RAX
142 #define REGSIZE 8
143
144 static const int x86_64_regmap[] =
145 {
146 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
147 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
148 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
149 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
150 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
151 DS * 8, ES * 8, FS * 8, GS * 8,
152 -1, -1, -1, -1, -1, -1, -1, -1,
153 -1, -1, -1, -1, -1, -1, -1, -1,
154 -1, -1, -1, -1, -1, -1, -1, -1,
155 -1,
156 -1, -1, -1, -1, -1, -1, -1, -1,
157 ORIG_RAX * 8,
158 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
159 21 * 8, 22 * 8,
160 #else
161 -1, -1,
162 #endif
163 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
164 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
165 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
166 -1, -1, -1, -1, -1, -1, -1, -1,
167 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
168 -1, -1, -1, -1, -1, -1, -1, -1,
169 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
170 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
171 -1, -1, -1, -1, -1, -1, -1, -1,
172 -1, -1, -1, -1, -1, -1, -1, -1,
173 -1, -1, -1, -1, -1, -1, -1, -1,
174 -1 /* pkru */
175 };
176
177 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
178 #define X86_64_USER_REGS (GS + 1)
179
180 #else /* ! __x86_64__ */
181
182 /* Mapping between the general-purpose registers in `struct user'
183 format and GDB's register array layout. */
184 static /*const*/ int i386_regmap[] =
185 {
186 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
187 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
188 EIP * 4, EFL * 4, CS * 4, SS * 4,
189 DS * 4, ES * 4, FS * 4, GS * 4
190 };
191
192 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
193
194 #define REGSIZE 4
195
196 #endif
197
198 #ifdef __x86_64__
199
200 /* Returns true if the current inferior belongs to an x86-64 process,
201 per the tdesc. */
202
203 static int
204 is_64bit_tdesc (void)
205 {
206 struct regcache *regcache = get_thread_regcache (current_thread, 0);
207
208 return register_size (regcache->tdesc, 0) == 8;
209 }
210
211 #endif
212
213 \f
214 /* Called by libthread_db. */
215
216 ps_err_e
217 ps_get_thread_area (struct ps_prochandle *ph,
218 lwpid_t lwpid, int idx, void **base)
219 {
220 #ifdef __x86_64__
221 int use_64bit = is_64bit_tdesc ();
222
223 if (use_64bit)
224 {
225 switch (idx)
226 {
227 case FS:
228 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
229 return PS_OK;
230 break;
231 case GS:
232 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
233 return PS_OK;
234 break;
235 default:
236 return PS_BADADDR;
237 }
238 return PS_ERR;
239 }
240 #endif
241
242 {
243 unsigned int desc[4];
244
245 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
246 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
247 return PS_ERR;
248
249 /* Ensure we properly extend the value to 64-bits for x86_64. */
250 *base = (void *) (uintptr_t) desc[1];
251 return PS_OK;
252 }
253 }
254
255 /* Get the thread area address. This is used to recognize which
256 thread is which when tracing with the in-process agent library. We
257 don't read anything from the address, and treat it as opaque; it's
258 the address itself that we assume is unique per-thread. */
259
260 static int
261 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
262 {
263 #ifdef __x86_64__
264 int use_64bit = is_64bit_tdesc ();
265
266 if (use_64bit)
267 {
268 void *base;
269 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
270 {
271 *addr = (CORE_ADDR) (uintptr_t) base;
272 return 0;
273 }
274
275 return -1;
276 }
277 #endif
278
279 {
280 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
281 struct thread_info *thr = get_lwp_thread (lwp);
282 struct regcache *regcache = get_thread_regcache (thr, 1);
283 unsigned int desc[4];
284 ULONGEST gs = 0;
285 const int reg_thread_area = 3; /* bits to scale down register value. */
286 int idx;
287
288 collect_register_by_name (regcache, "gs", &gs);
289
290 idx = gs >> reg_thread_area;
291
292 if (ptrace (PTRACE_GET_THREAD_AREA,
293 lwpid_of (thr),
294 (void *) (long) idx, (unsigned long) &desc) < 0)
295 return -1;
296
297 *addr = desc[1];
298 return 0;
299 }
300 }
301
302
303 \f
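/* Return nonzero if register REGNO cannot be stored individually
   (it lies beyond the `struct user' general-purpose area); always
   zero on a 64-bit tdesc.  */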
304 static int
305 x86_cannot_store_register (int regno)
306 {
307 #ifdef __x86_64__
308 if (is_64bit_tdesc ())
309 return 0;
310 #endif
311
312 return regno >= I386_NUM_REGS;
313 }
314
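/* Likewise, return nonzero if register REGNO cannot be fetched
   individually.  */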
315 static int
316 x86_cannot_fetch_register (int regno)
317 {
318 #ifdef __x86_64__
319 if (is_64bit_tdesc ())
320 return 0;
321 #endif
322
323 return regno >= I386_NUM_REGS;
324 }
325
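/* Fill the general-purpose regset buffer BUF (in `struct user' layout)
   from REGCACHE.  On x86-64 this also handles fs_base/gs_base and the
   zero/sign extension needed for 32-bit inferiors.  */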
326 static void
327 x86_fill_gregset (struct regcache *regcache, void *buf)
328 {
329 int i;
330
331 #ifdef __x86_64__
332 if (register_size (regcache->tdesc, 0) == 8)
333 {
334 for (i = 0; i < X86_64_NUM_REGS; i++)
335 if (x86_64_regmap[i] != -1)
336 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
337
338 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
339 {
340 unsigned long base;
341 int lwpid = lwpid_of (current_thread);
342
343 collect_register_by_name (regcache, "fs_base", &base);
344 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
345
346 collect_register_by_name (regcache, "gs_base", &base);
347 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
348 }
349 #endif
350
351 return;
352 }
353
354 /* 32-bit inferior registers need to be zero-extended.
355 Callers would read uninitialized memory otherwise. */
356 memset (buf, 0x00, X86_64_USER_REGS * 8);
357 #endif
358
359 for (i = 0; i < I386_NUM_REGS; i++)
360 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
361
362 collect_register_by_name (regcache, "orig_eax",
363 ((char *) buf) + ORIG_EAX * REGSIZE);
364
365 #ifdef __x86_64__
366 /* Sign extend EAX value to avoid potential syscall restart
367 problems.
368
369 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
370 for a detailed explanation. */
371 if (register_size (regcache->tdesc, 0) == 4)
372 {
373 void *ptr = ((gdb_byte *) buf
374 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
375
376 *(int64_t *) ptr = *(int32_t *) ptr;
377 }
378 #endif
379 }
380
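/* The converse of x86_fill_gregset: supply registers from the
   general-purpose regset buffer BUF to REGCACHE.  */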
381 static void
382 x86_store_gregset (struct regcache *regcache, const void *buf)
383 {
384 int i;
385
386 #ifdef __x86_64__
387 if (register_size (regcache->tdesc, 0) == 8)
388 {
389 for (i = 0; i < X86_64_NUM_REGS; i++)
390 if (x86_64_regmap[i] != -1)
391 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
392
393 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
394 {
395 unsigned long base;
396 int lwpid = lwpid_of (current_thread);
397
398 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
399 supply_register_by_name (regcache, "fs_base", &base);
400
401 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
402 supply_register_by_name (regcache, "gs_base", &base);
403 }
404 #endif
405 return;
406 }
407 #endif
408
409 for (i = 0; i < I386_NUM_REGS; i++)
410 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
411
412 supply_register_by_name (regcache, "orig_eax",
413 ((char *) buf) + ORIG_EAX * REGSIZE);
414 }
415
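/* Fill the floating-point regset buffer BUF from REGCACHE, using the
   fxsave layout on x86-64 and the fsave layout on i386; the store
   variant below does the reverse.  */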
416 static void
417 x86_fill_fpregset (struct regcache *regcache, void *buf)
418 {
419 #ifdef __x86_64__
420 i387_cache_to_fxsave (regcache, buf);
421 #else
422 i387_cache_to_fsave (regcache, buf);
423 #endif
424 }
425
426 static void
427 x86_store_fpregset (struct regcache *regcache, const void *buf)
428 {
429 #ifdef __x86_64__
430 i387_fxsave_to_cache (regcache, buf);
431 #else
432 i387_fsave_to_cache (regcache, buf);
433 #endif
434 }
435
436 #ifndef __x86_64__
437
438 static void
439 x86_fill_fpxregset (struct regcache *regcache, void *buf)
440 {
441 i387_cache_to_fxsave (regcache, buf);
442 }
443
444 static void
445 x86_store_fpxregset (struct regcache *regcache, const void *buf)
446 {
447 i387_fxsave_to_cache (regcache, buf);
448 }
449
450 #endif
451
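/* Fill the XSAVE extended state regset buffer BUF from REGCACHE; the
   store variant below does the reverse.  */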
452 static void
453 x86_fill_xstateregset (struct regcache *regcache, void *buf)
454 {
455 i387_cache_to_xsave (regcache, buf);
456 }
457
458 static void
459 x86_store_xstateregset (struct regcache *regcache, const void *buf)
460 {
461 i387_xsave_to_cache (regcache, buf);
462 }
463
464 /* ??? The non-biarch i386 case stores all the i387 regs twice.
465 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
466 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
467 doesn't work. It would be nice to avoid the duplication in the case where it
468 does work. Maybe the arch_setup routine could check whether it works
469 and update the supported regsets accordingly. */
470
471 static struct regset_info x86_regsets[] =
472 {
473 #ifdef HAVE_PTRACE_GETREGS
474 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
475 GENERAL_REGS,
476 x86_fill_gregset, x86_store_gregset },
477 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
478 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
479 # ifndef __x86_64__
480 # ifdef HAVE_PTRACE_GETFPXREGS
481 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
482 EXTENDED_REGS,
483 x86_fill_fpxregset, x86_store_fpxregset },
484 # endif
485 # endif
486 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
487 FP_REGS,
488 x86_fill_fpregset, x86_store_fpregset },
489 #endif /* HAVE_PTRACE_GETREGS */
490 NULL_REGSET
491 };
492
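/* Return the program counter (RIP or EIP, depending on the tdesc)
   from REGCACHE.  */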
493 static CORE_ADDR
494 x86_get_pc (struct regcache *regcache)
495 {
496 int use_64bit = register_size (regcache->tdesc, 0) == 8;
497
498 if (use_64bit)
499 {
500 uint64_t pc;
501
502 collect_register_by_name (regcache, "rip", &pc);
503 return (CORE_ADDR) pc;
504 }
505 else
506 {
507 uint32_t pc;
508
509 collect_register_by_name (regcache, "eip", &pc);
510 return (CORE_ADDR) pc;
511 }
512 }
513
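/* Write PC to the program counter register (RIP or EIP) in REGCACHE.  */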
514 static void
515 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
516 {
517 int use_64bit = register_size (regcache->tdesc, 0) == 8;
518
519 if (use_64bit)
520 {
521 uint64_t newpc = pc;
522
523 supply_register_by_name (regcache, "rip", &newpc);
524 }
525 else
526 {
527 uint32_t newpc = pc;
528
529 supply_register_by_name (regcache, "eip", &newpc);
530 }
531 }
532 \f
533 static const gdb_byte x86_breakpoint[] = { 0xCC };
534 #define x86_breakpoint_len 1
535
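/* Return nonzero if the byte at PC is the int3 breakpoint
   instruction (0xCC).  */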
536 static int
537 x86_breakpoint_at (CORE_ADDR pc)
538 {
539 unsigned char c;
540
541 the_target->read_memory (pc, &c, 1);
542 if (c == 0xCC)
543 return 1;
544
545 return 0;
546 }
547 \f
548 /* Low-level function vector. */
549 struct x86_dr_low_type x86_dr_low =
550 {
551 x86_linux_dr_set_control,
552 x86_linux_dr_set_addr,
553 x86_linux_dr_get_addr,
554 x86_linux_dr_get_status,
555 x86_linux_dr_get_control,
556 sizeof (void *),
557 };
558 \f
559 /* Breakpoint/Watchpoint support. */
560
561 static int
562 x86_supports_z_point_type (char z_type)
563 {
564 switch (z_type)
565 {
566 case Z_PACKET_SW_BP:
567 case Z_PACKET_HW_BP:
568 case Z_PACKET_WRITE_WP:
569 case Z_PACKET_ACCESS_WP:
570 return 1;
571 default:
572 return 0;
573 }
574 }
575
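/* Insert a hardware breakpoint or watchpoint of TYPE at ADDR covering
   SIZE bytes, by updating this process's debug register mirror.
   Software breakpoints are handled elsewhere.  */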
576 static int
577 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
578 int size, struct raw_breakpoint *bp)
579 {
580 struct process_info *proc = current_process ();
581
582 switch (type)
583 {
584 case raw_bkpt_type_hw:
585 case raw_bkpt_type_write_wp:
586 case raw_bkpt_type_access_wp:
587 {
588 enum target_hw_bp_type hw_type
589 = raw_bkpt_type_to_target_hw_bp_type (type);
590 struct x86_debug_reg_state *state
591 = &proc->priv->arch_private->debug_reg_state;
592
593 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
594 }
595
596 default:
597 /* Unsupported. */
598 return 1;
599 }
600 }
601
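/* Remove a hardware breakpoint or watchpoint previously inserted by
   x86_insert_point.  */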
602 static int
603 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
604 int size, struct raw_breakpoint *bp)
605 {
606 struct process_info *proc = current_process ();
607
608 switch (type)
609 {
610 case raw_bkpt_type_hw:
611 case raw_bkpt_type_write_wp:
612 case raw_bkpt_type_access_wp:
613 {
614 enum target_hw_bp_type hw_type
615 = raw_bkpt_type_to_target_hw_bp_type (type);
616 struct x86_debug_reg_state *state
617 = &proc->priv->arch_private->debug_reg_state;
618
619 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
620 }
621 default:
622 /* Unsupported. */
623 return 1;
624 }
625 }
626
627 static int
628 x86_stopped_by_watchpoint (void)
629 {
630 struct process_info *proc = current_process ();
631 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
632 }
633
634 static CORE_ADDR
635 x86_stopped_data_address (void)
636 {
637 struct process_info *proc = current_process ();
638 CORE_ADDR addr;
639 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
640 &addr))
641 return addr;
642 return 0;
643 }
644 \f
645 /* Called when a new process is created. */
646
647 static struct arch_process_info *
648 x86_linux_new_process (void)
649 {
650 struct arch_process_info *info = XCNEW (struct arch_process_info);
651
652 x86_low_init_dregs (&info->debug_reg_state);
653
654 return info;
655 }
656
657 /* Called when a process is being deleted. */
658
659 static void
660 x86_linux_delete_process (struct arch_process_info *info)
661 {
662 xfree (info);
663 }
664
665 /* Target routine for linux_new_fork. */
666
667 static void
668 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
669 {
670 /* These are allocated by linux_add_process. */
671 gdb_assert (parent->priv != NULL
672 && parent->priv->arch_private != NULL);
673 gdb_assert (child->priv != NULL
674 && child->priv->arch_private != NULL);
675
676 /* Linux kernel before 2.6.33 commit
677 72f674d203cd230426437cdcf7dd6f681dad8b0d
678 will inherit hardware debug registers from parent
679 on fork/vfork/clone. Newer Linux kernels create such tasks with
680 zeroed debug registers.
681
682 GDB core assumes the child inherits the watchpoints/hw
683 breakpoints of the parent, and will remove them all from the
684 forked off process. Copy the debug registers mirrors into the
685 new process so that all breakpoints and watchpoints can be
686 removed together. The debug registers mirror will become zeroed
687 in the end before detaching the forked off process, thus making
688 this compatible with older Linux kernels too. */
689
690 *child->priv->arch_private = *parent->priv->arch_private;
691 }
692
693 /* See nat/x86-dregs.h. */
694
695 struct x86_debug_reg_state *
696 x86_debug_reg_state (pid_t pid)
697 {
698 struct process_info *proc = find_process_pid (pid);
699
700 return &proc->priv->arch_private->debug_reg_state;
701 }
702 \f
703 /* When GDBSERVER is built as a 64-bit application on linux, the
704 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
705 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
706 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
707 conversion in-place ourselves. */
708
709 /* Convert a ptrace/host siginfo object into/from the siginfo in the
710 layout of the inferior's architecture. Returns true if any
711 conversion was done; false otherwise. If DIRECTION is 1, then copy
712 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
713 INF. */
714
715 static int
716 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
717 {
718 #ifdef __x86_64__
719 unsigned int machine;
720 int tid = lwpid_of (current_thread);
721 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
722
723 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
724 if (!is_64bit_tdesc ())
725 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
726 FIXUP_32);
727 /* No fixup for native x32 GDB. */
728 else if (!is_elf64 && sizeof (void *) == 8)
729 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
730 FIXUP_X32);
731 #endif
732
733 return 0;
734 }
735 \f
736 static int use_xml;
737
738 /* Format of XSAVE extended state is:
739 struct
740 {
741 fxsave_bytes[0..463]
742 sw_usable_bytes[464..511]
743 xstate_hdr_bytes[512..575]
744 avx_bytes[576..831]
745 future_state etc
746 };
747
748 Same memory layout will be used for the coredump NT_X86_XSTATE
749 representing the XSAVE extended state registers.
750
751 The first 8 bytes of the sw_usable_bytes[464..471] are the OS enabled
752 extended state mask, which is the same as the extended control register
753 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
754 together with the mask saved in the xstate_hdr_bytes to determine what
755 states the processor/OS supports and what state, used or initialized,
756 the process/thread is in. */
757 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
758
759 /* Does the current host support the GETFPXREGS request? The header
760 file may or may not define it, and even if it is defined, the
761 kernel will return EIO if it's running on a pre-SSE processor. */
762 int have_ptrace_getfpxregs =
763 #ifdef HAVE_PTRACE_GETFPXREGS
764 -1
765 #else
766 0
767 #endif
768 ;
769
770 /* Get Linux/x86 target description from running target. */
771
772 static const struct target_desc *
773 x86_linux_read_description (void)
774 {
775 unsigned int machine;
776 int is_elf64;
777 int xcr0_features;
778 int tid;
779 static uint64_t xcr0;
780 struct regset_info *regset;
781
782 tid = lwpid_of (current_thread);
783
784 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
785
786 if (sizeof (void *) == 4)
787 {
788 if (is_elf64 > 0)
789 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
790 #ifndef __x86_64__
791 else if (machine == EM_X86_64)
792 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
793 #endif
794 }
795
796 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
797 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
798 {
799 elf_fpxregset_t fpxregs;
800
801 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
802 {
803 have_ptrace_getfpxregs = 0;
804 have_ptrace_getregset = 0;
805 return i386_linux_read_description (X86_XSTATE_X87);
806 }
807 else
808 have_ptrace_getfpxregs = 1;
809 }
810 #endif
811
812 if (!use_xml)
813 {
814 x86_xcr0 = X86_XSTATE_SSE_MASK;
815
816 /* Don't use XML. */
817 #ifdef __x86_64__
818 if (machine == EM_X86_64)
819 return tdesc_amd64_linux_no_xml;
820 else
821 #endif
822 return tdesc_i386_linux_no_xml;
823 }
824
825 if (have_ptrace_getregset == -1)
826 {
827 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
828 struct iovec iov;
829
830 iov.iov_base = xstateregs;
831 iov.iov_len = sizeof (xstateregs);
832
833 /* Check if PTRACE_GETREGSET works. */
834 if (ptrace (PTRACE_GETREGSET, tid,
835 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
836 have_ptrace_getregset = 0;
837 else
838 {
839 have_ptrace_getregset = 1;
840
841 /* Get XCR0 from XSAVE extended state. */
842 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
843 / sizeof (uint64_t))];
844
845 /* Use PTRACE_GETREGSET if it is available. */
846 for (regset = x86_regsets;
847 regset->fill_function != NULL; regset++)
848 if (regset->get_request == PTRACE_GETREGSET)
849 regset->size = X86_XSTATE_SIZE (xcr0);
850 else if (regset->type != GENERAL_REGS)
851 regset->size = 0;
852 }
853 }
854
855 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
856 xcr0_features = (have_ptrace_getregset
857 && (xcr0 & X86_XSTATE_ALL_MASK));
858
859 if (xcr0_features)
860 x86_xcr0 = xcr0;
861
862 if (machine == EM_X86_64)
863 {
864 #ifdef __x86_64__
865 const target_desc *tdesc = NULL;
866
867 if (xcr0_features)
868 {
869 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
870 !is_elf64);
871 }
872
873 if (tdesc == NULL)
874 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
875 return tdesc;
876 #endif
877 }
878 else
879 {
880 const target_desc *tdesc = NULL;
881
882 if (xcr0_features)
883 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
884
885 if (tdesc == NULL)
886 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
887
888 return tdesc;
889 }
890
891 gdb_assert_not_reached ("failed to return tdesc");
892 }
893
894 /* Update the target description of all processes; a new GDB has
895 connected, and it may or may not support xml target descriptions. */
896
897 void
898 x86_target::update_xmltarget ()
899 {
900 struct thread_info *saved_thread = current_thread;
901
902 /* Before changing the register cache's internal layout, flush the
903 contents of the current valid caches back to the threads, and
904 release the current regcache objects. */
905 regcache_release ();
906
907 for_each_process ([this] (process_info *proc) {
908 int pid = proc->pid;
909
910 /* Look up any thread of this process. */
911 current_thread = find_any_thread_of_pid (pid);
912
913 low_arch_setup ();
914 });
915
916 current_thread = saved_thread;
917 }
918
919 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
920 PTRACE_GETREGSET. */
921
922 static void
923 x86_linux_process_qsupported (char **features, int count)
924 {
925 int i;
926
927 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
928 with "i386" in qSupported query, it supports x86 XML target
929 descriptions. */
930 use_xml = 0;
931 for (i = 0; i < count; i++)
932 {
933 const char *feature = features[i];
934
935 if (startswith (feature, "xmlRegisters="))
936 {
937 char *copy = xstrdup (feature + 13);
938
939 char *saveptr;
940 for (char *p = strtok_r (copy, ",", &saveptr);
941 p != NULL;
942 p = strtok_r (NULL, ",", &saveptr))
943 {
944 if (strcmp (p, "i386") == 0)
945 {
946 use_xml = 1;
947 break;
948 }
949 }
950
951 free (copy);
952 }
953 }
954 the_x86_target.update_xmltarget ();
955 }
956
957 /* Common for x86/x86-64. */
958
959 static struct regsets_info x86_regsets_info =
960 {
961 x86_regsets, /* regsets */
962 0, /* num_regsets */
963 NULL, /* disabled_regsets */
964 };
965
966 #ifdef __x86_64__
967 static struct regs_info amd64_linux_regs_info =
968 {
969 NULL, /* regset_bitmap */
970 NULL, /* usrregs_info */
971 &x86_regsets_info
972 };
973 #endif
974 static struct usrregs_info i386_linux_usrregs_info =
975 {
976 I386_NUM_REGS,
977 i386_regmap,
978 };
979
980 static struct regs_info i386_linux_regs_info =
981 {
982 NULL, /* regset_bitmap */
983 &i386_linux_usrregs_info,
984 &x86_regsets_info
985 };
986
987 const regs_info *
988 x86_target::get_regs_info ()
989 {
990 #ifdef __x86_64__
991 if (is_64bit_tdesc ())
992 return &amd64_linux_regs_info;
993 else
994 #endif
995 return &i386_linux_regs_info;
996 }
997
998 /* Initialize the target description for the architecture of the
999 inferior. */
1000
1001 void
1002 x86_target::low_arch_setup ()
1003 {
1004 current_process ()->tdesc = x86_linux_read_description ();
1005 }
1006
1007 /* Fill *SYSNO with the syscall nr trapped. This should only be called
1008 if LWP got a SYSCALL_SIGTRAP. */
1009
1010 static void
1011 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1012 {
1013 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1014
1015 if (use_64bit)
1016 {
1017 long l_sysno;
1018
1019 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1020 *sysno = (int) l_sysno;
1021 }
1022 else
1023 collect_register_by_name (regcache, "orig_eax", sysno);
1024 }
1025
1026 static int
1027 x86_supports_tracepoints (void)
1028 {
1029 return 1;
1030 }
1031
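/* Write the LEN bytes in BUF to the inferior at *TO and advance *TO
   past them.  */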
1032 static void
1033 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1034 {
1035 target_write_memory (*to, buf, len);
1036 *to += len;
1037 }
1038
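/* Decode the whitespace-separated hex bytes in OP into BUF; return
   the number of bytes written.  */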
1039 static int
1040 push_opcode (unsigned char *buf, const char *op)
1041 {
1042 unsigned char *buf_org = buf;
1043
1044 while (1)
1045 {
1046 char *endptr;
1047 unsigned long ul = strtoul (op, &endptr, 16);
1048
1049 if (endptr == op)
1050 break;
1051
1052 *buf++ = ul;
1053 op = endptr;
1054 }
1055
1056 return buf - buf_org;
1057 }
1058
1059 #ifdef __x86_64__
1060
1061 /* Build a jump pad that saves registers and calls a collection
1062 function. Writes the jump instruction that jumps to the jump pad into
1063 JJUMP_PAD_INSN. The caller is responsible for writing it in at the
1064 tracepoint address. */
1065
1066 static int
1067 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1068 CORE_ADDR collector,
1069 CORE_ADDR lockaddr,
1070 ULONGEST orig_size,
1071 CORE_ADDR *jump_entry,
1072 CORE_ADDR *trampoline,
1073 ULONGEST *trampoline_size,
1074 unsigned char *jjump_pad_insn,
1075 ULONGEST *jjump_pad_insn_size,
1076 CORE_ADDR *adjusted_insn_addr,
1077 CORE_ADDR *adjusted_insn_addr_end,
1078 char *err)
1079 {
1080 unsigned char buf[40];
1081 int i, offset;
1082 int64_t loffset;
1083
1084 CORE_ADDR buildaddr = *jump_entry;
1085
1086 /* Build the jump pad. */
1087
1088 /* First, do tracepoint data collection. Save registers. */
1089 i = 0;
1090 /* Need to ensure stack pointer saved first. */
1091 buf[i++] = 0x54; /* push %rsp */
1092 buf[i++] = 0x55; /* push %rbp */
1093 buf[i++] = 0x57; /* push %rdi */
1094 buf[i++] = 0x56; /* push %rsi */
1095 buf[i++] = 0x52; /* push %rdx */
1096 buf[i++] = 0x51; /* push %rcx */
1097 buf[i++] = 0x53; /* push %rbx */
1098 buf[i++] = 0x50; /* push %rax */
1099 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1100 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1101 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1102 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1103 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1104 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1105 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1106 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1107 buf[i++] = 0x9c; /* pushfq */
1108 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1109 buf[i++] = 0xbf;
1110 memcpy (buf + i, &tpaddr, 8);
1111 i += 8;
1112 buf[i++] = 0x57; /* push %rdi */
1113 append_insns (&buildaddr, i, buf);
1114
1115 /* Stack space for the collecting_t object. */
1116 i = 0;
1117 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1118 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1119 memcpy (buf + i, &tpoint, 8);
1120 i += 8;
1121 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1122 i += push_opcode (&buf[i],
1123 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1124 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1125 append_insns (&buildaddr, i, buf);
1126
1127 /* spin-lock. */
1128 i = 0;
1129 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1130 memcpy (&buf[i], (void *) &lockaddr, 8);
1131 i += 8;
1132 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1133 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1134 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1135 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1136 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1137 append_insns (&buildaddr, i, buf);
1138
1139 /* Set up the gdb_collect call. */
1140 /* At this point, (stack pointer + 0x18) is the base of our saved
1141 register block. */
1142
1143 i = 0;
1144 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1145 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1146
1147 /* tpoint address may be 64-bit wide. */
1148 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1149 memcpy (buf + i, &tpoint, 8);
1150 i += 8;
1151 append_insns (&buildaddr, i, buf);
1152
1153 /* The collector function, being in the shared library, may be
1154 more than 31 bits away from the jump pad. */
1155 i = 0;
1156 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1157 memcpy (buf + i, &collector, 8);
1158 i += 8;
1159 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1160 append_insns (&buildaddr, i, buf);
1161
1162 /* Clear the spin-lock. */
1163 i = 0;
1164 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1165 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1166 memcpy (buf + i, &lockaddr, 8);
1167 i += 8;
1168 append_insns (&buildaddr, i, buf);
1169
1170 /* Remove stack that had been used for the collect_t object. */
1171 i = 0;
1172 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1173 append_insns (&buildaddr, i, buf);
1174
1175 /* Restore register state. */
1176 i = 0;
1177 buf[i++] = 0x48; /* add $0x8,%rsp */
1178 buf[i++] = 0x83;
1179 buf[i++] = 0xc4;
1180 buf[i++] = 0x08;
1181 buf[i++] = 0x9d; /* popfq */
1182 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1183 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1184 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1185 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1186 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1187 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1188 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1189 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1190 buf[i++] = 0x58; /* pop %rax */
1191 buf[i++] = 0x5b; /* pop %rbx */
1192 buf[i++] = 0x59; /* pop %rcx */
1193 buf[i++] = 0x5a; /* pop %rdx */
1194 buf[i++] = 0x5e; /* pop %rsi */
1195 buf[i++] = 0x5f; /* pop %rdi */
1196 buf[i++] = 0x5d; /* pop %rbp */
1197 buf[i++] = 0x5c; /* pop %rsp */
1198 append_insns (&buildaddr, i, buf);
1199
1200 /* Now, adjust the original instruction to execute in the jump
1201 pad. */
1202 *adjusted_insn_addr = buildaddr;
1203 relocate_instruction (&buildaddr, tpaddr);
1204 *adjusted_insn_addr_end = buildaddr;
1205
1206 /* Finally, write a jump back to the program. */
1207
1208 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1209 if (loffset > INT_MAX || loffset < INT_MIN)
1210 {
1211 sprintf (err,
1212 "E.Jump back from jump pad too far from tracepoint "
1213 "(offset 0x%" PRIx64 " > int32).", loffset);
1214 return 1;
1215 }
1216
1217 offset = (int) loffset;
1218 memcpy (buf, jump_insn, sizeof (jump_insn));
1219 memcpy (buf + 1, &offset, 4);
1220 append_insns (&buildaddr, sizeof (jump_insn), buf);
1221
1222 /* The jump pad is now built. Wire in a jump to our jump pad. This
1223 is always done last (by our caller actually), so that we can
1224 install fast tracepoints with threads running. This relies on
1225 the agent's atomic write support. */
1226 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1227 if (loffset > INT_MAX || loffset < INT_MIN)
1228 {
1229 sprintf (err,
1230 "E.Jump pad too far from tracepoint "
1231 "(offset 0x%" PRIx64 " > int32).", loffset);
1232 return 1;
1233 }
1234
1235 offset = (int) loffset;
1236
1237 memcpy (buf, jump_insn, sizeof (jump_insn));
1238 memcpy (buf + 1, &offset, 4);
1239 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1240 *jjump_pad_insn_size = sizeof (jump_insn);
1241
1242 /* Return the end address of our pad. */
1243 *jump_entry = buildaddr;
1244
1245 return 0;
1246 }
1247
1248 #endif /* __x86_64__ */
1249
1250 /* Build a jump pad that saves registers and calls a collection
1251 function. Writes the jump instruction that jumps to the jump pad into
1252 JJUMP_PAD_INSN. The caller is responsible for writing it in at the
1253 tracepoint address. */
1254
1255 static int
1256 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1257 CORE_ADDR collector,
1258 CORE_ADDR lockaddr,
1259 ULONGEST orig_size,
1260 CORE_ADDR *jump_entry,
1261 CORE_ADDR *trampoline,
1262 ULONGEST *trampoline_size,
1263 unsigned char *jjump_pad_insn,
1264 ULONGEST *jjump_pad_insn_size,
1265 CORE_ADDR *adjusted_insn_addr,
1266 CORE_ADDR *adjusted_insn_addr_end,
1267 char *err)
1268 {
1269 unsigned char buf[0x100];
1270 int i, offset;
1271 CORE_ADDR buildaddr = *jump_entry;
1272
1273 /* Build the jump pad. */
1274
1275 /* First, do tracepoint data collection. Save registers. */
1276 i = 0;
1277 buf[i++] = 0x60; /* pushad */
1278 buf[i++] = 0x68; /* push tpaddr aka $pc */
1279 *((int *)(buf + i)) = (int) tpaddr;
1280 i += 4;
1281 buf[i++] = 0x9c; /* pushf */
1282 buf[i++] = 0x1e; /* push %ds */
1283 buf[i++] = 0x06; /* push %es */
1284 buf[i++] = 0x0f; /* push %fs */
1285 buf[i++] = 0xa0;
1286 buf[i++] = 0x0f; /* push %gs */
1287 buf[i++] = 0xa8;
1288 buf[i++] = 0x16; /* push %ss */
1289 buf[i++] = 0x0e; /* push %cs */
1290 append_insns (&buildaddr, i, buf);
1291
1292 /* Stack space for the collecting_t object. */
1293 i = 0;
1294 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1295
1296 /* Build the object. */
1297 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1298 memcpy (buf + i, &tpoint, 4);
1299 i += 4;
1300 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1301
1302 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1303 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1304 append_insns (&buildaddr, i, buf);
1305
1306 /* spin-lock. Note this uses cmpxchg, which is not available on the
1307 original i386. If we cared about that, we could use xchg instead. */
1308
1309 i = 0;
1310 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1311 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1312 %esp,<lockaddr> */
1313 memcpy (&buf[i], (void *) &lockaddr, 4);
1314 i += 4;
1315 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1316 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1317 append_insns (&buildaddr, i, buf);
1318
1319
1320 /* Set up arguments to the gdb_collect call. */
1321 i = 0;
1322 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1323 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1324 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1325 append_insns (&buildaddr, i, buf);
1326
1327 i = 0;
1328 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1329 append_insns (&buildaddr, i, buf);
1330
1331 i = 0;
1332 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1333 memcpy (&buf[i], (void *) &tpoint, 4);
1334 i += 4;
1335 append_insns (&buildaddr, i, buf);
1336
1337 buf[0] = 0xe8; /* call <reladdr> */
1338 offset = collector - (buildaddr + sizeof (jump_insn));
1339 memcpy (buf + 1, &offset, 4);
1340 append_insns (&buildaddr, 5, buf);
1341 /* Clean up after the call. */
1342 buf[0] = 0x83; /* add $0x8,%esp */
1343 buf[1] = 0xc4;
1344 buf[2] = 0x08;
1345 append_insns (&buildaddr, 3, buf);
1346
1347
1348 /* Clear the spin-lock. This would need the LOCK prefix on older
1349 broken archs. */
1350 i = 0;
1351 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1352 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1353 memcpy (buf + i, &lockaddr, 4);
1354 i += 4;
1355 append_insns (&buildaddr, i, buf);
1356
1357
1358 /* Remove stack that had been used for the collect_t object. */
1359 i = 0;
1360 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1361 append_insns (&buildaddr, i, buf);
1362
1363 i = 0;
1364 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1365 buf[i++] = 0xc4;
1366 buf[i++] = 0x04;
1367 buf[i++] = 0x17; /* pop %ss */
1368 buf[i++] = 0x0f; /* pop %gs */
1369 buf[i++] = 0xa9;
1370 buf[i++] = 0x0f; /* pop %fs */
1371 buf[i++] = 0xa1;
1372 buf[i++] = 0x07; /* pop %es */
1373 buf[i++] = 0x1f; /* pop %ds */
1374 buf[i++] = 0x9d; /* popf */
1375 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1376 buf[i++] = 0xc4;
1377 buf[i++] = 0x04;
1378 buf[i++] = 0x61; /* popad */
1379 append_insns (&buildaddr, i, buf);
1380
1381 /* Now, adjust the original instruction to execute in the jump
1382 pad. */
1383 *adjusted_insn_addr = buildaddr;
1384 relocate_instruction (&buildaddr, tpaddr);
1385 *adjusted_insn_addr_end = buildaddr;
1386
1387 /* Write the jump back to the program. */
1388 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1389 memcpy (buf, jump_insn, sizeof (jump_insn));
1390 memcpy (buf + 1, &offset, 4);
1391 append_insns (&buildaddr, sizeof (jump_insn), buf);
1392
1393 /* The jump pad is now built. Wire in a jump to our jump pad. This
1394 is always done last (by our caller actually), so that we can
1395 install fast tracepoints with threads running. This relies on
1396 the agent's atomic write support. */
1397 if (orig_size == 4)
1398 {
1399 /* Create a trampoline. */
1400 *trampoline_size = sizeof (jump_insn);
1401 if (!claim_trampoline_space (*trampoline_size, trampoline))
1402 {
1403 /* No trampoline space available. */
1404 strcpy (err,
1405 "E.Cannot allocate trampoline space needed for fast "
1406 "tracepoints on 4-byte instructions.");
1407 return 1;
1408 }
1409
1410 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1411 memcpy (buf, jump_insn, sizeof (jump_insn));
1412 memcpy (buf + 1, &offset, 4);
1413 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1414
1415 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1416 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1417 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1418 memcpy (buf + 2, &offset, 2);
1419 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1420 *jjump_pad_insn_size = sizeof (small_jump_insn);
1421 }
1422 else
1423 {
1424 /* Else use a 32-bit relative jump instruction. */
1425 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1426 memcpy (buf, jump_insn, sizeof (jump_insn));
1427 memcpy (buf + 1, &offset, 4);
1428 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1429 *jjump_pad_insn_size = sizeof (jump_insn);
1430 }
1431
1432 /* Return the end address of our pad. */
1433 *jump_entry = buildaddr;
1434
1435 return 0;
1436 }
1437
1438 static int
1439 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1440 CORE_ADDR collector,
1441 CORE_ADDR lockaddr,
1442 ULONGEST orig_size,
1443 CORE_ADDR *jump_entry,
1444 CORE_ADDR *trampoline,
1445 ULONGEST *trampoline_size,
1446 unsigned char *jjump_pad_insn,
1447 ULONGEST *jjump_pad_insn_size,
1448 CORE_ADDR *adjusted_insn_addr,
1449 CORE_ADDR *adjusted_insn_addr_end,
1450 char *err)
1451 {
1452 #ifdef __x86_64__
1453 if (is_64bit_tdesc ())
1454 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1455 collector, lockaddr,
1456 orig_size, jump_entry,
1457 trampoline, trampoline_size,
1458 jjump_pad_insn,
1459 jjump_pad_insn_size,
1460 adjusted_insn_addr,
1461 adjusted_insn_addr_end,
1462 err);
1463 #endif
1464
1465 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1466 collector, lockaddr,
1467 orig_size, jump_entry,
1468 trampoline, trampoline_size,
1469 jjump_pad_insn,
1470 jjump_pad_insn_size,
1471 adjusted_insn_addr,
1472 adjusted_insn_addr_end,
1473 err);
1474 }
1475
1476 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1477 architectures. */
1478
1479 static int
1480 x86_get_min_fast_tracepoint_insn_len (void)
1481 {
1482 static int warned_about_fast_tracepoints = 0;
1483
1484 #ifdef __x86_64__
1485 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1486 used for fast tracepoints. */
1487 if (is_64bit_tdesc ())
1488 return 5;
1489 #endif
1490
1491 if (agent_loaded_p ())
1492 {
1493 char errbuf[IPA_BUFSIZ];
1494
1495 errbuf[0] = '\0';
1496
1497 /* On x86, if trampolines are available, then 4-byte jump instructions
1498 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1499 with a 4-byte offset are used instead. */
1500 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1501 return 4;
1502 else
1503 {
1504 /* GDB has no channel to explain to the user why a shorter fast
1505 tracepoint is not possible, but at least make GDBserver
1506 mention that something has gone awry. */
1507 if (!warned_about_fast_tracepoints)
1508 {
1509 warning ("4-byte fast tracepoints not available; %s", errbuf);
1510 warned_about_fast_tracepoints = 1;
1511 }
1512 return 5;
1513 }
1514 }
1515 else
1516 {
1517 /* Indicate that the minimum length is currently unknown since the IPA
1518 has not loaded yet. */
1519 return 0;
1520 }
1521 }
1522
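/* Append LEN bytes of compiled code from START at CURRENT_INSN_PTR
   and advance CURRENT_INSN_PTR past them.  */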
1523 static void
1524 add_insns (unsigned char *start, int len)
1525 {
1526 CORE_ADDR buildaddr = current_insn_ptr;
1527
1528 if (debug_threads)
1529 debug_printf ("Adding %d bytes of insn at %s\n",
1530 len, paddress (buildaddr));
1531
1532 append_insns (&buildaddr, len, start);
1533 current_insn_ptr = buildaddr;
1534 }
1535
1536 /* Our general strategy for emitting code is to avoid specifying raw
1537 bytes whenever possible, and instead copy a block of inline asm
1538 that is embedded in the function. This is a little messy, because
1539 we need to keep the compiler from discarding what looks like dead
1540 code, plus suppress various warnings. */
1541
1542 #define EMIT_ASM(NAME, INSNS) \
1543 do \
1544 { \
1545 extern unsigned char start_ ## NAME, end_ ## NAME; \
1546 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1547 __asm__ ("jmp end_" #NAME "\n" \
1548 "\t" "start_" #NAME ":" \
1549 "\t" INSNS "\n" \
1550 "\t" "end_" #NAME ":"); \
1551 } while (0)
1552
1553 #ifdef __x86_64__
1554
1555 #define EMIT_ASM32(NAME,INSNS) \
1556 do \
1557 { \
1558 extern unsigned char start_ ## NAME, end_ ## NAME; \
1559 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1560 __asm__ (".code32\n" \
1561 "\t" "jmp end_" #NAME "\n" \
1562 "\t" "start_" #NAME ":\n" \
1563 "\t" INSNS "\n" \
1564 "\t" "end_" #NAME ":\n" \
1565 ".code64\n"); \
1566 } while (0)
1567
1568 #else
1569
1570 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1571
1572 #endif
1573
1574 #ifdef __x86_64__
1575
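/* The amd64 emit_ops callbacks below compile agent expression bytecode
   to native code.  The top of the expression's value stack is kept in
   %rax, with the remaining entries on the memory stack.  */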
1576 static void
1577 amd64_emit_prologue (void)
1578 {
1579 EMIT_ASM (amd64_prologue,
1580 "pushq %rbp\n\t"
1581 "movq %rsp,%rbp\n\t"
1582 "sub $0x20,%rsp\n\t"
1583 "movq %rdi,-8(%rbp)\n\t"
1584 "movq %rsi,-16(%rbp)");
1585 }
1586
1587
1588 static void
1589 amd64_emit_epilogue (void)
1590 {
1591 EMIT_ASM (amd64_epilogue,
1592 "movq -16(%rbp),%rdi\n\t"
1593 "movq %rax,(%rdi)\n\t"
1594 "xor %rax,%rax\n\t"
1595 "leave\n\t"
1596 "ret");
1597 }
1598
1599 static void
1600 amd64_emit_add (void)
1601 {
1602 EMIT_ASM (amd64_add,
1603 "add (%rsp),%rax\n\t"
1604 "lea 0x8(%rsp),%rsp");
1605 }
1606
1607 static void
1608 amd64_emit_sub (void)
1609 {
1610 EMIT_ASM (amd64_sub,
1611 "sub %rax,(%rsp)\n\t"
1612 "pop %rax");
1613 }
1614
1615 static void
1616 amd64_emit_mul (void)
1617 {
1618 emit_error = 1;
1619 }
1620
1621 static void
1622 amd64_emit_lsh (void)
1623 {
1624 emit_error = 1;
1625 }
1626
1627 static void
1628 amd64_emit_rsh_signed (void)
1629 {
1630 emit_error = 1;
1631 }
1632
1633 static void
1634 amd64_emit_rsh_unsigned (void)
1635 {
1636 emit_error = 1;
1637 }
1638
1639 static void
1640 amd64_emit_ext (int arg)
1641 {
1642 switch (arg)
1643 {
1644 case 8:
1645 EMIT_ASM (amd64_ext_8,
1646 "cbtw\n\t"
1647 "cwtl\n\t"
1648 "cltq");
1649 break;
1650 case 16:
1651 EMIT_ASM (amd64_ext_16,
1652 "cwtl\n\t"
1653 "cltq");
1654 break;
1655 case 32:
1656 EMIT_ASM (amd64_ext_32,
1657 "cltq");
1658 break;
1659 default:
1660 emit_error = 1;
1661 }
1662 }
1663
1664 static void
1665 amd64_emit_log_not (void)
1666 {
1667 EMIT_ASM (amd64_log_not,
1668 "test %rax,%rax\n\t"
1669 "sete %cl\n\t"
1670 "movzbq %cl,%rax");
1671 }
1672
1673 static void
1674 amd64_emit_bit_and (void)
1675 {
1676 EMIT_ASM (amd64_and,
1677 "and (%rsp),%rax\n\t"
1678 "lea 0x8(%rsp),%rsp");
1679 }
1680
1681 static void
1682 amd64_emit_bit_or (void)
1683 {
1684 EMIT_ASM (amd64_or,
1685 "or (%rsp),%rax\n\t"
1686 "lea 0x8(%rsp),%rsp");
1687 }
1688
1689 static void
1690 amd64_emit_bit_xor (void)
1691 {
1692 EMIT_ASM (amd64_xor,
1693 "xor (%rsp),%rax\n\t"
1694 "lea 0x8(%rsp),%rsp");
1695 }
1696
1697 static void
1698 amd64_emit_bit_not (void)
1699 {
1700 EMIT_ASM (amd64_bit_not,
1701 "xorq $0xffffffffffffffff,%rax");
1702 }
1703
1704 static void
1705 amd64_emit_equal (void)
1706 {
1707 EMIT_ASM (amd64_equal,
1708 "cmp %rax,(%rsp)\n\t"
1709 "je .Lamd64_equal_true\n\t"
1710 "xor %rax,%rax\n\t"
1711 "jmp .Lamd64_equal_end\n\t"
1712 ".Lamd64_equal_true:\n\t"
1713 "mov $0x1,%rax\n\t"
1714 ".Lamd64_equal_end:\n\t"
1715 "lea 0x8(%rsp),%rsp");
1716 }
1717
1718 static void
1719 amd64_emit_less_signed (void)
1720 {
1721 EMIT_ASM (amd64_less_signed,
1722 "cmp %rax,(%rsp)\n\t"
1723 "jl .Lamd64_less_signed_true\n\t"
1724 "xor %rax,%rax\n\t"
1725 "jmp .Lamd64_less_signed_end\n\t"
1726 ".Lamd64_less_signed_true:\n\t"
1727 "mov $1,%rax\n\t"
1728 ".Lamd64_less_signed_end:\n\t"
1729 "lea 0x8(%rsp),%rsp");
1730 }
1731
1732 static void
1733 amd64_emit_less_unsigned (void)
1734 {
1735 EMIT_ASM (amd64_less_unsigned,
1736 "cmp %rax,(%rsp)\n\t"
1737 "jb .Lamd64_less_unsigned_true\n\t"
1738 "xor %rax,%rax\n\t"
1739 "jmp .Lamd64_less_unsigned_end\n\t"
1740 ".Lamd64_less_unsigned_true:\n\t"
1741 "mov $1,%rax\n\t"
1742 ".Lamd64_less_unsigned_end:\n\t"
1743 "lea 0x8(%rsp),%rsp");
1744 }
1745
1746 static void
1747 amd64_emit_ref (int size)
1748 {
1749 switch (size)
1750 {
1751 case 1:
1752 EMIT_ASM (amd64_ref1,
1753 "movb (%rax),%al");
1754 break;
1755 case 2:
1756 EMIT_ASM (amd64_ref2,
1757 "movw (%rax),%ax");
1758 break;
1759 case 4:
1760 EMIT_ASM (amd64_ref4,
1761 "movl (%rax),%eax");
1762 break;
1763 case 8:
1764 EMIT_ASM (amd64_ref8,
1765 "movq (%rax),%rax");
1766 break;
1767 }
1768 }
1769
1770 static void
1771 amd64_emit_if_goto (int *offset_p, int *size_p)
1772 {
1773 EMIT_ASM (amd64_if_goto,
1774 "mov %rax,%rcx\n\t"
1775 "pop %rax\n\t"
1776 "cmp $0,%rcx\n\t"
1777 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1778 if (offset_p)
1779 *offset_p = 10;
1780 if (size_p)
1781 *size_p = 4;
1782 }
1783
1784 static void
1785 amd64_emit_goto (int *offset_p, int *size_p)
1786 {
1787 EMIT_ASM (amd64_goto,
1788 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1789 if (offset_p)
1790 *offset_p = 1;
1791 if (size_p)
1792 *size_p = 4;
1793 }
1794
1795 static void
1796 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1797 {
1798 int diff = (to - (from + size));
1799 unsigned char buf[sizeof (int)];
1800
1801 if (size != 4)
1802 {
1803 emit_error = 1;
1804 return;
1805 }
1806
1807 memcpy (buf, &diff, sizeof (int));
1808 target_write_memory (from, buf, sizeof (int));
1809 }
1810
1811 static void
1812 amd64_emit_const (LONGEST num)
1813 {
1814 unsigned char buf[16];
1815 int i;
1816 CORE_ADDR buildaddr = current_insn_ptr;
1817
1818 i = 0;
1819 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1820 memcpy (&buf[i], &num, sizeof (num));
1821 i += 8;
1822 append_insns (&buildaddr, i, buf);
1823 current_insn_ptr = buildaddr;
1824 }
1825
1826 static void
1827 amd64_emit_call (CORE_ADDR fn)
1828 {
1829 unsigned char buf[16];
1830 int i;
1831 CORE_ADDR buildaddr;
1832 LONGEST offset64;
1833
1834 /* The destination function, being in the shared library, may be
1835 more than 31 bits away from the compiled code pad. */
1836
1837 buildaddr = current_insn_ptr;
1838
1839 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1840
1841 i = 0;
1842
1843 if (offset64 > INT_MAX || offset64 < INT_MIN)
1844 {
1845 /* The offset is too large for a direct call. Use an indirect callq
1846 through a register instead (we avoid this when possible). Use %r10:
1847 since it is call-clobbered, we don't have to push/pop it. */
1848 buf[i++] = 0x48; /* mov $fn,%r10 */
1849 buf[i++] = 0xba;
1850 memcpy (buf + i, &fn, 8);
1851 i += 8;
1852 buf[i++] = 0xff; /* callq *%r10 */
1853 buf[i++] = 0xd2;
1854 }
1855 else
1856 {
1857 int offset32 = offset64; /* we know we can't overflow here. */
1858
1859 buf[i++] = 0xe8; /* call <reladdr> */
1860 memcpy (buf + i, &offset32, 4);
1861 i += 4;
1862 }
1863
1864 append_insns (&buildaddr, i, buf);
1865 current_insn_ptr = buildaddr;
1866 }
1867
1868 static void
1869 amd64_emit_reg (int reg)
1870 {
1871 unsigned char buf[16];
1872 int i;
1873 CORE_ADDR buildaddr;
1874
1875 /* Assume raw_regs is still in %rdi. */
1876 buildaddr = current_insn_ptr;
1877 i = 0;
1878 buf[i++] = 0xbe; /* mov $<n>,%esi */
1879 memcpy (&buf[i], &reg, sizeof (reg));
1880 i += 4;
1881 append_insns (&buildaddr, i, buf);
1882 current_insn_ptr = buildaddr;
1883 amd64_emit_call (get_raw_reg_func_addr ());
1884 }
1885
1886 static void
1887 amd64_emit_pop (void)
1888 {
1889 EMIT_ASM (amd64_pop,
1890 "pop %rax");
1891 }
1892
1893 static void
1894 amd64_emit_stack_flush (void)
1895 {
1896 EMIT_ASM (amd64_stack_flush,
1897 "push %rax");
1898 }
1899
1900 static void
1901 amd64_emit_zero_ext (int arg)
1902 {
1903 switch (arg)
1904 {
1905 case 8:
1906 EMIT_ASM (amd64_zero_ext_8,
1907 "and $0xff,%rax");
1908 break;
1909 case 16:
1910 EMIT_ASM (amd64_zero_ext_16,
1911 "and $0xffff,%rax");
1912 break;
1913 case 32:
1914 EMIT_ASM (amd64_zero_ext_32,
1915 "mov $0xffffffff,%rcx\n\t"
1916 "and %rcx,%rax");
1917 break;
1918 default:
1919 emit_error = 1;
1920 }
1921 }
1922
1923 static void
1924 amd64_emit_swap (void)
1925 {
1926 EMIT_ASM (amd64_swap,
1927 "mov %rax,%rcx\n\t"
1928 "pop %rax\n\t"
1929 "push %rcx");
1930 }
1931
1932 static void
1933 amd64_emit_stack_adjust (int n)
1934 {
1935 unsigned char buf[16];
1936 int i;
1937 CORE_ADDR buildaddr = current_insn_ptr;
1938
1939 i = 0;
1940 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1941 buf[i++] = 0x8d;
1942 buf[i++] = 0x64;
1943 buf[i++] = 0x24;
1944 /* This only handles adjustments up to 16, but we don't expect any more. */
1945 buf[i++] = n * 8;
1946 append_insns (&buildaddr, i, buf);
1947 current_insn_ptr = buildaddr;
1948 }
1949
1950 /* FN's prototype is `LONGEST(*fn)(int)'. */
1951
1952 static void
1953 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1954 {
1955 unsigned char buf[16];
1956 int i;
1957 CORE_ADDR buildaddr;
1958
1959 buildaddr = current_insn_ptr;
1960 i = 0;
1961 buf[i++] = 0xbf; /* movl $<n>,%edi */
1962 memcpy (&buf[i], &arg1, sizeof (arg1));
1963 i += 4;
1964 append_insns (&buildaddr, i, buf);
1965 current_insn_ptr = buildaddr;
1966 amd64_emit_call (fn);
1967 }
1968
1969 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1970
1971 static void
1972 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1973 {
1974 unsigned char buf[16];
1975 int i;
1976 CORE_ADDR buildaddr;
1977
1978 buildaddr = current_insn_ptr;
1979 i = 0;
1980 buf[i++] = 0xbf; /* movl $<n>,%edi */
1981 memcpy (&buf[i], &arg1, sizeof (arg1));
1982 i += 4;
1983 append_insns (&buildaddr, i, buf);
1984 current_insn_ptr = buildaddr;
1985 EMIT_ASM (amd64_void_call_2_a,
1986 /* Save away a copy of the stack top. */
1987 "push %rax\n\t"
1988 /* Also pass top as the second argument. */
1989 "mov %rax,%rsi");
1990 amd64_emit_call (fn);
1991 EMIT_ASM (amd64_void_call_2_b,
1992 /* Restore the stack top, %rax may have been trashed. */
1993 "pop %rax");
1994 }
1995
1996 static void
1997 amd64_emit_eq_goto (int *offset_p, int *size_p)
1998 {
1999 EMIT_ASM (amd64_eq,
2000 "cmp %rax,(%rsp)\n\t"
2001 "jne .Lamd64_eq_fallthru\n\t"
2002 "lea 0x8(%rsp),%rsp\n\t"
2003 "pop %rax\n\t"
2004 /* jmp, but don't trust the assembler to choose the right jump */
2005 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2006 ".Lamd64_eq_fallthru:\n\t"
2007 "lea 0x8(%rsp),%rsp\n\t"
2008 "pop %rax");
2009
2010 if (offset_p)
2011 *offset_p = 13;
2012 if (size_p)
2013 *size_p = 4;
2014 }
2015
2016 static void
2017 amd64_emit_ne_goto (int *offset_p, int *size_p)
2018 {
2019 EMIT_ASM (amd64_ne,
2020 "cmp %rax,(%rsp)\n\t"
2021 "je .Lamd64_ne_fallthru\n\t"
2022 "lea 0x8(%rsp),%rsp\n\t"
2023 "pop %rax\n\t"
2024 /* jmp, but don't trust the assembler to choose the right jump */
2025 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2026 ".Lamd64_ne_fallthru:\n\t"
2027 "lea 0x8(%rsp),%rsp\n\t"
2028 "pop %rax");
2029
2030 if (offset_p)
2031 *offset_p = 13;
2032 if (size_p)
2033 *size_p = 4;
2034 }
2035
2036 static void
2037 amd64_emit_lt_goto (int *offset_p, int *size_p)
2038 {
2039 EMIT_ASM (amd64_lt,
2040 "cmp %rax,(%rsp)\n\t"
2041 "jnl .Lamd64_lt_fallthru\n\t"
2042 "lea 0x8(%rsp),%rsp\n\t"
2043 "pop %rax\n\t"
2044 /* jmp, but don't trust the assembler to choose the right jump */
2045 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2046 ".Lamd64_lt_fallthru:\n\t"
2047 "lea 0x8(%rsp),%rsp\n\t"
2048 "pop %rax");
2049
2050 if (offset_p)
2051 *offset_p = 13;
2052 if (size_p)
2053 *size_p = 4;
2054 }
2055
2056 static void
2057 amd64_emit_le_goto (int *offset_p, int *size_p)
2058 {
2059 EMIT_ASM (amd64_le,
2060 "cmp %rax,(%rsp)\n\t"
2061 "jnle .Lamd64_le_fallthru\n\t"
2062 "lea 0x8(%rsp),%rsp\n\t"
2063 "pop %rax\n\t"
2064 /* jmp, but don't trust the assembler to choose the right jump */
2065 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2066 ".Lamd64_le_fallthru:\n\t"
2067 "lea 0x8(%rsp),%rsp\n\t"
2068 "pop %rax");
2069
2070 if (offset_p)
2071 *offset_p = 13;
2072 if (size_p)
2073 *size_p = 4;
2074 }
2075
2076 static void
2077 amd64_emit_gt_goto (int *offset_p, int *size_p)
2078 {
2079 EMIT_ASM (amd64_gt,
2080 "cmp %rax,(%rsp)\n\t"
2081 "jng .Lamd64_gt_fallthru\n\t"
2082 "lea 0x8(%rsp),%rsp\n\t"
2083 "pop %rax\n\t"
2084 /* jmp, but don't trust the assembler to choose the right jump */
2085 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2086 ".Lamd64_gt_fallthru:\n\t"
2087 "lea 0x8(%rsp),%rsp\n\t"
2088 "pop %rax");
2089
2090 if (offset_p)
2091 *offset_p = 13;
2092 if (size_p)
2093 *size_p = 4;
2094 }
2095
2096 static void
2097 amd64_emit_ge_goto (int *offset_p, int *size_p)
2098 {
2099 EMIT_ASM (amd64_ge,
2100 "cmp %rax,(%rsp)\n\t"
2101 "jnge .Lamd64_ge_fallthru\n\t"
2102 ".Lamd64_ge_jump:\n\t"
2103 "lea 0x8(%rsp),%rsp\n\t"
2104 "pop %rax\n\t"
2105 /* jmp, but don't trust the assembler to choose the right jump */
2106 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2107 ".Lamd64_ge_fallthru:\n\t"
2108 "lea 0x8(%rsp),%rsp\n\t"
2109 "pop %rax");
2110
2111 if (offset_p)
2112 *offset_p = 13;
2113 if (size_p)
2114 *size_p = 4;
2115 }
2116
2117 struct emit_ops amd64_emit_ops =
2118 {
2119 amd64_emit_prologue,
2120 amd64_emit_epilogue,
2121 amd64_emit_add,
2122 amd64_emit_sub,
2123 amd64_emit_mul,
2124 amd64_emit_lsh,
2125 amd64_emit_rsh_signed,
2126 amd64_emit_rsh_unsigned,
2127 amd64_emit_ext,
2128 amd64_emit_log_not,
2129 amd64_emit_bit_and,
2130 amd64_emit_bit_or,
2131 amd64_emit_bit_xor,
2132 amd64_emit_bit_not,
2133 amd64_emit_equal,
2134 amd64_emit_less_signed,
2135 amd64_emit_less_unsigned,
2136 amd64_emit_ref,
2137 amd64_emit_if_goto,
2138 amd64_emit_goto,
2139 amd64_write_goto_address,
2140 amd64_emit_const,
2141 amd64_emit_call,
2142 amd64_emit_reg,
2143 amd64_emit_pop,
2144 amd64_emit_stack_flush,
2145 amd64_emit_zero_ext,
2146 amd64_emit_swap,
2147 amd64_emit_stack_adjust,
2148 amd64_emit_int_call_1,
2149 amd64_emit_void_call_2,
2150 amd64_emit_eq_goto,
2151 amd64_emit_ne_goto,
2152 amd64_emit_lt_goto,
2153 amd64_emit_le_goto,
2154 amd64_emit_gt_goto,
2155 amd64_emit_ge_goto
2156 };
2157
2158 #endif /* __x86_64__ */
2159
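/* The i386 emitters below operate on 64-bit values split across two
   32-bit registers: the low half of the top of the value stack is
   kept in %eax and the high half in %ebx.  Each entry spilled to the
   machine stack therefore occupies 8 bytes, which is why the add/adc
   and sub/sbb pairs touch both halves and why entries are discarded
   with "lea 0x8(%esp),%esp".  */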
2160 static void
2161 i386_emit_prologue (void)
2162 {
2163 EMIT_ASM32 (i386_prologue,
2164 "push %ebp\n\t"
2165 "mov %esp,%ebp\n\t"
2166 "push %ebx");
2167 /* At this point, the raw regs base address is at 8(%ebp), and the
2168 value pointer is at 12(%ebp). */
2169 }
2170
2171 static void
2172 i386_emit_epilogue (void)
2173 {
2174 EMIT_ASM32 (i386_epilogue,
2175 "mov 12(%ebp),%ecx\n\t"
2176 "mov %eax,(%ecx)\n\t"
2177 "mov %ebx,0x4(%ecx)\n\t"
2178 "xor %eax,%eax\n\t"
2179 "pop %ebx\n\t"
2180 "pop %ebp\n\t"
2181 "ret");
2182 }
2183
2184 static void
2185 i386_emit_add (void)
2186 {
2187 EMIT_ASM32 (i386_add,
2188 "add (%esp),%eax\n\t"
2189 "adc 0x4(%esp),%ebx\n\t"
2190 "lea 0x8(%esp),%esp");
2191 }
2192
2193 static void
2194 i386_emit_sub (void)
2195 {
2196 EMIT_ASM32 (i386_sub,
2197 "subl %eax,(%esp)\n\t"
2198 "sbbl %ebx,4(%esp)\n\t"
2199 "pop %eax\n\t"
2200 "pop %ebx\n\t");
2201 }
2202
2203 static void
2204 i386_emit_mul (void)
2205 {
2206 emit_error = 1;
2207 }
2208
2209 static void
2210 i386_emit_lsh (void)
2211 {
2212 emit_error = 1;
2213 }
2214
2215 static void
2216 i386_emit_rsh_signed (void)
2217 {
2218 emit_error = 1;
2219 }
2220
2221 static void
2222 i386_emit_rsh_unsigned (void)
2223 {
2224 emit_error = 1;
2225 }
2226
2227 static void
2228 i386_emit_ext (int arg)
2229 {
2230 switch (arg)
2231 {
2232 case 8:
2233 EMIT_ASM32 (i386_ext_8,
2234 "cbtw\n\t"
2235 "cwtl\n\t"
2236 "movl %eax,%ebx\n\t"
2237 "sarl $31,%ebx");
2238 break;
2239 case 16:
2240 EMIT_ASM32 (i386_ext_16,
2241 "cwtl\n\t"
2242 "movl %eax,%ebx\n\t"
2243 "sarl $31,%ebx");
2244 break;
2245 case 32:
2246 EMIT_ASM32 (i386_ext_32,
2247 "movl %eax,%ebx\n\t"
2248 "sarl $31,%ebx");
2249 break;
2250 default:
2251 emit_error = 1;
2252 }
2253 }
2254
2255 static void
2256 i386_emit_log_not (void)
2257 {
2258 EMIT_ASM32 (i386_log_not,
2259 "or %ebx,%eax\n\t"
2260 "test %eax,%eax\n\t"
2261 "sete %cl\n\t"
2262 "xor %ebx,%ebx\n\t"
2263 "movzbl %cl,%eax");
2264 }
2265
2266 static void
2267 i386_emit_bit_and (void)
2268 {
2269 EMIT_ASM32 (i386_and,
2270 "and (%esp),%eax\n\t"
2271 "and 0x4(%esp),%ebx\n\t"
2272 "lea 0x8(%esp),%esp");
2273 }
2274
2275 static void
2276 i386_emit_bit_or (void)
2277 {
2278 EMIT_ASM32 (i386_or,
2279 "or (%esp),%eax\n\t"
2280 "or 0x4(%esp),%ebx\n\t"
2281 "lea 0x8(%esp),%esp");
2282 }
2283
2284 static void
2285 i386_emit_bit_xor (void)
2286 {
2287 EMIT_ASM32 (i386_xor,
2288 "xor (%esp),%eax\n\t"
2289 "xor 0x4(%esp),%ebx\n\t"
2290 "lea 0x8(%esp),%esp");
2291 }
2292
2293 static void
2294 i386_emit_bit_not (void)
2295 {
2296 EMIT_ASM32 (i386_bit_not,
2297 "xor $0xffffffff,%eax\n\t"
2298 "xor $0xffffffff,%ebx\n\t");
2299 }
2300
2301 static void
2302 i386_emit_equal (void)
2303 {
2304 EMIT_ASM32 (i386_equal,
2305 "cmpl %ebx,4(%esp)\n\t"
2306 "jne .Li386_equal_false\n\t"
2307 "cmpl %eax,(%esp)\n\t"
2308 "je .Li386_equal_true\n\t"
2309 ".Li386_equal_false:\n\t"
2310 "xor %eax,%eax\n\t"
2311 "jmp .Li386_equal_end\n\t"
2312 ".Li386_equal_true:\n\t"
2313 "mov $1,%eax\n\t"
2314 ".Li386_equal_end:\n\t"
2315 "xor %ebx,%ebx\n\t"
2316 "lea 0x8(%esp),%esp");
2317 }
2318
2319 static void
2320 i386_emit_less_signed (void)
2321 {
2322 EMIT_ASM32 (i386_less_signed,
2323 "cmpl %ebx,4(%esp)\n\t"
2324 "jl .Li386_less_signed_true\n\t"
2325 "jne .Li386_less_signed_false\n\t"
2326 "cmpl %eax,(%esp)\n\t"
2327 "jl .Li386_less_signed_true\n\t"
2328 ".Li386_less_signed_false:\n\t"
2329 "xor %eax,%eax\n\t"
2330 "jmp .Li386_less_signed_end\n\t"
2331 ".Li386_less_signed_true:\n\t"
2332 "mov $1,%eax\n\t"
2333 ".Li386_less_signed_end:\n\t"
2334 "xor %ebx,%ebx\n\t"
2335 "lea 0x8(%esp),%esp");
2336 }
2337
2338 static void
2339 i386_emit_less_unsigned (void)
2340 {
2341 EMIT_ASM32 (i386_less_unsigned,
2342 "cmpl %ebx,4(%esp)\n\t"
2343 "jb .Li386_less_unsigned_true\n\t"
2344 "jne .Li386_less_unsigned_false\n\t"
2345 "cmpl %eax,(%esp)\n\t"
2346 "jb .Li386_less_unsigned_true\n\t"
2347 ".Li386_less_unsigned_false:\n\t"
2348 "xor %eax,%eax\n\t"
2349 "jmp .Li386_less_unsigned_end\n\t"
2350 ".Li386_less_unsigned_true:\n\t"
2351 "mov $1,%eax\n\t"
2352 ".Li386_less_unsigned_end:\n\t"
2353 "xor %ebx,%ebx\n\t"
2354 "lea 0x8(%esp),%esp");
2355 }
2356
2357 static void
2358 i386_emit_ref (int size)
2359 {
2360 switch (size)
2361 {
2362 case 1:
2363 EMIT_ASM32 (i386_ref1,
2364 "movb (%eax),%al");
2365 break;
2366 case 2:
2367 EMIT_ASM32 (i386_ref2,
2368 "movw (%eax),%ax");
2369 break;
2370 case 4:
2371 EMIT_ASM32 (i386_ref4,
2372 "movl (%eax),%eax");
2373 break;
2374 case 8:
2375 EMIT_ASM32 (i386_ref8,
2376 "movl 4(%eax),%ebx\n\t"
2377 "movl (%eax),%eax");
2378 break;
2379 }
2380 }
2381
2382 static void
2383 i386_emit_if_goto (int *offset_p, int *size_p)
2384 {
2385 EMIT_ASM32 (i386_if_goto,
2386 "mov %eax,%ecx\n\t"
2387 "or %ebx,%ecx\n\t"
2388 "pop %eax\n\t"
2389 "pop %ebx\n\t"
2390 "cmpl $0,%ecx\n\t"
2391 /* Don't trust the assembler to choose the right jump */
2392 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2393
2394 if (offset_p)
2395 *offset_p = 11; /* be sure that this matches the sequence above */
2396 if (size_p)
2397 *size_p = 4;
2398 }
2399
2400 static void
2401 i386_emit_goto (int *offset_p, int *size_p)
2402 {
2403 EMIT_ASM32 (i386_goto,
2404 /* Don't trust the assembler to choose the right jump */
2405 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2406 if (offset_p)
2407 *offset_p = 1;
2408 if (size_p)
2409 *size_p = 4;
2410 }
2411
2412 static void
2413 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2414 {
2415 int diff = (to - (from + size));
2416 unsigned char buf[sizeof (int)];
2417
2418 /* We're only doing 4-byte sizes at the moment. */
2419 if (size != 4)
2420 {
2421 emit_error = 1;
2422 return;
2423 }
2424
2425 memcpy (buf, &diff, sizeof (int));
2426 target_write_memory (from, buf, sizeof (int));
2427 }
2428
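/* Load the 64-bit constant NUM into the top-of-stack registers:
   low 32 bits into %eax, high 32 bits into %ebx (%ebx is simply
   zeroed when the constant fits in 32 bits).  */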
2429 static void
2430 i386_emit_const (LONGEST num)
2431 {
2432 unsigned char buf[16];
2433 int i, hi, lo;
2434 CORE_ADDR buildaddr = current_insn_ptr;
2435
2436 i = 0;
2437 buf[i++] = 0xb8; /* mov $<n>,%eax */
2438 lo = num & 0xffffffff;
2439 memcpy (&buf[i], &lo, sizeof (lo));
2440 i += 4;
2441 hi = ((num >> 32) & 0xffffffff);
2442 if (hi)
2443 {
2444 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2445 memcpy (&buf[i], &hi, sizeof (hi));
2446 i += 4;
2447 }
2448 else
2449 {
2450 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2451 }
2452 append_insns (&buildaddr, i, buf);
2453 current_insn_ptr = buildaddr;
2454 }
2455
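/* Emit a direct "call rel32" (opcode 0xe8) to FN.  The 32-bit
   displacement is relative to the end of the 5-byte instruction,
   hence the "- (buildaddr + 5)" computation below.  */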
2456 static void
2457 i386_emit_call (CORE_ADDR fn)
2458 {
2459 unsigned char buf[16];
2460 int i, offset;
2461 CORE_ADDR buildaddr;
2462
2463 buildaddr = current_insn_ptr;
2464 i = 0;
2465 buf[i++] = 0xe8; /* call <reladdr> */
2466 offset = ((int) fn) - (buildaddr + 5);
2467 memcpy (buf + 1, &offset, 4);
2468 append_insns (&buildaddr, 5, buf);
2469 current_insn_ptr = buildaddr;
2470 }
2471
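/* Fetch raw register REG: the emitted code calls the helper whose
   address get_raw_reg_func_addr returns, passing the raw register
   block address (stashed at 8(%ebp) by the prologue) and REG as
   arguments, and leaves the result in %eax with %ebx cleared.  */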
2472 static void
2473 i386_emit_reg (int reg)
2474 {
2475 unsigned char buf[16];
2476 int i;
2477 CORE_ADDR buildaddr;
2478
2479 EMIT_ASM32 (i386_reg_a,
2480 "sub $0x8,%esp");
2481 buildaddr = current_insn_ptr;
2482 i = 0;
2483 buf[i++] = 0xb8; /* mov $<n>,%eax */
2484 memcpy (&buf[i], &reg, sizeof (reg));
2485 i += 4;
2486 append_insns (&buildaddr, i, buf);
2487 current_insn_ptr = buildaddr;
2488 EMIT_ASM32 (i386_reg_b,
2489 "mov %eax,4(%esp)\n\t"
2490 "mov 8(%ebp),%eax\n\t"
2491 "mov %eax,(%esp)");
2492 i386_emit_call (get_raw_reg_func_addr ());
2493 EMIT_ASM32 (i386_reg_c,
2494 "xor %ebx,%ebx\n\t"
2495 "lea 0x8(%esp),%esp");
2496 }
2497
2498 static void
2499 i386_emit_pop (void)
2500 {
2501 EMIT_ASM32 (i386_pop,
2502 "pop %eax\n\t"
2503 "pop %ebx");
2504 }
2505
2506 static void
2507 i386_emit_stack_flush (void)
2508 {
2509 EMIT_ASM32 (i386_stack_flush,
2510 "push %ebx\n\t"
2511 "push %eax");
2512 }
2513
2514 static void
2515 i386_emit_zero_ext (int arg)
2516 {
2517 switch (arg)
2518 {
2519 case 8:
2520 EMIT_ASM32 (i386_zero_ext_8,
2521 "and $0xff,%eax\n\t"
2522 "xor %ebx,%ebx");
2523 break;
2524 case 16:
2525 EMIT_ASM32 (i386_zero_ext_16,
2526 "and $0xffff,%eax\n\t"
2527 "xor %ebx,%ebx");
2528 break;
2529 case 32:
2530 EMIT_ASM32 (i386_zero_ext_32,
2531 "xor %ebx,%ebx");
2532 break;
2533 default:
2534 emit_error = 1;
2535 }
2536 }
2537
2538 static void
2539 i386_emit_swap (void)
2540 {
2541 EMIT_ASM32 (i386_swap,
2542 "mov %eax,%ecx\n\t"
2543 "mov %ebx,%edx\n\t"
2544 "pop %eax\n\t"
2545 "pop %ebx\n\t"
2546 "push %edx\n\t"
2547 "push %ecx");
2548 }
2549
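/* Drop N entries from the value stack.  Each entry occupies 8 bytes
   (an %eax/%ebx pair), so the emitted "lea <disp>(%esp),%esp" adjusts
   the stack pointer by N * 8.  The instruction is encoded with an
   8-bit displacement, so this assumes N * 8 fits in a signed byte.  */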
2550 static void
2551 i386_emit_stack_adjust (int n)
2552 {
2553 unsigned char buf[16];
2554 int i;
2555 CORE_ADDR buildaddr = current_insn_ptr;
2556
2557 i = 0;
2558 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2559 buf[i++] = 0x64;
2560 buf[i++] = 0x24;
2561 buf[i++] = n * 8;
2562 append_insns (&buildaddr, i, buf);
2563 current_insn_ptr = buildaddr;
2564 }
2565
2566 /* FN's prototype is `LONGEST(*fn)(int)'. */
2567
2568 static void
2569 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2570 {
2571 unsigned char buf[16];
2572 int i;
2573 CORE_ADDR buildaddr;
2574
2575 EMIT_ASM32 (i386_int_call_1_a,
2576 /* Reserve a bit of stack space. */
2577 "sub $0x8,%esp");
2578 /* Put the one argument on the stack. */
2579 buildaddr = current_insn_ptr;
2580 i = 0;
2581 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2582 buf[i++] = 0x04;
2583 buf[i++] = 0x24;
2584 memcpy (&buf[i], &arg1, sizeof (arg1));
2585 i += 4;
2586 append_insns (&buildaddr, i, buf);
2587 current_insn_ptr = buildaddr;
2588 i386_emit_call (fn);
2589 EMIT_ASM32 (i386_int_call_1_c,
2590 "mov %edx,%ebx\n\t"
2591 "lea 0x8(%esp),%esp");
2592 }
2593
2594 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2595
2596 static void
2597 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2598 {
2599 unsigned char buf[16];
2600 int i;
2601 CORE_ADDR buildaddr;
2602
2603 EMIT_ASM32 (i386_void_call_2_a,
2604 /* Preserve %eax only; we don't have to worry about %ebx. */
2605 "push %eax\n\t"
2606 /* Reserve a bit of stack space for arguments. */
2607 "sub $0x10,%esp\n\t"
2608 /* Copy "top" to the second argument position. (Note that
2609 		     we can't assume the function won't scribble on its
2610 arguments, so don't try to restore from this.) */
2611 "mov %eax,4(%esp)\n\t"
2612 "mov %ebx,8(%esp)");
2613 /* Put the first argument on the stack. */
2614 buildaddr = current_insn_ptr;
2615 i = 0;
2616 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2617 buf[i++] = 0x04;
2618 buf[i++] = 0x24;
2619 memcpy (&buf[i], &arg1, sizeof (arg1));
2620 i += 4;
2621 append_insns (&buildaddr, i, buf);
2622 current_insn_ptr = buildaddr;
2623 i386_emit_call (fn);
2624 EMIT_ASM32 (i386_void_call_2_b,
2625 "lea 0x10(%esp),%esp\n\t"
2626 /* Restore original stack top. */
2627 "pop %eax");
2628 }
2629
2630
2631 static void
2632 i386_emit_eq_goto (int *offset_p, int *size_p)
2633 {
2634 EMIT_ASM32 (eq,
2635 	      /* Check low half first, more likely to be the decider.  */
2636 "cmpl %eax,(%esp)\n\t"
2637 "jne .Leq_fallthru\n\t"
2638 "cmpl %ebx,4(%esp)\n\t"
2639 "jne .Leq_fallthru\n\t"
2640 "lea 0x8(%esp),%esp\n\t"
2641 "pop %eax\n\t"
2642 "pop %ebx\n\t"
2643 /* jmp, but don't trust the assembler to choose the right jump */
2644 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2645 ".Leq_fallthru:\n\t"
2646 "lea 0x8(%esp),%esp\n\t"
2647 "pop %eax\n\t"
2648 "pop %ebx");
2649
2650 if (offset_p)
2651 *offset_p = 18;
2652 if (size_p)
2653 *size_p = 4;
2654 }
2655
2656 static void
2657 i386_emit_ne_goto (int *offset_p, int *size_p)
2658 {
2659 EMIT_ASM32 (ne,
2660 	      /* Check low half first, more likely to be the decider.  */
2661 "cmpl %eax,(%esp)\n\t"
2662 "jne .Lne_jump\n\t"
2663 "cmpl %ebx,4(%esp)\n\t"
2664 "je .Lne_fallthru\n\t"
2665 ".Lne_jump:\n\t"
2666 "lea 0x8(%esp),%esp\n\t"
2667 "pop %eax\n\t"
2668 "pop %ebx\n\t"
2669 /* jmp, but don't trust the assembler to choose the right jump */
2670 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2671 ".Lne_fallthru:\n\t"
2672 "lea 0x8(%esp),%esp\n\t"
2673 "pop %eax\n\t"
2674 "pop %ebx");
2675
2676 if (offset_p)
2677 *offset_p = 18;
2678 if (size_p)
2679 *size_p = 4;
2680 }
2681
2682 static void
2683 i386_emit_lt_goto (int *offset_p, int *size_p)
2684 {
2685 EMIT_ASM32 (lt,
2686 "cmpl %ebx,4(%esp)\n\t"
2687 "jl .Llt_jump\n\t"
2688 "jne .Llt_fallthru\n\t"
2689 "cmpl %eax,(%esp)\n\t"
2690 "jnl .Llt_fallthru\n\t"
2691 ".Llt_jump:\n\t"
2692 "lea 0x8(%esp),%esp\n\t"
2693 "pop %eax\n\t"
2694 "pop %ebx\n\t"
2695 /* jmp, but don't trust the assembler to choose the right jump */
2696 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2697 ".Llt_fallthru:\n\t"
2698 "lea 0x8(%esp),%esp\n\t"
2699 "pop %eax\n\t"
2700 "pop %ebx");
2701
2702 if (offset_p)
2703 *offset_p = 20;
2704 if (size_p)
2705 *size_p = 4;
2706 }
2707
2708 static void
2709 i386_emit_le_goto (int *offset_p, int *size_p)
2710 {
2711 EMIT_ASM32 (le,
2712 "cmpl %ebx,4(%esp)\n\t"
2713 "jle .Lle_jump\n\t"
2714 "jne .Lle_fallthru\n\t"
2715 "cmpl %eax,(%esp)\n\t"
2716 "jnle .Lle_fallthru\n\t"
2717 ".Lle_jump:\n\t"
2718 "lea 0x8(%esp),%esp\n\t"
2719 "pop %eax\n\t"
2720 "pop %ebx\n\t"
2721 /* jmp, but don't trust the assembler to choose the right jump */
2722 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2723 ".Lle_fallthru:\n\t"
2724 "lea 0x8(%esp),%esp\n\t"
2725 "pop %eax\n\t"
2726 "pop %ebx");
2727
2728 if (offset_p)
2729 *offset_p = 20;
2730 if (size_p)
2731 *size_p = 4;
2732 }
2733
2734 static void
2735 i386_emit_gt_goto (int *offset_p, int *size_p)
2736 {
2737 EMIT_ASM32 (gt,
2738 "cmpl %ebx,4(%esp)\n\t"
2739 "jg .Lgt_jump\n\t"
2740 "jne .Lgt_fallthru\n\t"
2741 "cmpl %eax,(%esp)\n\t"
2742 "jng .Lgt_fallthru\n\t"
2743 ".Lgt_jump:\n\t"
2744 "lea 0x8(%esp),%esp\n\t"
2745 "pop %eax\n\t"
2746 "pop %ebx\n\t"
2747 /* jmp, but don't trust the assembler to choose the right jump */
2748 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2749 ".Lgt_fallthru:\n\t"
2750 "lea 0x8(%esp),%esp\n\t"
2751 "pop %eax\n\t"
2752 "pop %ebx");
2753
2754 if (offset_p)
2755 *offset_p = 20;
2756 if (size_p)
2757 *size_p = 4;
2758 }
2759
2760 static void
2761 i386_emit_ge_goto (int *offset_p, int *size_p)
2762 {
2763 EMIT_ASM32 (ge,
2764 "cmpl %ebx,4(%esp)\n\t"
2765 "jge .Lge_jump\n\t"
2766 "jne .Lge_fallthru\n\t"
2767 "cmpl %eax,(%esp)\n\t"
2768 "jnge .Lge_fallthru\n\t"
2769 ".Lge_jump:\n\t"
2770 "lea 0x8(%esp),%esp\n\t"
2771 "pop %eax\n\t"
2772 "pop %ebx\n\t"
2773 /* jmp, but don't trust the assembler to choose the right jump */
2774 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2775 ".Lge_fallthru:\n\t"
2776 "lea 0x8(%esp),%esp\n\t"
2777 "pop %eax\n\t"
2778 "pop %ebx");
2779
2780 if (offset_p)
2781 *offset_p = 20;
2782 if (size_p)
2783 *size_p = 4;
2784 }
2785
2786 struct emit_ops i386_emit_ops =
2787 {
2788 i386_emit_prologue,
2789 i386_emit_epilogue,
2790 i386_emit_add,
2791 i386_emit_sub,
2792 i386_emit_mul,
2793 i386_emit_lsh,
2794 i386_emit_rsh_signed,
2795 i386_emit_rsh_unsigned,
2796 i386_emit_ext,
2797 i386_emit_log_not,
2798 i386_emit_bit_and,
2799 i386_emit_bit_or,
2800 i386_emit_bit_xor,
2801 i386_emit_bit_not,
2802 i386_emit_equal,
2803 i386_emit_less_signed,
2804 i386_emit_less_unsigned,
2805 i386_emit_ref,
2806 i386_emit_if_goto,
2807 i386_emit_goto,
2808 i386_write_goto_address,
2809 i386_emit_const,
2810 i386_emit_call,
2811 i386_emit_reg,
2812 i386_emit_pop,
2813 i386_emit_stack_flush,
2814 i386_emit_zero_ext,
2815 i386_emit_swap,
2816 i386_emit_stack_adjust,
2817 i386_emit_int_call_1,
2818 i386_emit_void_call_2,
2819 i386_emit_eq_goto,
2820 i386_emit_ne_goto,
2821 i386_emit_lt_goto,
2822 i386_emit_le_goto,
2823 i386_emit_gt_goto,
2824 i386_emit_ge_goto
2825 };
2826
2827
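/* Implementation of linux_target_ops method "emit_ops".  Return the
   bytecode compilation hooks matching the inferior: amd64 when the
   current target description is 64-bit, i386 otherwise.  */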
2828 static struct emit_ops *
2829 x86_emit_ops (void)
2830 {
2831 #ifdef __x86_64__
2832 if (is_64bit_tdesc ())
2833 return &amd64_emit_ops;
2834 else
2835 #endif
2836 return &i386_emit_ops;
2837 }
2838
2839 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2840
2841 static const gdb_byte *
2842 x86_sw_breakpoint_from_kind (int kind, int *size)
2843 {
2844 *size = x86_breakpoint_len;
2845 return x86_breakpoint;
2846 }
2847
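/* Implementation of linux_target_ops method "supports_range_stepping".  */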
2848 static int
2849 x86_supports_range_stepping (void)
2850 {
2851 return 1;
2852 }
2853
2854 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2855 */
2856
2857 static int
2858 x86_supports_hardware_single_step (void)
2859 {
2860 return 1;
2861 }
2862
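/* Implementation of linux_target_ops method "get_ipa_tdesc_idx".
   Return the index identifying the target description the in-process
   agent should use for the current inferior.  */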
2863 static int
2864 x86_get_ipa_tdesc_idx (void)
2865 {
2866 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2867 const struct target_desc *tdesc = regcache->tdesc;
2868
2869 #ifdef __x86_64__
2870 return amd64_get_ipa_tdesc_idx (tdesc);
2871 #endif
2872
2873 if (tdesc == tdesc_i386_linux_no_xml)
2874 return X86_TDESC_SSE;
2875
2876 return i386_get_ipa_tdesc_idx (tdesc);
2877 }
2878
2879 /* This is initialized assuming an amd64 target.
2880 x86_arch_setup will correct it for i386 or amd64 targets. */
2881
2882 struct linux_target_ops the_low_target =
2883 {
2884 x86_cannot_fetch_register,
2885 x86_cannot_store_register,
2886 NULL, /* fetch_register */
2887 x86_get_pc,
2888 x86_set_pc,
2889 NULL, /* breakpoint_kind_from_pc */
2890 x86_sw_breakpoint_from_kind,
2891   NULL, /* get_next_pcs */
2892   1, /* decr_pc_after_break */
2893 x86_breakpoint_at,
2894 x86_supports_z_point_type,
2895 x86_insert_point,
2896 x86_remove_point,
2897 x86_stopped_by_watchpoint,
2898 x86_stopped_data_address,
2899 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2900 native i386 case (no registers smaller than an xfer unit), and are not
2901 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2902 NULL,
2903 NULL,
2904 /* need to fix up i386 siginfo if host is amd64 */
2905 x86_siginfo_fixup,
2906 x86_linux_new_process,
2907 x86_linux_delete_process,
2908 x86_linux_new_thread,
2909 x86_linux_delete_thread,
2910 x86_linux_new_fork,
2911 x86_linux_prepare_to_resume,
2912 x86_linux_process_qsupported,
2913 x86_supports_tracepoints,
2914 x86_get_thread_area,
2915 x86_install_fast_tracepoint_jump_pad,
2916 x86_emit_ops,
2917 x86_get_min_fast_tracepoint_insn_len,
2918 x86_supports_range_stepping,
2919 NULL, /* breakpoint_kind_from_current_state */
2920 x86_supports_hardware_single_step,
2921 x86_get_syscall_trapinfo,
2922 x86_get_ipa_tdesc_idx,
2923 };
2924
2925 /* The linux target ops object. */
2926
2927 linux_process_target *the_linux_target = &the_x86_target;
2928
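/* Set up the fallback target descriptions used when the connecting
   GDB does not support XML target descriptions, and initialize the
   x86 regsets info.  */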
2929 void
2930 initialize_low_arch (void)
2931 {
2932 /* Initialize the Linux target descriptions. */
2933 #ifdef __x86_64__
2934 tdesc_amd64_linux_no_xml = allocate_target_description ();
2935 copy_target_description (tdesc_amd64_linux_no_xml,
2936 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2937 false));
2938 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2939 #endif
2940
2941 tdesc_i386_linux_no_xml = allocate_target_description ();
2942 copy_target_description (tdesc_i386_linux_no_xml,
2943 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2944 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2945
2946 initialize_regsets_info (&x86_regsets_info);
2947 }