Revert "Fix build breakage"
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48
49 #ifdef __x86_64__
50 /* Defined in auto-generated file amd64-linux.c. */
51 void init_registers_amd64_linux (void);
52 extern const struct target_desc *tdesc_amd64_linux;
53
54 /* Defined in auto-generated file amd64-avx-linux.c. */
55 void init_registers_amd64_avx_linux (void);
56 extern const struct target_desc *tdesc_amd64_avx_linux;
57
58 /* Defined in auto-generated file amd64-avx512-linux.c. */
59 void init_registers_amd64_avx512_linux (void);
60 extern const struct target_desc *tdesc_amd64_avx512_linux;
61
62 /* Defined in auto-generated file amd64-mpx-linux.c. */
63 void init_registers_amd64_mpx_linux (void);
64 extern const struct target_desc *tdesc_amd64_mpx_linux;
65
66 /* Defined in auto-generated file x32-linux.c. */
67 void init_registers_x32_linux (void);
68 extern const struct target_desc *tdesc_x32_linux;
69
70 /* Defined in auto-generated file x32-avx-linux.c. */
71 void init_registers_x32_avx_linux (void);
72 extern const struct target_desc *tdesc_x32_avx_linux;
73
74 /* Defined in auto-generated file x32-avx512-linux.c. */
75 void init_registers_x32_avx512_linux (void);
76 extern const struct target_desc *tdesc_x32_avx512_linux;
77
78 #endif
79
80 /* Defined in auto-generated file i386-linux.c. */
81 void init_registers_i386_linux (void);
82 extern const struct target_desc *tdesc_i386_linux;
83
84 /* Defined in auto-generated file i386-mmx-linux.c. */
85 void init_registers_i386_mmx_linux (void);
86 extern const struct target_desc *tdesc_i386_mmx_linux;
87
88 /* Defined in auto-generated file i386-avx-linux.c. */
89 void init_registers_i386_avx_linux (void);
90 extern const struct target_desc *tdesc_i386_avx_linux;
91
92 /* Defined in auto-generated file i386-avx512-linux.c. */
93 void init_registers_i386_avx512_linux (void);
94 extern const struct target_desc *tdesc_i386_avx512_linux;
95
96 /* Defined in auto-generated file i386-mpx-linux.c. */
97 void init_registers_i386_mpx_linux (void);
98 extern const struct target_desc *tdesc_i386_mpx_linux;
99
100 #ifdef __x86_64__
101 static struct target_desc *tdesc_amd64_linux_no_xml;
102 #endif
103 static struct target_desc *tdesc_i386_linux_no_xml;
104
105
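/* Templates for the jump instructions used when installing fast
   tracepoints: jump_insn is a 5-byte "jmp rel32" (opcode 0xe9) and
   small_jump_insn is a 4-byte "jmp rel16" (0x66 prefix + 0xe9).  The
   zeroed offset bytes are patched in when the jump pad is wired up.  */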
106 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
107 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
108
109 /* Backward compatibility for gdb without XML support. */
110
111 static const char *xmltarget_i386_linux_no_xml = "@<target>\
112 <architecture>i386</architecture>\
113 <osabi>GNU/Linux</osabi>\
114 </target>";
115
116 #ifdef __x86_64__
117 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
118 <architecture>i386:x86-64</architecture>\
119 <osabi>GNU/Linux</osabi>\
120 </target>";
121 #endif
122
123 #include <sys/reg.h>
124 #include <sys/procfs.h>
125 #include "nat/gdb_ptrace.h"
126 #include <sys/uio.h>
127
128 #ifndef PTRACE_GET_THREAD_AREA
129 #define PTRACE_GET_THREAD_AREA 25
130 #endif
131
132 /* This definition comes from prctl.h, but some kernels may not have it. */
133 #ifndef PTRACE_ARCH_PRCTL
134 #define PTRACE_ARCH_PRCTL 30
135 #endif
136
137 /* The following definitions come from prctl.h, but may be absent
138 for certain configurations. */
139 #ifndef ARCH_GET_FS
140 #define ARCH_SET_GS 0x1001
141 #define ARCH_SET_FS 0x1002
142 #define ARCH_GET_FS 0x1003
143 #define ARCH_GET_GS 0x1004
144 #endif
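/* For illustration, these codes select which segment base a
   PTRACE_ARCH_PRCTL request reads or writes; e.g. the FS base of a
   stopped thread can be fetched with

     void *base;
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);

   which is how ps_get_thread_area and x86_get_thread_area below use
   them.  */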
145
146 /* Per-process arch-specific data we want to keep. */
147
148 struct arch_process_info
149 {
150 struct x86_debug_reg_state debug_reg_state;
151 };
152
153 #ifdef __x86_64__
154
155 /* Mapping between the general-purpose registers in `struct user'
156 format and GDB's register array layout.
157 Note that the transfer layout uses 64-bit regs. */
158 static /*const*/ int i386_regmap[] =
159 {
160 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
161 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
162 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
163 DS * 8, ES * 8, FS * 8, GS * 8
164 };
165
166 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
167
168 /* So the code below doesn't have to care whether it's i386 or amd64. */
169 #define ORIG_EAX ORIG_RAX
170 #define REGSIZE 8
171
172 static const int x86_64_regmap[] =
173 {
174 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
175 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
176 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
177 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
178 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
179 DS * 8, ES * 8, FS * 8, GS * 8,
180 -1, -1, -1, -1, -1, -1, -1, -1,
181 -1, -1, -1, -1, -1, -1, -1, -1,
182 -1, -1, -1, -1, -1, -1, -1, -1,
183 -1,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 ORIG_RAX * 8,
186 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
187 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
188 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
193 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, -1, -1, -1, -1
197 };
198
199 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
200 #define X86_64_USER_REGS (GS + 1)
201
202 #else /* ! __x86_64__ */
203
204 /* Mapping between the general-purpose registers in `struct user'
205 format and GDB's register array layout. */
206 static /*const*/ int i386_regmap[] =
207 {
208 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
209 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
210 EIP * 4, EFL * 4, CS * 4, SS * 4,
211 DS * 4, ES * 4, FS * 4, GS * 4
212 };
213
214 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
215
216 #define REGSIZE 4
217
218 #endif
219
220 #ifdef __x86_64__
221
222 /* Returns true if the current inferior belongs to an x86-64 process,
223 per the tdesc. */
224
225 static int
226 is_64bit_tdesc (void)
227 {
228 struct regcache *regcache = get_thread_regcache (current_thread, 0);
229
230 return register_size (regcache->tdesc, 0) == 8;
231 }
232
233 #endif
234
235 \f
236 /* Called by libthread_db. */
237
238 ps_err_e
239 ps_get_thread_area (const struct ps_prochandle *ph,
240 lwpid_t lwpid, int idx, void **base)
241 {
242 #ifdef __x86_64__
243 int use_64bit = is_64bit_tdesc ();
244
245 if (use_64bit)
246 {
247 switch (idx)
248 {
249 case FS:
250 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
251 return PS_OK;
252 break;
253 case GS:
254 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
255 return PS_OK;
256 break;
257 default:
258 return PS_BADADDR;
259 }
260 return PS_ERR;
261 }
262 #endif
263
264 {
265 unsigned int desc[4];
266
267 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
268 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
269 return PS_ERR;
270
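/* desc[] now holds a struct user_desc (see <asm/ldt.h>); desc[1] is
   its base_addr field, which is the thread-area base we want.  */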
271 /* Ensure we properly extend the value to 64-bits for x86_64. */
272 *base = (void *) (uintptr_t) desc[1];
273 return PS_OK;
274 }
275 }
276
277 /* Get the thread area address. This is used to recognize which
278 thread is which when tracing with the in-process agent library. We
279 don't read anything from the address, and treat it as opaque; it's
280 the address itself that we assume is unique per-thread. */
281
282 static int
283 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
284 {
285 #ifdef __x86_64__
286 int use_64bit = is_64bit_tdesc ();
287
288 if (use_64bit)
289 {
290 void *base;
291 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
292 {
293 *addr = (CORE_ADDR) (uintptr_t) base;
294 return 0;
295 }
296
297 return -1;
298 }
299 #endif
300
301 {
302 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
303 struct thread_info *thr = get_lwp_thread (lwp);
304 struct regcache *regcache = get_thread_regcache (thr, 1);
305 unsigned int desc[4];
306 ULONGEST gs = 0;
307 const int reg_thread_area = 3; /* Shift to drop the selector's RPL/TI bits. */
308 int idx;
309
310 collect_register_by_name (regcache, "gs", &gs);
311
312 idx = gs >> reg_thread_area;
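/* For example, a %gs selector of 0x63 (RPL 3, GDT) yields GDT index
   0x63 >> 3 == 12, which is what PTRACE_GET_THREAD_AREA expects.  */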
313
314 if (ptrace (PTRACE_GET_THREAD_AREA,
315 lwpid_of (thr),
316 (void *) (long) idx, (unsigned long) &desc) < 0)
317 return -1;
318
319 *addr = desc[1];
320 return 0;
321 }
322 }
323
324
325 \f
326 static int
327 x86_cannot_store_register (int regno)
328 {
329 #ifdef __x86_64__
330 if (is_64bit_tdesc ())
331 return 0;
332 #endif
333
334 return regno >= I386_NUM_REGS;
335 }
336
337 static int
338 x86_cannot_fetch_register (int regno)
339 {
340 #ifdef __x86_64__
341 if (is_64bit_tdesc ())
342 return 0;
343 #endif
344
345 return regno >= I386_NUM_REGS;
346 }
347
348 static void
349 x86_fill_gregset (struct regcache *regcache, void *buf)
350 {
351 int i;
352
353 #ifdef __x86_64__
354 if (register_size (regcache->tdesc, 0) == 8)
355 {
356 for (i = 0; i < X86_64_NUM_REGS; i++)
357 if (x86_64_regmap[i] != -1)
358 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
359 return;
360 }
361
362 /* 32-bit inferior registers need to be zero-extended.
363 Callers would read uninitialized memory otherwise. */
364 memset (buf, 0x00, X86_64_USER_REGS * 8);
365 #endif
366
367 for (i = 0; i < I386_NUM_REGS; i++)
368 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
369
370 collect_register_by_name (regcache, "orig_eax",
371 ((char *) buf) + ORIG_EAX * REGSIZE);
372 }
373
374 static void
375 x86_store_gregset (struct regcache *regcache, const void *buf)
376 {
377 int i;
378
379 #ifdef __x86_64__
380 if (register_size (regcache->tdesc, 0) == 8)
381 {
382 for (i = 0; i < X86_64_NUM_REGS; i++)
383 if (x86_64_regmap[i] != -1)
384 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
385 return;
386 }
387 #endif
388
389 for (i = 0; i < I386_NUM_REGS; i++)
390 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
391
392 supply_register_by_name (regcache, "orig_eax",
393 ((char *) buf) + ORIG_EAX * REGSIZE);
394 }
395
396 static void
397 x86_fill_fpregset (struct regcache *regcache, void *buf)
398 {
399 #ifdef __x86_64__
400 i387_cache_to_fxsave (regcache, buf);
401 #else
402 i387_cache_to_fsave (regcache, buf);
403 #endif
404 }
405
406 static void
407 x86_store_fpregset (struct regcache *regcache, const void *buf)
408 {
409 #ifdef __x86_64__
410 i387_fxsave_to_cache (regcache, buf);
411 #else
412 i387_fsave_to_cache (regcache, buf);
413 #endif
414 }
415
416 #ifndef __x86_64__
417
418 static void
419 x86_fill_fpxregset (struct regcache *regcache, void *buf)
420 {
421 i387_cache_to_fxsave (regcache, buf);
422 }
423
424 static void
425 x86_store_fpxregset (struct regcache *regcache, const void *buf)
426 {
427 i387_fxsave_to_cache (regcache, buf);
428 }
429
430 #endif
431
432 static void
433 x86_fill_xstateregset (struct regcache *regcache, void *buf)
434 {
435 i387_cache_to_xsave (regcache, buf);
436 }
437
438 static void
439 x86_store_xstateregset (struct regcache *regcache, const void *buf)
440 {
441 i387_xsave_to_cache (regcache, buf);
442 }
443
444 /* ??? The non-biarch i386 case stores all the i387 regs twice.
445 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
446 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
447 doesn't work. IWBN to avoid the duplication in the case where it
448 does work. Maybe the arch_setup routine could check whether it works
449 and update the supported regsets accordingly. */
450
451 static struct regset_info x86_regsets[] =
452 {
453 #ifdef HAVE_PTRACE_GETREGS
454 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
455 GENERAL_REGS,
456 x86_fill_gregset, x86_store_gregset },
457 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
458 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
459 # ifndef __x86_64__
460 # ifdef HAVE_PTRACE_GETFPXREGS
461 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
462 EXTENDED_REGS,
463 x86_fill_fpxregset, x86_store_fpxregset },
464 # endif
465 # endif
466 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
467 FP_REGS,
468 x86_fill_fpregset, x86_store_fpregset },
469 #endif /* HAVE_PTRACE_GETREGS */
470 NULL_REGSET
471 };
472
473 static CORE_ADDR
474 x86_get_pc (struct regcache *regcache)
475 {
476 int use_64bit = register_size (regcache->tdesc, 0) == 8;
477
478 if (use_64bit)
479 {
480 unsigned long pc;
481 collect_register_by_name (regcache, "rip", &pc);
482 return (CORE_ADDR) pc;
483 }
484 else
485 {
486 unsigned int pc;
487 collect_register_by_name (regcache, "eip", &pc);
488 return (CORE_ADDR) pc;
489 }
490 }
491
492 static void
493 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
494 {
495 int use_64bit = register_size (regcache->tdesc, 0) == 8;
496
497 if (use_64bit)
498 {
499 unsigned long newpc = pc;
500 supply_register_by_name (regcache, "rip", &newpc);
501 }
502 else
503 {
504 unsigned int newpc = pc;
505 supply_register_by_name (regcache, "eip", &newpc);
506 }
507 }
508 \f
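/* 0xCC is the one-byte "int3" software breakpoint instruction.  */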
509 static const gdb_byte x86_breakpoint[] = { 0xCC };
510 #define x86_breakpoint_len 1
511
512 static int
513 x86_breakpoint_at (CORE_ADDR pc)
514 {
515 unsigned char c;
516
517 (*the_target->read_memory) (pc, &c, 1);
518 if (c == 0xCC)
519 return 1;
520
521 return 0;
522 }
523 \f
524 /* Low-level function vector. */
525 struct x86_dr_low_type x86_dr_low =
526 {
527 x86_linux_dr_set_control,
528 x86_linux_dr_set_addr,
529 x86_linux_dr_get_addr,
530 x86_linux_dr_get_status,
531 x86_linux_dr_get_control,
532 sizeof (void *),
533 };
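/* The final sizeof (void *) initializer supplies the debug register
   width in bytes: 4 for a 32-bit gdbserver, 8 for a 64-bit one.  */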
534 \f
535 /* Breakpoint/Watchpoint support. */
536
537 static int
538 x86_supports_z_point_type (char z_type)
539 {
540 switch (z_type)
541 {
542 case Z_PACKET_SW_BP:
543 case Z_PACKET_HW_BP:
544 case Z_PACKET_WRITE_WP:
545 case Z_PACKET_ACCESS_WP:
546 return 1;
547 default:
548 return 0;
549 }
550 }
551
552 static int
553 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
554 int size, struct raw_breakpoint *bp)
555 {
556 struct process_info *proc = current_process ();
557
558 switch (type)
559 {
560 case raw_bkpt_type_hw:
561 case raw_bkpt_type_write_wp:
562 case raw_bkpt_type_access_wp:
563 {
564 enum target_hw_bp_type hw_type
565 = raw_bkpt_type_to_target_hw_bp_type (type);
566 struct x86_debug_reg_state *state
567 = &proc->priv->arch_private->debug_reg_state;
568
569 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
570 }
571
572 default:
573 /* Unsupported. */
574 return 1;
575 }
576 }
577
578 static int
579 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
580 int size, struct raw_breakpoint *bp)
581 {
582 struct process_info *proc = current_process ();
583
584 switch (type)
585 {
586 case raw_bkpt_type_hw:
587 case raw_bkpt_type_write_wp:
588 case raw_bkpt_type_access_wp:
589 {
590 enum target_hw_bp_type hw_type
591 = raw_bkpt_type_to_target_hw_bp_type (type);
592 struct x86_debug_reg_state *state
593 = &proc->priv->arch_private->debug_reg_state;
594
595 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
596 }
597 default:
598 /* Unsupported. */
599 return 1;
600 }
601 }
602
603 static int
604 x86_stopped_by_watchpoint (void)
605 {
606 struct process_info *proc = current_process ();
607 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
608 }
609
610 static CORE_ADDR
611 x86_stopped_data_address (void)
612 {
613 struct process_info *proc = current_process ();
614 CORE_ADDR addr;
615 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
616 &addr))
617 return addr;
618 return 0;
619 }
620 \f
621 /* Called when a new process is created. */
622
623 static struct arch_process_info *
624 x86_linux_new_process (void)
625 {
626 struct arch_process_info *info = XCNEW (struct arch_process_info);
627
628 x86_low_init_dregs (&info->debug_reg_state);
629
630 return info;
631 }
632
633 /* Target routine for linux_new_fork. */
634
635 static void
636 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
637 {
638 /* These are allocated by linux_add_process. */
639 gdb_assert (parent->priv != NULL
640 && parent->priv->arch_private != NULL);
641 gdb_assert (child->priv != NULL
642 && child->priv->arch_private != NULL);
643
644 /* Linux kernel before 2.6.33 commit
645 72f674d203cd230426437cdcf7dd6f681dad8b0d
646 will inherit hardware debug registers from parent
647 on fork/vfork/clone. Newer Linux kernels create such tasks with
648 zeroed debug registers.
649
650 GDB core assumes the child inherits the watchpoints/hw
651 breakpoints of the parent, and will remove them all from the
652 forked off process. Copy the debug registers mirrors into the
653 new process so that all breakpoints and watchpoints can be
654 removed together. The debug registers mirror will become zeroed
655 in the end before detaching the forked off process, thus making
656 this compatible with older Linux kernels too. */
657
658 *child->priv->arch_private = *parent->priv->arch_private;
659 }
660
661 /* See nat/x86-dregs.h. */
662
663 struct x86_debug_reg_state *
664 x86_debug_reg_state (pid_t pid)
665 {
666 struct process_info *proc = find_process_pid (pid);
667
668 return &proc->priv->arch_private->debug_reg_state;
669 }
670 \f
671 /* When GDBSERVER is built as a 64-bit application on linux, the
672 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
673 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
674 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
675 conversion in-place ourselves. */
676
677 /* Convert a native/host siginfo object into/from the siginfo in the
678 layout of the inferior's architecture. Returns true if any
679 conversion was done; false otherwise. If DIRECTION is 1, then copy
680 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
681 INF. */
682
683 static int
684 x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
685 {
686 #ifdef __x86_64__
687 unsigned int machine;
688 int tid = lwpid_of (current_thread);
689 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
690
691 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
692 if (!is_64bit_tdesc ())
693 return amd64_linux_siginfo_fixup_common (native, inf, direction,
694 FIXUP_32);
695 /* No fixup for native x32 GDB. */
696 else if (!is_elf64 && sizeof (void *) == 8)
697 return amd64_linux_siginfo_fixup_common (native, inf, direction,
698 FIXUP_X32);
699 #endif
700
701 return 0;
702 }
703 \f
704 static int use_xml;
705
706 /* Format of XSAVE extended state is:
707 struct
708 {
709 fxsave_bytes[0..463]
710 sw_usable_bytes[464..511]
711 xstate_hdr_bytes[512..575]
712 avx_bytes[576..831]
713 future_state etc
714 };
715
716 Same memory layout will be used for the coredump NT_X86_XSTATE
717 representing the XSAVE extended state registers.
718
719 The first 8 bytes of the sw_usable_bytes[464..471] are the OS enabled
720 extended state mask, which is the same as the extended control register
721 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
722 together with the mask saved in the xstate_hdr_bytes to determine what
723 states the processor/OS supports and what state, used or initialized,
724 the process/thread is in. */
725 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
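/* As an illustration, x86_linux_read_description below extracts XCR0
   from a raw XSAVE buffer roughly like this (xsave_buf is a stand-in
   name):

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, 8);

   i.e. the mask lives in the first 8 bytes of the sw_usable_bytes
   area.  */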
726
727 /* Does the current host support the GETFPXREGS request? The header
728 file may or may not define it, and even if it is defined, the
729 kernel will return EIO if it's running on a pre-SSE processor. */
730 int have_ptrace_getfpxregs =
731 #ifdef HAVE_PTRACE_GETFPXREGS
732 -1
733 #else
734 0
735 #endif
736 ;
737
738 /* Get Linux/x86 target description from running target. */
739
740 static const struct target_desc *
741 x86_linux_read_description (void)
742 {
743 unsigned int machine;
744 int is_elf64;
745 int xcr0_features;
746 int tid;
747 static uint64_t xcr0;
748 struct regset_info *regset;
749
750 tid = lwpid_of (current_thread);
751
752 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
753
754 if (sizeof (void *) == 4)
755 {
756 if (is_elf64 > 0)
757 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
758 #ifndef __x86_64__
759 else if (machine == EM_X86_64)
760 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
761 #endif
762 }
763
764 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
765 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
766 {
767 elf_fpxregset_t fpxregs;
768
769 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
770 {
771 have_ptrace_getfpxregs = 0;
772 have_ptrace_getregset = 0;
773 return tdesc_i386_mmx_linux;
774 }
775 else
776 have_ptrace_getfpxregs = 1;
777 }
778 #endif
779
780 if (!use_xml)
781 {
782 x86_xcr0 = X86_XSTATE_SSE_MASK;
783
784 /* Don't use XML. */
785 #ifdef __x86_64__
786 if (machine == EM_X86_64)
787 return tdesc_amd64_linux_no_xml;
788 else
789 #endif
790 return tdesc_i386_linux_no_xml;
791 }
792
793 if (have_ptrace_getregset == -1)
794 {
795 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
796 struct iovec iov;
797
798 iov.iov_base = xstateregs;
799 iov.iov_len = sizeof (xstateregs);
800
801 /* Check if PTRACE_GETREGSET works. */
802 if (ptrace (PTRACE_GETREGSET, tid,
803 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
804 have_ptrace_getregset = 0;
805 else
806 {
807 have_ptrace_getregset = 1;
808
809 /* Get XCR0 from XSAVE extended state. */
810 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
811 / sizeof (uint64_t))];
812
813 /* Use PTRACE_GETREGSET if it is available. */
814 for (regset = x86_regsets;
815 regset->fill_function != NULL; regset++)
816 if (regset->get_request == PTRACE_GETREGSET)
817 regset->size = X86_XSTATE_SIZE (xcr0);
818 else if (regset->type != GENERAL_REGS)
819 regset->size = 0;
820 }
821 }
822
823 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
824 xcr0_features = (have_ptrace_getregset
825 && (xcr0 & X86_XSTATE_ALL_MASK));
826
827 if (xcr0_features)
828 x86_xcr0 = xcr0;
829
830 if (machine == EM_X86_64)
831 {
832 #ifdef __x86_64__
833 if (is_elf64)
834 {
835 if (xcr0_features)
836 {
837 switch (xcr0 & X86_XSTATE_ALL_MASK)
838 {
839 case X86_XSTATE_AVX512_MASK:
840 return tdesc_amd64_avx512_linux;
841
842 case X86_XSTATE_MPX_MASK:
843 return tdesc_amd64_mpx_linux;
844
845 case X86_XSTATE_AVX_MASK:
846 return tdesc_amd64_avx_linux;
847
848 default:
849 return tdesc_amd64_linux;
850 }
851 }
852 else
853 return tdesc_amd64_linux;
854 }
855 else
856 {
857 if (xcr0_features)
858 {
859 switch (xcr0 & X86_XSTATE_ALL_MASK)
860 {
861 case X86_XSTATE_AVX512_MASK:
862 return tdesc_x32_avx512_linux;
863
864 case X86_XSTATE_MPX_MASK: /* No MPX on x32. */
865 case X86_XSTATE_AVX_MASK:
866 return tdesc_x32_avx_linux;
867
868 default:
869 return tdesc_x32_linux;
870 }
871 }
872 else
873 return tdesc_x32_linux;
874 }
875 #endif
876 }
877 else
878 {
879 if (xcr0_features)
880 {
881 switch (xcr0 & X86_XSTATE_ALL_MASK)
882 {
883 case (X86_XSTATE_AVX512_MASK):
884 return tdesc_i386_avx512_linux;
885
886 case (X86_XSTATE_MPX_MASK):
887 return tdesc_i386_mpx_linux;
888
889 case (X86_XSTATE_AVX_MASK):
890 return tdesc_i386_avx_linux;
891
892 default:
893 return tdesc_i386_linux;
894 }
895 }
896 else
897 return tdesc_i386_linux;
898 }
899
900 gdb_assert_not_reached ("failed to return tdesc");
901 }
902
903 /* Callback for find_inferior. Stops iteration when a thread with a
904 given PID is found. */
905
906 static int
907 same_process_callback (struct inferior_list_entry *entry, void *data)
908 {
909 int pid = *(int *) data;
910
911 return (ptid_get_pid (entry->id) == pid);
912 }
913
914 /* Callback for for_each_inferior. Calls the arch_setup routine for
915 each process. */
916
917 static void
918 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
919 {
920 int pid = ptid_get_pid (entry->id);
921
922 /* Look up any thread of this process. */
923 current_thread
924 = (struct thread_info *) find_inferior (&all_threads,
925 same_process_callback, &pid);
926
927 the_low_target.arch_setup ();
928 }
929
930 /* Update the target description of all processes; a new GDB has
931 connected, and it may or may not support XML target descriptions. */
932
933 static void
934 x86_linux_update_xmltarget (void)
935 {
936 struct thread_info *saved_thread = current_thread;
937
938 /* Before changing the register cache's internal layout, flush the
939 contents of the current valid caches back to the threads, and
940 release the current regcache objects. */
941 regcache_release ();
942
943 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
944
945 current_thread = saved_thread;
946 }
947
948 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
949 PTRACE_GETREGSET. */
950
951 static void
952 x86_linux_process_qsupported (char **features, int count)
953 {
954 int i;
955
956 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
957 with "i386" in qSupported query, it supports x86 XML target
958 descriptions. */
959 use_xml = 0;
960 for (i = 0; i < count; i++)
961 {
962 const char *feature = features[i];
963
964 if (startswith (feature, "xmlRegisters="))
965 {
966 char *copy = xstrdup (feature + 13);
967 char *p;
968
969 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
970 {
971 if (strcmp (p, "i386") == 0)
972 {
973 use_xml = 1;
974 break;
975 }
976 }
977
978 free (copy);
979 }
980 }
981 x86_linux_update_xmltarget ();
982 }
983
984 /* Common for x86/x86-64. */
985
986 static struct regsets_info x86_regsets_info =
987 {
988 x86_regsets, /* regsets */
989 0, /* num_regsets */
990 NULL, /* disabled_regsets */
991 };
992
993 #ifdef __x86_64__
994 static struct regs_info amd64_linux_regs_info =
995 {
996 NULL, /* regset_bitmap */
997 NULL, /* usrregs_info */
998 &x86_regsets_info
999 };
1000 #endif
1001 static struct usrregs_info i386_linux_usrregs_info =
1002 {
1003 I386_NUM_REGS,
1004 i386_regmap,
1005 };
1006
1007 static struct regs_info i386_linux_regs_info =
1008 {
1009 NULL, /* regset_bitmap */
1010 &i386_linux_usrregs_info,
1011 &x86_regsets_info
1012 };
1013
1014 const struct regs_info *
1015 x86_linux_regs_info (void)
1016 {
1017 #ifdef __x86_64__
1018 if (is_64bit_tdesc ())
1019 return &amd64_linux_regs_info;
1020 else
1021 #endif
1022 return &i386_linux_regs_info;
1023 }
1024
1025 /* Initialize the target description for the architecture of the
1026 inferior. */
1027
1028 static void
1029 x86_arch_setup (void)
1030 {
1031 current_process ()->tdesc = x86_linux_read_description ();
1032 }
1033
1034 /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
1035 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
1036
1037 static void
1038 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno, int *sysret)
1039 {
1040 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1041
1042 if (use_64bit)
1043 {
1044 long l_sysno;
1045 long l_sysret;
1046
1047 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1048 collect_register_by_name (regcache, "rax", &l_sysret);
1049 *sysno = (int) l_sysno;
1050 *sysret = (int) l_sysret;
1051 }
1052 else
1053 {
1054 collect_register_by_name (regcache, "orig_eax", sysno);
1055 collect_register_by_name (regcache, "eax", sysret);
1056 }
1057 }
1058
1059 static int
1060 x86_supports_tracepoints (void)
1061 {
1062 return 1;
1063 }
1064
1065 static void
1066 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1067 {
1068 write_inferior_memory (*to, buf, len);
1069 *to += len;
1070 }
1071
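/* Parse OP, a string of space-separated hex byte values such as
   "48 89 e6", into BUF as raw opcode bytes, and return the number of
   bytes written.  Used below to assemble the jump-pad instruction
   sequences.  */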
1072 static int
1073 push_opcode (unsigned char *buf, char *op)
1074 {
1075 unsigned char *buf_org = buf;
1076
1077 while (1)
1078 {
1079 char *endptr;
1080 unsigned long ul = strtoul (op, &endptr, 16);
1081
1082 if (endptr == op)
1083 break;
1084
1085 *buf++ = ul;
1086 op = endptr;
1087 }
1088
1089 return buf - buf_org;
1090 }
1091
1092 #ifdef __x86_64__
1093
1094 /* Build a jump pad that saves registers and calls a collection
1095 function. Writes the jump instruction that jumps to the jump pad into
1096 JJUMPAD_INSN. The caller is responsible for writing it in at the
1097 tracepoint address. */
1098
1099 static int
1100 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1101 CORE_ADDR collector,
1102 CORE_ADDR lockaddr,
1103 ULONGEST orig_size,
1104 CORE_ADDR *jump_entry,
1105 CORE_ADDR *trampoline,
1106 ULONGEST *trampoline_size,
1107 unsigned char *jjump_pad_insn,
1108 ULONGEST *jjump_pad_insn_size,
1109 CORE_ADDR *adjusted_insn_addr,
1110 CORE_ADDR *adjusted_insn_addr_end,
1111 char *err)
1112 {
1113 unsigned char buf[40];
1114 int i, offset;
1115 int64_t loffset;
1116
1117 CORE_ADDR buildaddr = *jump_entry;
1118
1119 /* Build the jump pad. */
1120
1121 /* First, do tracepoint data collection. Save registers. */
1122 i = 0;
1123 /* Need to ensure stack pointer saved first. */
1124 buf[i++] = 0x54; /* push %rsp */
1125 buf[i++] = 0x55; /* push %rbp */
1126 buf[i++] = 0x57; /* push %rdi */
1127 buf[i++] = 0x56; /* push %rsi */
1128 buf[i++] = 0x52; /* push %rdx */
1129 buf[i++] = 0x51; /* push %rcx */
1130 buf[i++] = 0x53; /* push %rbx */
1131 buf[i++] = 0x50; /* push %rax */
1132 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1133 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1134 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1135 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1136 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1137 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1138 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1139 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1140 buf[i++] = 0x9c; /* pushfq */
1141 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1142 buf[i++] = 0xbf;
1143 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1144 i += sizeof (unsigned long);
1145 buf[i++] = 0x57; /* push %rdi */
1146 append_insns (&buildaddr, i, buf);
1147
1148 /* Stack space for the collecting_t object. */
1149 i = 0;
1150 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1151 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1152 memcpy (buf + i, &tpoint, 8);
1153 i += 8;
1154 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1155 i += push_opcode (&buf[i],
1156 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1157 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1158 append_insns (&buildaddr, i, buf);
1159
1160 /* spin-lock. */
1161 i = 0;
1162 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1163 memcpy (&buf[i], (void *) &lockaddr, 8);
1164 i += 8;
1165 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1166 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1167 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1168 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1169 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1170 append_insns (&buildaddr, i, buf);
1171
1172 /* Set up the gdb_collect call. */
1173 /* At this point, (stack pointer + 0x18) is the base of our saved
1174 register block. */
1175
1176 i = 0;
1177 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1178 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1179
1180 /* tpoint address may be 64-bit wide. */
1181 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1182 memcpy (buf + i, &tpoint, 8);
1183 i += 8;
1184 append_insns (&buildaddr, i, buf);
1185
1186 /* The collector function, being in the shared library, may be more
1187 than 31 bits away from the jump pad. */
1188 i = 0;
1189 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1190 memcpy (buf + i, &collector, 8);
1191 i += 8;
1192 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1193 append_insns (&buildaddr, i, buf);
1194
1195 /* Clear the spin-lock. */
1196 i = 0;
1197 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1198 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1199 memcpy (buf + i, &lockaddr, 8);
1200 i += 8;
1201 append_insns (&buildaddr, i, buf);
1202
1203 /* Remove stack that had been used for the collect_t object. */
1204 i = 0;
1205 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1206 append_insns (&buildaddr, i, buf);
1207
1208 /* Restore register state. */
1209 i = 0;
1210 buf[i++] = 0x48; /* add $0x8,%rsp */
1211 buf[i++] = 0x83;
1212 buf[i++] = 0xc4;
1213 buf[i++] = 0x08;
1214 buf[i++] = 0x9d; /* popfq */
1215 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1216 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1217 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1218 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1219 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1220 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1221 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1222 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1223 buf[i++] = 0x58; /* pop %rax */
1224 buf[i++] = 0x5b; /* pop %rbx */
1225 buf[i++] = 0x59; /* pop %rcx */
1226 buf[i++] = 0x5a; /* pop %rdx */
1227 buf[i++] = 0x5e; /* pop %rsi */
1228 buf[i++] = 0x5f; /* pop %rdi */
1229 buf[i++] = 0x5d; /* pop %rbp */
1230 buf[i++] = 0x5c; /* pop %rsp */
1231 append_insns (&buildaddr, i, buf);
1232
1233 /* Now, adjust the original instruction to execute in the jump
1234 pad. */
1235 *adjusted_insn_addr = buildaddr;
1236 relocate_instruction (&buildaddr, tpaddr);
1237 *adjusted_insn_addr_end = buildaddr;
1238
1239 /* Finally, write a jump back to the program. */
1240
1241 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1242 if (loffset > INT_MAX || loffset < INT_MIN)
1243 {
1244 sprintf (err,
1245 "E.Jump back from jump pad too far from tracepoint "
1246 "(offset 0x%" PRIx64 " > int32).", loffset);
1247 return 1;
1248 }
1249
1250 offset = (int) loffset;
1251 memcpy (buf, jump_insn, sizeof (jump_insn));
1252 memcpy (buf + 1, &offset, 4);
1253 append_insns (&buildaddr, sizeof (jump_insn), buf);
1254
1255 /* The jump pad is now built. Wire in a jump to our jump pad. This
1256 is always done last (by our caller actually), so that we can
1257 install fast tracepoints with threads running. This relies on
1258 the agent's atomic write support. */
1259 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1260 if (loffset > INT_MAX || loffset < INT_MIN)
1261 {
1262 sprintf (err,
1263 "E.Jump pad too far from tracepoint "
1264 "(offset 0x%" PRIx64 " > int32).", loffset);
1265 return 1;
1266 }
1267
1268 offset = (int) loffset;
1269
1270 memcpy (buf, jump_insn, sizeof (jump_insn));
1271 memcpy (buf + 1, &offset, 4);
1272 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1273 *jjump_pad_insn_size = sizeof (jump_insn);
1274
1275 /* Return the end address of our pad. */
1276 *jump_entry = buildaddr;
1277
1278 return 0;
1279 }
1280
1281 #endif /* __x86_64__ */
1282
1283 /* Build a jump pad that saves registers and calls a collection
1284 function. Writes the jump instruction that jumps to the jump pad into
1285 JJUMPAD_INSN. The caller is responsible for writing it in at the
1286 tracepoint address. */
1287
1288 static int
1289 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1290 CORE_ADDR collector,
1291 CORE_ADDR lockaddr,
1292 ULONGEST orig_size,
1293 CORE_ADDR *jump_entry,
1294 CORE_ADDR *trampoline,
1295 ULONGEST *trampoline_size,
1296 unsigned char *jjump_pad_insn,
1297 ULONGEST *jjump_pad_insn_size,
1298 CORE_ADDR *adjusted_insn_addr,
1299 CORE_ADDR *adjusted_insn_addr_end,
1300 char *err)
1301 {
1302 unsigned char buf[0x100];
1303 int i, offset;
1304 CORE_ADDR buildaddr = *jump_entry;
1305
1306 /* Build the jump pad. */
1307
1308 /* First, do tracepoint data collection. Save registers. */
1309 i = 0;
1310 buf[i++] = 0x60; /* pushad */
1311 buf[i++] = 0x68; /* push tpaddr aka $pc */
1312 *((int *)(buf + i)) = (int) tpaddr;
1313 i += 4;
1314 buf[i++] = 0x9c; /* pushf */
1315 buf[i++] = 0x1e; /* push %ds */
1316 buf[i++] = 0x06; /* push %es */
1317 buf[i++] = 0x0f; /* push %fs */
1318 buf[i++] = 0xa0;
1319 buf[i++] = 0x0f; /* push %gs */
1320 buf[i++] = 0xa8;
1321 buf[i++] = 0x16; /* push %ss */
1322 buf[i++] = 0x0e; /* push %cs */
1323 append_insns (&buildaddr, i, buf);
1324
1325 /* Stack space for the collecting_t object. */
1326 i = 0;
1327 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1328
1329 /* Build the object. */
1330 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1331 memcpy (buf + i, &tpoint, 4);
1332 i += 4;
1333 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1334
1335 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1336 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1337 append_insns (&buildaddr, i, buf);
1338
1339 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1340 If we cared for it, this could be using xchg alternatively. */
1341
1342 i = 0;
1343 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1344 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1345 %esp,<lockaddr> */
1346 memcpy (&buf[i], (void *) &lockaddr, 4);
1347 i += 4;
1348 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1349 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1350 append_insns (&buildaddr, i, buf);
1351
1352
1353 /* Set up arguments to the gdb_collect call. */
1354 i = 0;
1355 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1356 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1357 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1358 append_insns (&buildaddr, i, buf);
1359
1360 i = 0;
1361 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1362 append_insns (&buildaddr, i, buf);
1363
1364 i = 0;
1365 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1366 memcpy (&buf[i], (void *) &tpoint, 4);
1367 i += 4;
1368 append_insns (&buildaddr, i, buf);
1369
1370 buf[0] = 0xe8; /* call <reladdr> */
1371 offset = collector - (buildaddr + sizeof (jump_insn));
1372 memcpy (buf + 1, &offset, 4);
1373 append_insns (&buildaddr, 5, buf);
1374 /* Clean up after the call. */
1375 buf[0] = 0x83; /* add $0x8,%esp */
1376 buf[1] = 0xc4;
1377 buf[2] = 0x08;
1378 append_insns (&buildaddr, 3, buf);
1379
1380
1381 /* Clear the spin-lock. This would need the LOCK prefix on older
1382 broken archs. */
1383 i = 0;
1384 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1385 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1386 memcpy (buf + i, &lockaddr, 4);
1387 i += 4;
1388 append_insns (&buildaddr, i, buf);
1389
1390
1391 /* Remove stack that had been used for the collect_t object. */
1392 i = 0;
1393 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1394 append_insns (&buildaddr, i, buf);
1395
1396 i = 0;
1397 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1398 buf[i++] = 0xc4;
1399 buf[i++] = 0x04;
1400 buf[i++] = 0x17; /* pop %ss */
1401 buf[i++] = 0x0f; /* pop %gs */
1402 buf[i++] = 0xa9;
1403 buf[i++] = 0x0f; /* pop %fs */
1404 buf[i++] = 0xa1;
1405 buf[i++] = 0x07; /* pop %es */
1406 buf[i++] = 0x1f; /* pop %ds */
1407 buf[i++] = 0x9d; /* popf */
1408 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1409 buf[i++] = 0xc4;
1410 buf[i++] = 0x04;
1411 buf[i++] = 0x61; /* popad */
1412 append_insns (&buildaddr, i, buf);
1413
1414 /* Now, adjust the original instruction to execute in the jump
1415 pad. */
1416 *adjusted_insn_addr = buildaddr;
1417 relocate_instruction (&buildaddr, tpaddr);
1418 *adjusted_insn_addr_end = buildaddr;
1419
1420 /* Write the jump back to the program. */
1421 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1422 memcpy (buf, jump_insn, sizeof (jump_insn));
1423 memcpy (buf + 1, &offset, 4);
1424 append_insns (&buildaddr, sizeof (jump_insn), buf);
1425
1426 /* The jump pad is now built. Wire in a jump to our jump pad. This
1427 is always done last (by our caller actually), so that we can
1428 install fast tracepoints with threads running. This relies on
1429 the agent's atomic write support. */
1430 if (orig_size == 4)
1431 {
1432 /* Create a trampoline. */
1433 *trampoline_size = sizeof (jump_insn);
1434 if (!claim_trampoline_space (*trampoline_size, trampoline))
1435 {
1436 /* No trampoline space available. */
1437 strcpy (err,
1438 "E.Cannot allocate trampoline space needed for fast "
1439 "tracepoints on 4-byte instructions.");
1440 return 1;
1441 }
1442
1443 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1444 memcpy (buf, jump_insn, sizeof (jump_insn));
1445 memcpy (buf + 1, &offset, 4);
1446 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1447
1448 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1449 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1450 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1451 memcpy (buf + 2, &offset, 2);
1452 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1453 *jjump_pad_insn_size = sizeof (small_jump_insn);
1454 }
1455 else
1456 {
1457 /* Else use a 32-bit relative jump instruction. */
1458 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1459 memcpy (buf, jump_insn, sizeof (jump_insn));
1460 memcpy (buf + 1, &offset, 4);
1461 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1462 *jjump_pad_insn_size = sizeof (jump_insn);
1463 }
1464
1465 /* Return the end address of our pad. */
1466 *jump_entry = buildaddr;
1467
1468 return 0;
1469 }
1470
1471 static int
1472 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1473 CORE_ADDR collector,
1474 CORE_ADDR lockaddr,
1475 ULONGEST orig_size,
1476 CORE_ADDR *jump_entry,
1477 CORE_ADDR *trampoline,
1478 ULONGEST *trampoline_size,
1479 unsigned char *jjump_pad_insn,
1480 ULONGEST *jjump_pad_insn_size,
1481 CORE_ADDR *adjusted_insn_addr,
1482 CORE_ADDR *adjusted_insn_addr_end,
1483 char *err)
1484 {
1485 #ifdef __x86_64__
1486 if (is_64bit_tdesc ())
1487 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1488 collector, lockaddr,
1489 orig_size, jump_entry,
1490 trampoline, trampoline_size,
1491 jjump_pad_insn,
1492 jjump_pad_insn_size,
1493 adjusted_insn_addr,
1494 adjusted_insn_addr_end,
1495 err);
1496 #endif
1497
1498 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1499 collector, lockaddr,
1500 orig_size, jump_entry,
1501 trampoline, trampoline_size,
1502 jjump_pad_insn,
1503 jjump_pad_insn_size,
1504 adjusted_insn_addr,
1505 adjusted_insn_addr_end,
1506 err);
1507 }
1508
1509 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1510 architectures. */
1511
1512 static int
1513 x86_get_min_fast_tracepoint_insn_len (void)
1514 {
1515 static int warned_about_fast_tracepoints = 0;
1516
1517 #ifdef __x86_64__
1518 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1519 used for fast tracepoints. */
1520 if (is_64bit_tdesc ())
1521 return 5;
1522 #endif
1523
1524 if (agent_loaded_p ())
1525 {
1526 char errbuf[IPA_BUFSIZ];
1527
1528 errbuf[0] = '\0';
1529
1530 /* On x86, if trampolines are available, then 4-byte jump instructions
1531 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1532 with a 4-byte offset are used instead. */
1533 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1534 return 4;
1535 else
1536 {
1537 /* GDB has no channel to explain to the user why a shorter fast
1538 tracepoint is not possible, but at least make GDBserver
1539 mention that something has gone awry. */
1540 if (!warned_about_fast_tracepoints)
1541 {
1542 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1543 warned_about_fast_tracepoints = 1;
1544 }
1545 return 5;
1546 }
1547 }
1548 else
1549 {
1550 /* Indicate that the minimum length is currently unknown since the IPA
1551 has not loaded yet. */
1552 return 0;
1553 }
1554 }
1555
1556 static void
1557 add_insns (unsigned char *start, int len)
1558 {
1559 CORE_ADDR buildaddr = current_insn_ptr;
1560
1561 if (debug_threads)
1562 debug_printf ("Adding %d bytes of insn at %s\n",
1563 len, paddress (buildaddr));
1564
1565 append_insns (&buildaddr, len, start);
1566 current_insn_ptr = buildaddr;
1567 }
1568
1569 /* Our general strategy for emitting code is to avoid specifying raw
1570 bytes whenever possible, and instead copy a block of inline asm
1571 that is embedded in the function. This is a little messy, because
1572 we need to keep the compiler from discarding what looks like dead
1573 code, plus suppress various warnings. */
1574
1575 #define EMIT_ASM(NAME, INSNS) \
1576 do \
1577 { \
1578 extern unsigned char start_ ## NAME, end_ ## NAME; \
1579 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1580 __asm__ ("jmp end_" #NAME "\n" \
1581 "\t" "start_" #NAME ":" \
1582 "\t" INSNS "\n" \
1583 "\t" "end_" #NAME ":"); \
1584 } while (0)
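/* For illustration, EMIT_ASM (amd64_pop, "pop %rax") assembles the
   "pop %rax" bytes into gdbserver itself between the local symbols
   start_amd64_pop and end_amd64_pop, jumps over them so they are never
   executed here, and add_insns then copies those bytes into the
   inferior's jump pad at current_insn_ptr.  */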
1585
1586 #ifdef __x86_64__
1587
1588 #define EMIT_ASM32(NAME,INSNS) \
1589 do \
1590 { \
1591 extern unsigned char start_ ## NAME, end_ ## NAME; \
1592 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1593 __asm__ (".code32\n" \
1594 "\t" "jmp end_" #NAME "\n" \
1595 "\t" "start_" #NAME ":\n" \
1596 "\t" INSNS "\n" \
1597 "\t" "end_" #NAME ":\n" \
1598 ".code64\n"); \
1599 } while (0)
1600
1601 #else
1602
1603 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1604
1605 #endif
1606
1607 #ifdef __x86_64__
1608
1609 static void
1610 amd64_emit_prologue (void)
1611 {
1612 EMIT_ASM (amd64_prologue,
1613 "pushq %rbp\n\t"
1614 "movq %rsp,%rbp\n\t"
1615 "sub $0x20,%rsp\n\t"
1616 "movq %rdi,-8(%rbp)\n\t"
1617 "movq %rsi,-16(%rbp)");
1618 }
1619
1620
1621 static void
1622 amd64_emit_epilogue (void)
1623 {
1624 EMIT_ASM (amd64_epilogue,
1625 "movq -16(%rbp),%rdi\n\t"
1626 "movq %rax,(%rdi)\n\t"
1627 "xor %rax,%rax\n\t"
1628 "leave\n\t"
1629 "ret");
1630 }
1631
1632 static void
1633 amd64_emit_add (void)
1634 {
1635 EMIT_ASM (amd64_add,
1636 "add (%rsp),%rax\n\t"
1637 "lea 0x8(%rsp),%rsp");
1638 }
1639
1640 static void
1641 amd64_emit_sub (void)
1642 {
1643 EMIT_ASM (amd64_sub,
1644 "sub %rax,(%rsp)\n\t"
1645 "pop %rax");
1646 }
1647
1648 static void
1649 amd64_emit_mul (void)
1650 {
1651 emit_error = 1;
1652 }
1653
1654 static void
1655 amd64_emit_lsh (void)
1656 {
1657 emit_error = 1;
1658 }
1659
1660 static void
1661 amd64_emit_rsh_signed (void)
1662 {
1663 emit_error = 1;
1664 }
1665
1666 static void
1667 amd64_emit_rsh_unsigned (void)
1668 {
1669 emit_error = 1;
1670 }
1671
1672 static void
1673 amd64_emit_ext (int arg)
1674 {
1675 switch (arg)
1676 {
1677 case 8:
1678 EMIT_ASM (amd64_ext_8,
1679 "cbtw\n\t"
1680 "cwtl\n\t"
1681 "cltq");
1682 break;
1683 case 16:
1684 EMIT_ASM (amd64_ext_16,
1685 "cwtl\n\t"
1686 "cltq");
1687 break;
1688 case 32:
1689 EMIT_ASM (amd64_ext_32,
1690 "cltq");
1691 break;
1692 default:
1693 emit_error = 1;
1694 }
1695 }
1696
1697 static void
1698 amd64_emit_log_not (void)
1699 {
1700 EMIT_ASM (amd64_log_not,
1701 "test %rax,%rax\n\t"
1702 "sete %cl\n\t"
1703 "movzbq %cl,%rax");
1704 }
1705
1706 static void
1707 amd64_emit_bit_and (void)
1708 {
1709 EMIT_ASM (amd64_and,
1710 "and (%rsp),%rax\n\t"
1711 "lea 0x8(%rsp),%rsp");
1712 }
1713
1714 static void
1715 amd64_emit_bit_or (void)
1716 {
1717 EMIT_ASM (amd64_or,
1718 "or (%rsp),%rax\n\t"
1719 "lea 0x8(%rsp),%rsp");
1720 }
1721
1722 static void
1723 amd64_emit_bit_xor (void)
1724 {
1725 EMIT_ASM (amd64_xor,
1726 "xor (%rsp),%rax\n\t"
1727 "lea 0x8(%rsp),%rsp");
1728 }
1729
1730 static void
1731 amd64_emit_bit_not (void)
1732 {
1733 EMIT_ASM (amd64_bit_not,
1734 "xorq $0xffffffffffffffff,%rax");
1735 }
1736
1737 static void
1738 amd64_emit_equal (void)
1739 {
1740 EMIT_ASM (amd64_equal,
1741 "cmp %rax,(%rsp)\n\t"
1742 "je .Lamd64_equal_true\n\t"
1743 "xor %rax,%rax\n\t"
1744 "jmp .Lamd64_equal_end\n\t"
1745 ".Lamd64_equal_true:\n\t"
1746 "mov $0x1,%rax\n\t"
1747 ".Lamd64_equal_end:\n\t"
1748 "lea 0x8(%rsp),%rsp");
1749 }
1750
1751 static void
1752 amd64_emit_less_signed (void)
1753 {
1754 EMIT_ASM (amd64_less_signed,
1755 "cmp %rax,(%rsp)\n\t"
1756 "jl .Lamd64_less_signed_true\n\t"
1757 "xor %rax,%rax\n\t"
1758 "jmp .Lamd64_less_signed_end\n\t"
1759 ".Lamd64_less_signed_true:\n\t"
1760 "mov $1,%rax\n\t"
1761 ".Lamd64_less_signed_end:\n\t"
1762 "lea 0x8(%rsp),%rsp");
1763 }
1764
1765 static void
1766 amd64_emit_less_unsigned (void)
1767 {
1768 EMIT_ASM (amd64_less_unsigned,
1769 "cmp %rax,(%rsp)\n\t"
1770 "jb .Lamd64_less_unsigned_true\n\t"
1771 "xor %rax,%rax\n\t"
1772 "jmp .Lamd64_less_unsigned_end\n\t"
1773 ".Lamd64_less_unsigned_true:\n\t"
1774 "mov $1,%rax\n\t"
1775 ".Lamd64_less_unsigned_end:\n\t"
1776 "lea 0x8(%rsp),%rsp");
1777 }
1778
1779 static void
1780 amd64_emit_ref (int size)
1781 {
1782 switch (size)
1783 {
1784 case 1:
1785 EMIT_ASM (amd64_ref1,
1786 "movb (%rax),%al");
1787 break;
1788 case 2:
1789 EMIT_ASM (amd64_ref2,
1790 "movw (%rax),%ax");
1791 break;
1792 case 4:
1793 EMIT_ASM (amd64_ref4,
1794 "movl (%rax),%eax");
1795 break;
1796 case 8:
1797 EMIT_ASM (amd64_ref8,
1798 "movq (%rax),%rax");
1799 break;
1800 }
1801 }
1802
1803 static void
1804 amd64_emit_if_goto (int *offset_p, int *size_p)
1805 {
1806 EMIT_ASM (amd64_if_goto,
1807 "mov %rax,%rcx\n\t"
1808 "pop %rax\n\t"
1809 "cmp $0,%rcx\n\t"
1810 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1811 if (offset_p)
1812 *offset_p = 10;
1813 if (size_p)
1814 *size_p = 4;
1815 }
1816
1817 static void
1818 amd64_emit_goto (int *offset_p, int *size_p)
1819 {
1820 EMIT_ASM (amd64_goto,
1821 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1822 if (offset_p)
1823 *offset_p = 1;
1824 if (size_p)
1825 *size_p = 4;
1826 }
1827
1828 static void
1829 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1830 {
1831 int diff = (to - (from + size));
1832 unsigned char buf[sizeof (int)];
1833
1834 if (size != 4)
1835 {
1836 emit_error = 1;
1837 return;
1838 }
1839
1840 memcpy (buf, &diff, sizeof (int));
1841 write_inferior_memory (from, buf, sizeof (int));
1842 }
1843
1844 static void
1845 amd64_emit_const (LONGEST num)
1846 {
1847 unsigned char buf[16];
1848 int i;
1849 CORE_ADDR buildaddr = current_insn_ptr;
1850
1851 i = 0;
1852 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1853 memcpy (&buf[i], &num, sizeof (num));
1854 i += 8;
1855 append_insns (&buildaddr, i, buf);
1856 current_insn_ptr = buildaddr;
1857 }
1858
1859 static void
1860 amd64_emit_call (CORE_ADDR fn)
1861 {
1862 unsigned char buf[16];
1863 int i;
1864 CORE_ADDR buildaddr;
1865 LONGEST offset64;
1866
1867 /* The destination function, being in the shared library, may be more
1868 than 31 bits away from the compiled code pad. */
1869
1870 buildaddr = current_insn_ptr;
1871
1872 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1873
1874 i = 0;
1875
1876 if (offset64 > INT_MAX || offset64 < INT_MIN)
1877 {
1878 /* Offset is too large for a call. Use callq, but that requires
1879 a register, so avoid it if possible. Use r10, since it is
1880 call-clobbered, we don't have to push/pop it. */
1881 buf[i++] = 0x48; /* mov $fn,%r10 */
1882 buf[i++] = 0xba;
1883 memcpy (buf + i, &fn, 8);
1884 i += 8;
1885 buf[i++] = 0xff; /* callq *%r10 */
1886 buf[i++] = 0xd2;
1887 }
1888 else
1889 {
1890 int offset32 = offset64; /* we know we can't overflow here. */
buf[i++] = 0xe8; /* call <reladdr> */
1891 memcpy (buf + i, &offset32, 4);
1892 i += 4;
1893 }
1894
1895 append_insns (&buildaddr, i, buf);
1896 current_insn_ptr = buildaddr;
1897 }
1898
1899 static void
1900 amd64_emit_reg (int reg)
1901 {
1902 unsigned char buf[16];
1903 int i;
1904 CORE_ADDR buildaddr;
1905
1906 /* Assume raw_regs is still in %rdi. */
1907 buildaddr = current_insn_ptr;
1908 i = 0;
1909 buf[i++] = 0xbe; /* mov $<n>,%esi */
1910 memcpy (&buf[i], &reg, sizeof (reg));
1911 i += 4;
1912 append_insns (&buildaddr, i, buf);
1913 current_insn_ptr = buildaddr;
1914 amd64_emit_call (get_raw_reg_func_addr ());
1915 }
1916
1917 static void
1918 amd64_emit_pop (void)
1919 {
1920 EMIT_ASM (amd64_pop,
1921 "pop %rax");
1922 }
1923
1924 static void
1925 amd64_emit_stack_flush (void)
1926 {
1927 EMIT_ASM (amd64_stack_flush,
1928 "push %rax");
1929 }
1930
1931 static void
1932 amd64_emit_zero_ext (int arg)
1933 {
1934 switch (arg)
1935 {
1936 case 8:
1937 EMIT_ASM (amd64_zero_ext_8,
1938 "and $0xff,%rax");
1939 break;
1940 case 16:
1941 EMIT_ASM (amd64_zero_ext_16,
1942 "and $0xffff,%rax");
1943 break;
1944 case 32:
1945 EMIT_ASM (amd64_zero_ext_32,
1946 "mov $0xffffffff,%rcx\n\t"
1947 "and %rcx,%rax");
1948 break;
1949 default:
1950 emit_error = 1;
1951 }
1952 }
1953
1954 static void
1955 amd64_emit_swap (void)
1956 {
1957 EMIT_ASM (amd64_swap,
1958 "mov %rax,%rcx\n\t"
1959 "pop %rax\n\t"
1960 "push %rcx");
1961 }
1962
1963 static void
1964 amd64_emit_stack_adjust (int n)
1965 {
1966 unsigned char buf[16];
1967 int i;
1968 CORE_ADDR buildaddr = current_insn_ptr;
1969
1970 i = 0;
1971 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1972 buf[i++] = 0x8d;
1973 buf[i++] = 0x64;
1974 buf[i++] = 0x24;
1975 /* This only handles adjustments up to 16, but we don't expect any more. */
1976 buf[i++] = n * 8;
1977 append_insns (&buildaddr, i, buf);
1978 current_insn_ptr = buildaddr;
1979 }
1980
1981 /* FN's prototype is `LONGEST(*fn)(int)'. */
1982
1983 static void
1984 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1985 {
1986 unsigned char buf[16];
1987 int i;
1988 CORE_ADDR buildaddr;
1989
1990 buildaddr = current_insn_ptr;
1991 i = 0;
1992 buf[i++] = 0xbf; /* movl $<n>,%edi */
1993 memcpy (&buf[i], &arg1, sizeof (arg1));
1994 i += 4;
1995 append_insns (&buildaddr, i, buf);
1996 current_insn_ptr = buildaddr;
1997 amd64_emit_call (fn);
1998 }
1999
2000 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2001
2002 static void
2003 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2004 {
2005 unsigned char buf[16];
2006 int i;
2007 CORE_ADDR buildaddr;
2008
2009 buildaddr = current_insn_ptr;
2010 i = 0;
2011 buf[i++] = 0xbf; /* movl $<n>,%edi */
2012 memcpy (&buf[i], &arg1, sizeof (arg1));
2013 i += 4;
2014 append_insns (&buildaddr, i, buf);
2015 current_insn_ptr = buildaddr;
2016 EMIT_ASM (amd64_void_call_2_a,
2017 /* Save away a copy of the stack top. */
2018 "push %rax\n\t"
2019 /* Also pass top as the second argument. */
2020 "mov %rax,%rsi");
2021 amd64_emit_call (fn);
2022 EMIT_ASM (amd64_void_call_2_b,
2023 /* Restore the stack top, %rax may have been trashed. */
2024 "pop %rax");
2025 }
2026
2027 void
2028 amd64_emit_eq_goto (int *offset_p, int *size_p)
2029 {
2030 EMIT_ASM (amd64_eq,
2031 "cmp %rax,(%rsp)\n\t"
2032 "jne .Lamd64_eq_fallthru\n\t"
2033 "lea 0x8(%rsp),%rsp\n\t"
2034 "pop %rax\n\t"
2035 /* jmp, but don't trust the assembler to choose the right jump */
2036 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2037 ".Lamd64_eq_fallthru:\n\t"
2038 "lea 0x8(%rsp),%rsp\n\t"
2039 "pop %rax");
2040
2041 if (offset_p)
2042 *offset_p = 13;
2043 if (size_p)
2044 *size_p = 4;
2045 }
2046
2047 void
2048 amd64_emit_ne_goto (int *offset_p, int *size_p)
2049 {
2050 EMIT_ASM (amd64_ne,
2051 "cmp %rax,(%rsp)\n\t"
2052 "je .Lamd64_ne_fallthru\n\t"
2053 "lea 0x8(%rsp),%rsp\n\t"
2054 "pop %rax\n\t"
2055 /* jmp, but don't trust the assembler to choose the right jump */
2056 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2057 ".Lamd64_ne_fallthru:\n\t"
2058 "lea 0x8(%rsp),%rsp\n\t"
2059 "pop %rax");
2060
2061 if (offset_p)
2062 *offset_p = 13;
2063 if (size_p)
2064 *size_p = 4;
2065 }
2066
2067 void
2068 amd64_emit_lt_goto (int *offset_p, int *size_p)
2069 {
2070 EMIT_ASM (amd64_lt,
2071 "cmp %rax,(%rsp)\n\t"
2072 "jnl .Lamd64_lt_fallthru\n\t"
2073 "lea 0x8(%rsp),%rsp\n\t"
2074 "pop %rax\n\t"
2075 /* jmp, but don't trust the assembler to choose the right jump */
2076 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2077 ".Lamd64_lt_fallthru:\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2079 "pop %rax");
2080
2081 if (offset_p)
2082 *offset_p = 13;
2083 if (size_p)
2084 *size_p = 4;
2085 }
2086
2087 void
2088 amd64_emit_le_goto (int *offset_p, int *size_p)
2089 {
2090 EMIT_ASM (amd64_le,
2091 "cmp %rax,(%rsp)\n\t"
2092 "jnle .Lamd64_le_fallthru\n\t"
2093 "lea 0x8(%rsp),%rsp\n\t"
2094 "pop %rax\n\t"
2095 /* jmp, but don't trust the assembler to choose the right jump */
2096 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2097 ".Lamd64_le_fallthru:\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2099 "pop %rax");
2100
2101 if (offset_p)
2102 *offset_p = 13;
2103 if (size_p)
2104 *size_p = 4;
2105 }
2106
2107 void
2108 amd64_emit_gt_goto (int *offset_p, int *size_p)
2109 {
2110 EMIT_ASM (amd64_gt,
2111 "cmp %rax,(%rsp)\n\t"
2112 "jng .Lamd64_gt_fallthru\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2114 "pop %rax\n\t"
2115 /* jmp, but don't trust the assembler to choose the right jump */
2116 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2117 ".Lamd64_gt_fallthru:\n\t"
2118 "lea 0x8(%rsp),%rsp\n\t"
2119 "pop %rax");
2120
2121 if (offset_p)
2122 *offset_p = 13;
2123 if (size_p)
2124 *size_p = 4;
2125 }
2126
2127 void
2128 amd64_emit_ge_goto (int *offset_p, int *size_p)
2129 {
2130 EMIT_ASM (amd64_ge,
2131 "cmp %rax,(%rsp)\n\t"
2132 "jnge .Lamd64_ge_fallthru\n\t"
2133 ".Lamd64_ge_jump:\n\t"
2134 "lea 0x8(%rsp),%rsp\n\t"
2135 "pop %rax\n\t"
2136 /* jmp, but don't trust the assembler to choose the right jump */
2137 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2138 ".Lamd64_ge_fallthru:\n\t"
2139 "lea 0x8(%rsp),%rsp\n\t"
2140 "pop %rax");
2141
2142 if (offset_p)
2143 *offset_p = 13;
2144 if (size_p)
2145 *size_p = 4;
2146 }
2147
2148 struct emit_ops amd64_emit_ops =
2149 {
2150 amd64_emit_prologue,
2151 amd64_emit_epilogue,
2152 amd64_emit_add,
2153 amd64_emit_sub,
2154 amd64_emit_mul,
2155 amd64_emit_lsh,
2156 amd64_emit_rsh_signed,
2157 amd64_emit_rsh_unsigned,
2158 amd64_emit_ext,
2159 amd64_emit_log_not,
2160 amd64_emit_bit_and,
2161 amd64_emit_bit_or,
2162 amd64_emit_bit_xor,
2163 amd64_emit_bit_not,
2164 amd64_emit_equal,
2165 amd64_emit_less_signed,
2166 amd64_emit_less_unsigned,
2167 amd64_emit_ref,
2168 amd64_emit_if_goto,
2169 amd64_emit_goto,
2170 amd64_write_goto_address,
2171 amd64_emit_const,
2172 amd64_emit_call,
2173 amd64_emit_reg,
2174 amd64_emit_pop,
2175 amd64_emit_stack_flush,
2176 amd64_emit_zero_ext,
2177 amd64_emit_swap,
2178 amd64_emit_stack_adjust,
2179 amd64_emit_int_call_1,
2180 amd64_emit_void_call_2,
2181 amd64_emit_eq_goto,
2182 amd64_emit_ne_goto,
2183 amd64_emit_lt_goto,
2184 amd64_emit_le_goto,
2185 amd64_emit_gt_goto,
2186 amd64_emit_ge_goto
2187 };
2188
2189 #endif /* __x86_64__ */
2190
2191 static void
2192 i386_emit_prologue (void)
2193 {
2194 EMIT_ASM32 (i386_prologue,
2195 "push %ebp\n\t"
2196 "mov %esp,%ebp\n\t"
2197 "push %ebx");
2198 /* At this point, the raw regs base address is at 8(%ebp), and the
2199 value pointer is at 12(%ebp). */
2200 }
2201
2202 static void
2203 i386_emit_epilogue (void)
2204 {
2205 EMIT_ASM32 (i386_epilogue,
2206 "mov 12(%ebp),%ecx\n\t"
2207 "mov %eax,(%ecx)\n\t"
2208 "mov %ebx,0x4(%ecx)\n\t"
2209 "xor %eax,%eax\n\t"
2210 "pop %ebx\n\t"
2211 "pop %ebp\n\t"
2212 "ret");
2213 }
2214
2215 static void
2216 i386_emit_add (void)
2217 {
2218 EMIT_ASM32 (i386_add,
2219 "add (%esp),%eax\n\t"
2220 "adc 0x4(%esp),%ebx\n\t"
2221 "lea 0x8(%esp),%esp");
2222 }
2223
2224 static void
2225 i386_emit_sub (void)
2226 {
2227 EMIT_ASM32 (i386_sub,
2228 "subl %eax,(%esp)\n\t"
2229 "sbbl %ebx,4(%esp)\n\t"
2230 "pop %eax\n\t"
2231 "pop %ebx\n\t");
2232 }
2233
2234 static void
2235 i386_emit_mul (void)
2236 {
2237 emit_error = 1;
2238 }
2239
2240 static void
2241 i386_emit_lsh (void)
2242 {
2243 emit_error = 1;
2244 }
2245
2246 static void
2247 i386_emit_rsh_signed (void)
2248 {
2249 emit_error = 1;
2250 }
2251
2252 static void
2253 i386_emit_rsh_unsigned (void)
2254 {
2255 emit_error = 1;
2256 }
2257
2258 static void
2259 i386_emit_ext (int arg)
2260 {
2261 switch (arg)
2262 {
2263 case 8:
2264 EMIT_ASM32 (i386_ext_8,
2265 "cbtw\n\t"
2266 "cwtl\n\t"
2267 "movl %eax,%ebx\n\t"
2268 "sarl $31,%ebx");
2269 break;
2270 case 16:
2271 EMIT_ASM32 (i386_ext_16,
2272 "cwtl\n\t"
2273 "movl %eax,%ebx\n\t"
2274 "sarl $31,%ebx");
2275 break;
2276 case 32:
2277 EMIT_ASM32 (i386_ext_32,
2278 "movl %eax,%ebx\n\t"
2279 "sarl $31,%ebx");
2280 break;
2281 default:
2282 emit_error = 1;
2283 }
2284 }
2285
2286 static void
2287 i386_emit_log_not (void)
2288 {
2289 EMIT_ASM32 (i386_log_not,
2290 "or %ebx,%eax\n\t"
2291 "test %eax,%eax\n\t"
2292 "sete %cl\n\t"
2293 "xor %ebx,%ebx\n\t"
2294 "movzbl %cl,%eax");
2295 }
2296
2297 static void
2298 i386_emit_bit_and (void)
2299 {
2300 EMIT_ASM32 (i386_and,
2301 "and (%esp),%eax\n\t"
2302 "and 0x4(%esp),%ebx\n\t"
2303 "lea 0x8(%esp),%esp");
2304 }
2305
2306 static void
2307 i386_emit_bit_or (void)
2308 {
2309 EMIT_ASM32 (i386_or,
2310 "or (%esp),%eax\n\t"
2311 "or 0x4(%esp),%ebx\n\t"
2312 "lea 0x8(%esp),%esp");
2313 }
2314
2315 static void
2316 i386_emit_bit_xor (void)
2317 {
2318 EMIT_ASM32 (i386_xor,
2319 "xor (%esp),%eax\n\t"
2320 "xor 0x4(%esp),%ebx\n\t"
2321 "lea 0x8(%esp),%esp");
2322 }
2323
2324 static void
2325 i386_emit_bit_not (void)
2326 {
2327 EMIT_ASM32 (i386_bit_not,
2328 "xor $0xffffffff,%eax\n\t"
2329 "xor $0xffffffff,%ebx\n\t");
2330 }
2331
2332 static void
2333 i386_emit_equal (void)
2334 {
2335 EMIT_ASM32 (i386_equal,
2336 "cmpl %ebx,4(%esp)\n\t"
2337 "jne .Li386_equal_false\n\t"
2338 "cmpl %eax,(%esp)\n\t"
2339 "je .Li386_equal_true\n\t"
2340 ".Li386_equal_false:\n\t"
2341 "xor %eax,%eax\n\t"
2342 "jmp .Li386_equal_end\n\t"
2343 ".Li386_equal_true:\n\t"
2344 "mov $1,%eax\n\t"
2345 ".Li386_equal_end:\n\t"
2346 "xor %ebx,%ebx\n\t"
2347 "lea 0x8(%esp),%esp");
2348 }
2349
2350 static void
2351 i386_emit_less_signed (void)
2352 {
2353 EMIT_ASM32 (i386_less_signed,
2354 "cmpl %ebx,4(%esp)\n\t"
2355 "jl .Li386_less_signed_true\n\t"
2356 "jne .Li386_less_signed_false\n\t"
2357 "cmpl %eax,(%esp)\n\t"
2358 "jl .Li386_less_signed_true\n\t"
2359 ".Li386_less_signed_false:\n\t"
2360 "xor %eax,%eax\n\t"
2361 "jmp .Li386_less_signed_end\n\t"
2362 ".Li386_less_signed_true:\n\t"
2363 "mov $1,%eax\n\t"
2364 ".Li386_less_signed_end:\n\t"
2365 "xor %ebx,%ebx\n\t"
2366 "lea 0x8(%esp),%esp");
2367 }
2368
2369 static void
2370 i386_emit_less_unsigned (void)
2371 {
2372 EMIT_ASM32 (i386_less_unsigned,
2373 "cmpl %ebx,4(%esp)\n\t"
2374 "jb .Li386_less_unsigned_true\n\t"
2375 "jne .Li386_less_unsigned_false\n\t"
2376 "cmpl %eax,(%esp)\n\t"
2377 "jb .Li386_less_unsigned_true\n\t"
2378 ".Li386_less_unsigned_false:\n\t"
2379 "xor %eax,%eax\n\t"
2380 "jmp .Li386_less_unsigned_end\n\t"
2381 ".Li386_less_unsigned_true:\n\t"
2382 "mov $1,%eax\n\t"
2383 ".Li386_less_unsigned_end:\n\t"
2384 "xor %ebx,%ebx\n\t"
2385 "lea 0x8(%esp),%esp");
2386 }
2387
2388 static void
2389 i386_emit_ref (int size)
2390 {
2391 switch (size)
2392 {
2393 case 1:
2394 EMIT_ASM32 (i386_ref1,
2395 "movb (%eax),%al");
2396 break;
2397 case 2:
2398 EMIT_ASM32 (i386_ref2,
2399 "movw (%eax),%ax");
2400 break;
2401 case 4:
2402 EMIT_ASM32 (i386_ref4,
2403 "movl (%eax),%eax");
2404 break;
2405 case 8:
2406 EMIT_ASM32 (i386_ref8,
2407 "movl 4(%eax),%ebx\n\t"
2408 "movl (%eax),%eax");
2409 break;
2410 }
2411 }
2412
2413 static void
2414 i386_emit_if_goto (int *offset_p, int *size_p)
2415 {
2416 EMIT_ASM32 (i386_if_goto,
2417 "mov %eax,%ecx\n\t"
2418 "or %ebx,%ecx\n\t"
2419 "pop %eax\n\t"
2420 "pop %ebx\n\t"
2421 "cmpl $0,%ecx\n\t"
2422 /* Don't trust the assembler to choose the right jump */
2423 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2424
2425 if (offset_p)
2426 *offset_p = 11; /* be sure that this matches the sequence above */
2427 if (size_p)
2428 *size_p = 4;
2429 }
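/* Aside (not part of the original source): the offset of 11 assumes
   the usual 32-bit encodings of the sequence above:

     89 c1       mov %eax,%ecx      2 bytes
     09 d9       or %ebx,%ecx       2 bytes
     58          pop %eax           1 byte
     5b          pop %ebx           1 byte
     83 f9 00    cmpl $0,%ecx       3 bytes
     0f 85       jne opcode         2 bytes
                                    --------
                                    11 bytes

   so the 4-byte jump operand patched later starts at byte 11.  */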
2430
2431 static void
2432 i386_emit_goto (int *offset_p, int *size_p)
2433 {
2434 EMIT_ASM32 (i386_goto,
2435 /* Don't trust the assembler to choose the right jump */
2436 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2437 if (offset_p)
2438 *offset_p = 1;
2439 if (size_p)
2440 *size_p = 4;
2441 }
2442
2443 static void
2444 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2445 {
2446 int diff = (to - (from + size));
2447 unsigned char buf[sizeof (int)];
2448
2449 /* We're only doing 4-byte sizes at the moment. */
2450 if (size != 4)
2451 {
2452 emit_error = 1;
2453 return;
2454 }
2455
2456 memcpy (buf, &diff, sizeof (int));
2457 write_inferior_memory (from, buf, sizeof (int));
2458 }
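/* Illustrative sketch (not part of the original source): FROM is the
   address of the 4-byte operand itself, so the displacement is taken
   relative to the instruction that follows it.  For example, patching
   an operand at 0x1000 to reach a target at 0x1020 stores

     diff = 0x1020 - (0x1000 + 4) = 0x1c

   which is what the memcpy above writes, in little-endian byte order,
   into the inferior.  */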
2459
2460 static void
2461 i386_emit_const (LONGEST num)
2462 {
2463 unsigned char buf[16];
2464 int i, hi, lo;
2465 CORE_ADDR buildaddr = current_insn_ptr;
2466
2467 i = 0;
2468 buf[i++] = 0xb8; /* mov $<n>,%eax */
2469 lo = num & 0xffffffff;
2470 memcpy (&buf[i], &lo, sizeof (lo));
2471 i += 4;
2472 hi = ((num >> 32) & 0xffffffff);
2473 if (hi)
2474 {
2475 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2476 memcpy (&buf[i], &hi, sizeof (hi));
2477 i += 4;
2478 }
2479 else
2480 {
2481 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2482 }
2483 append_insns (&buildaddr, i, buf);
2484 current_insn_ptr = buildaddr;
2485 }
2486
2487 static void
2488 i386_emit_call (CORE_ADDR fn)
2489 {
2490 unsigned char buf[16];
2491 int i, offset;
2492 CORE_ADDR buildaddr;
2493
2494 buildaddr = current_insn_ptr;
2495 i = 0;
2496 buf[i++] = 0xe8; /* call <reladdr> */
2497 offset = ((int) fn) - (buildaddr + 5);
2498 memcpy (buf + 1, &offset, 4);
2499 append_insns (&buildaddr, 5, buf);
2500 current_insn_ptr = buildaddr;
2501 }
2502
2503 static void
2504 i386_emit_reg (int reg)
2505 {
2506 unsigned char buf[16];
2507 int i;
2508 CORE_ADDR buildaddr;
2509
2510 EMIT_ASM32 (i386_reg_a,
2511 "sub $0x8,%esp");
2512 buildaddr = current_insn_ptr;
2513 i = 0;
2514 buf[i++] = 0xb8; /* mov $<n>,%eax */
2515 memcpy (&buf[i], &reg, sizeof (reg));
2516 i += 4;
2517 append_insns (&buildaddr, i, buf);
2518 current_insn_ptr = buildaddr;
2519 EMIT_ASM32 (i386_reg_b,
2520 "mov %eax,4(%esp)\n\t"
2521 "mov 8(%ebp),%eax\n\t"
2522 "mov %eax,(%esp)");
2523 i386_emit_call (get_raw_reg_func_addr ());
2524 EMIT_ASM32 (i386_reg_c,
2525 "xor %ebx,%ebx\n\t"
2526 "lea 0x8(%esp),%esp");
2527 }
2528
2529 static void
2530 i386_emit_pop (void)
2531 {
2532 EMIT_ASM32 (i386_pop,
2533 "pop %eax\n\t"
2534 "pop %ebx");
2535 }
2536
2537 static void
2538 i386_emit_stack_flush (void)
2539 {
2540 EMIT_ASM32 (i386_stack_flush,
2541 "push %ebx\n\t"
2542 "push %eax");
2543 }
2544
2545 static void
2546 i386_emit_zero_ext (int arg)
2547 {
2548 switch (arg)
2549 {
2550 case 8:
2551 EMIT_ASM32 (i386_zero_ext_8,
2552 "and $0xff,%eax\n\t"
2553 "xor %ebx,%ebx");
2554 break;
2555 case 16:
2556 EMIT_ASM32 (i386_zero_ext_16,
2557 "and $0xffff,%eax\n\t"
2558 "xor %ebx,%ebx");
2559 break;
2560 case 32:
2561 EMIT_ASM32 (i386_zero_ext_32,
2562 "xor %ebx,%ebx");
2563 break;
2564 default:
2565 emit_error = 1;
2566 }
2567 }
2568
2569 static void
2570 i386_emit_swap (void)
2571 {
2572 EMIT_ASM32 (i386_swap,
2573 "mov %eax,%ecx\n\t"
2574 "mov %ebx,%edx\n\t"
2575 "pop %eax\n\t"
2576 "pop %ebx\n\t"
2577 "push %edx\n\t"
2578 "push %ecx");
2579 }
2580
2581 static void
2582 i386_emit_stack_adjust (int n)
2583 {
2584 unsigned char buf[16];
2585 int i;
2586 CORE_ADDR buildaddr = current_insn_ptr;
2587
2588 i = 0;
2589 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2590 buf[i++] = 0x64;
2591 buf[i++] = 0x24;
2592 buf[i++] = n * 8;
2593 append_insns (&buildaddr, i, buf);
2594 current_insn_ptr = buildaddr;
2595 }
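/* Aside (not part of the original source): this is the 32-bit
   counterpart of amd64_emit_stack_adjust; without the REX prefix the
   emitted bytes decode as

     8d 64 24 NN      lea NN(%esp),%esp

   with NN again the signed 8-bit displacement n * 8.  */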
2596
2597 /* FN's prototype is `LONGEST(*fn)(int)'. */
2598
2599 static void
2600 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2601 {
2602 unsigned char buf[16];
2603 int i;
2604 CORE_ADDR buildaddr;
2605
2606 EMIT_ASM32 (i386_int_call_1_a,
2607 /* Reserve a bit of stack space. */
2608 "sub $0x8,%esp");
2609 /* Put the one argument on the stack. */
2610 buildaddr = current_insn_ptr;
2611 i = 0;
2612 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2613 buf[i++] = 0x04;
2614 buf[i++] = 0x24;
2615 memcpy (&buf[i], &arg1, sizeof (arg1));
2616 i += 4;
2617 append_insns (&buildaddr, i, buf);
2618 current_insn_ptr = buildaddr;
2619 i386_emit_call (fn);
2620 EMIT_ASM32 (i386_int_call_1_c,
2621 "mov %edx,%ebx\n\t"
2622 "lea 0x8(%esp),%esp");
2623 }
2624
2625 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2626
2627 static void
2628 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2629 {
2630 unsigned char buf[16];
2631 int i;
2632 CORE_ADDR buildaddr;
2633
2634 EMIT_ASM32 (i386_void_call_2_a,
2635 /* Preserve %eax only; we don't have to worry about %ebx. */
2636 "push %eax\n\t"
2637 /* Reserve a bit of stack space for arguments. */
2638 "sub $0x10,%esp\n\t"
2639 /* Copy "top" to the second argument position. (Note that
2640 we can't assume function won't scribble on its
2641 arguments, so don't try to restore from this.) */
2642 "mov %eax,4(%esp)\n\t"
2643 "mov %ebx,8(%esp)");
2644 /* Put the first argument on the stack. */
2645 buildaddr = current_insn_ptr;
2646 i = 0;
2647 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2648 buf[i++] = 0x04;
2649 buf[i++] = 0x24;
2650 memcpy (&buf[i], &arg1, sizeof (arg1));
2651 i += 4;
2652 append_insns (&buildaddr, i, buf);
2653 current_insn_ptr = buildaddr;
2654 i386_emit_call (fn);
2655 EMIT_ASM32 (i386_void_call_2_b,
2656 "lea 0x10(%esp),%esp\n\t"
2657 /* Restore original stack top. */
2658 "pop %eax");
2659 }
2660
2661
2662 void
2663 i386_emit_eq_goto (int *offset_p, int *size_p)
2664 {
2665 EMIT_ASM32 (eq,
2666 /* Check low half first, more likely to be decider */
2667 "cmpl %eax,(%esp)\n\t"
2668 "jne .Leq_fallthru\n\t"
2669 "cmpl %ebx,4(%esp)\n\t"
2670 "jne .Leq_fallthru\n\t"
2671 "lea 0x8(%esp),%esp\n\t"
2672 "pop %eax\n\t"
2673 "pop %ebx\n\t"
2674 /* jmp, but don't trust the assembler to choose the right jump */
2675 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2676 ".Leq_fallthru:\n\t"
2677 "lea 0x8(%esp),%esp\n\t"
2678 "pop %eax\n\t"
2679 "pop %ebx");
2680
2681 if (offset_p)
2682 *offset_p = 18;
2683 if (size_p)
2684 *size_p = 4;
2685 }
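/* Aside (not part of the original source): the offset of 18 reported
   above assumes the usual 32-bit encodings of the emitted sequence:

     39 04 24         cmpl %eax,(%esp)       3 bytes
     75 NN            jne (short)            2 bytes
     39 5c 24 04      cmpl %ebx,4(%esp)      4 bytes
     75 NN            jne (short)            2 bytes
     8d 64 24 08      lea 0x8(%esp),%esp     4 bytes
     58               pop %eax               1 byte
     5b               pop %ebx               1 byte
     e9               jmp opcode             1 byte
                                             --------
                                             18 bytes

   i386_emit_ne_goto has the same layout and also reports 18, while
   the ordered comparisons (lt, le, gt, ge) compare the high half
   first and need one extra 2-byte short jump, hence 20.  */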
2686
2687 void
2688 i386_emit_ne_goto (int *offset_p, int *size_p)
2689 {
2690 EMIT_ASM32 (ne,
2691 /* Check low half first, more likely to be decider */
2692 "cmpl %eax,(%esp)\n\t"
2693 "jne .Lne_jump\n\t"
2694 "cmpl %ebx,4(%esp)\n\t"
2695 "je .Lne_fallthru\n\t"
2696 ".Lne_jump:\n\t"
2697 "lea 0x8(%esp),%esp\n\t"
2698 "pop %eax\n\t"
2699 "pop %ebx\n\t"
2700 /* jmp, but don't trust the assembler to choose the right jump */
2701 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2702 ".Lne_fallthru:\n\t"
2703 "lea 0x8(%esp),%esp\n\t"
2704 "pop %eax\n\t"
2705 "pop %ebx");
2706
2707 if (offset_p)
2708 *offset_p = 18;
2709 if (size_p)
2710 *size_p = 4;
2711 }
2712
2713 void
2714 i386_emit_lt_goto (int *offset_p, int *size_p)
2715 {
2716 EMIT_ASM32 (lt,
2717 "cmpl %ebx,4(%esp)\n\t"
2718 "jl .Llt_jump\n\t"
2719 "jne .Llt_fallthru\n\t"
2720 "cmpl %eax,(%esp)\n\t"
2721 "jnl .Llt_fallthru\n\t"
2722 ".Llt_jump:\n\t"
2723 "lea 0x8(%esp),%esp\n\t"
2724 "pop %eax\n\t"
2725 "pop %ebx\n\t"
2726 /* jmp, but don't trust the assembler to choose the right jump */
2727 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2728 ".Llt_fallthru:\n\t"
2729 "lea 0x8(%esp),%esp\n\t"
2730 "pop %eax\n\t"
2731 "pop %ebx");
2732
2733 if (offset_p)
2734 *offset_p = 20;
2735 if (size_p)
2736 *size_p = 4;
2737 }
2738
2739 void
2740 i386_emit_le_goto (int *offset_p, int *size_p)
2741 {
2742 EMIT_ASM32 (le,
2743 "cmpl %ebx,4(%esp)\n\t"
2744 "jle .Lle_jump\n\t"
2745 "jne .Lle_fallthru\n\t"
2746 "cmpl %eax,(%esp)\n\t"
2747 "jnle .Lle_fallthru\n\t"
2748 ".Lle_jump:\n\t"
2749 "lea 0x8(%esp),%esp\n\t"
2750 "pop %eax\n\t"
2751 "pop %ebx\n\t"
2752 /* jmp, but don't trust the assembler to choose the right jump */
2753 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2754 ".Lle_fallthru:\n\t"
2755 "lea 0x8(%esp),%esp\n\t"
2756 "pop %eax\n\t"
2757 "pop %ebx");
2758
2759 if (offset_p)
2760 *offset_p = 20;
2761 if (size_p)
2762 *size_p = 4;
2763 }
2764
2765 void
2766 i386_emit_gt_goto (int *offset_p, int *size_p)
2767 {
2768 EMIT_ASM32 (gt,
2769 "cmpl %ebx,4(%esp)\n\t"
2770 "jg .Lgt_jump\n\t"
2771 "jne .Lgt_fallthru\n\t"
2772 "cmpl %eax,(%esp)\n\t"
2773 "jng .Lgt_fallthru\n\t"
2774 ".Lgt_jump:\n\t"
2775 "lea 0x8(%esp),%esp\n\t"
2776 "pop %eax\n\t"
2777 "pop %ebx\n\t"
2778 /* jmp, but don't trust the assembler to choose the right jump */
2779 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2780 ".Lgt_fallthru:\n\t"
2781 "lea 0x8(%esp),%esp\n\t"
2782 "pop %eax\n\t"
2783 "pop %ebx");
2784
2785 if (offset_p)
2786 *offset_p = 20;
2787 if (size_p)
2788 *size_p = 4;
2789 }
2790
2791 void
2792 i386_emit_ge_goto (int *offset_p, int *size_p)
2793 {
2794 EMIT_ASM32 (ge,
2795 "cmpl %ebx,4(%esp)\n\t"
2796 "jge .Lge_jump\n\t"
2797 "jne .Lge_fallthru\n\t"
2798 "cmpl %eax,(%esp)\n\t"
2799 "jnge .Lge_fallthru\n\t"
2800 ".Lge_jump:\n\t"
2801 "lea 0x8(%esp),%esp\n\t"
2802 "pop %eax\n\t"
2803 "pop %ebx\n\t"
2804 /* jmp, but don't trust the assembler to choose the right jump */
2805 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2806 ".Lge_fallthru:\n\t"
2807 "lea 0x8(%esp),%esp\n\t"
2808 "pop %eax\n\t"
2809 "pop %ebx");
2810
2811 if (offset_p)
2812 *offset_p = 20;
2813 if (size_p)
2814 *size_p = 4;
2815 }
2816
2817 struct emit_ops i386_emit_ops =
2818 {
2819 i386_emit_prologue,
2820 i386_emit_epilogue,
2821 i386_emit_add,
2822 i386_emit_sub,
2823 i386_emit_mul,
2824 i386_emit_lsh,
2825 i386_emit_rsh_signed,
2826 i386_emit_rsh_unsigned,
2827 i386_emit_ext,
2828 i386_emit_log_not,
2829 i386_emit_bit_and,
2830 i386_emit_bit_or,
2831 i386_emit_bit_xor,
2832 i386_emit_bit_not,
2833 i386_emit_equal,
2834 i386_emit_less_signed,
2835 i386_emit_less_unsigned,
2836 i386_emit_ref,
2837 i386_emit_if_goto,
2838 i386_emit_goto,
2839 i386_write_goto_address,
2840 i386_emit_const,
2841 i386_emit_call,
2842 i386_emit_reg,
2843 i386_emit_pop,
2844 i386_emit_stack_flush,
2845 i386_emit_zero_ext,
2846 i386_emit_swap,
2847 i386_emit_stack_adjust,
2848 i386_emit_int_call_1,
2849 i386_emit_void_call_2,
2850 i386_emit_eq_goto,
2851 i386_emit_ne_goto,
2852 i386_emit_lt_goto,
2853 i386_emit_le_goto,
2854 i386_emit_gt_goto,
2855 i386_emit_ge_goto
2856 };
2857
2858
2859 static struct emit_ops *
2860 x86_emit_ops (void)
2861 {
2862 #ifdef __x86_64__
2863 if (is_64bit_tdesc ())
2864 return &amd64_emit_ops;
2865 else
2866 #endif
2867 return &i386_emit_ops;
2868 }
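/* Note (not part of the original source): this hook is installed in
   the_low_target below; when gdbserver compiles an agent expression
   for a fast tracepoint it uses it to pick the emitter table matching
   the inferior's word size, conceptually something like

     struct emit_ops *ops = the_low_target.emit_ops ();
     ops->emit_prologue ();

   so a 32-bit inferior under a 64-bit gdbserver still gets the i386
   emitters.  */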
2869
2870 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2871
2872 static const gdb_byte *
2873 x86_sw_breakpoint_from_kind (int kind, int *size)
2874 {
2875 *size = x86_breakpoint_len;
2876 return x86_breakpoint;
2877 }
2878
2879 static int
2880 x86_supports_range_stepping (void)
2881 {
2882 return 1;
2883 }
2884
2885 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2886 */
2887
2888 static int
2889 x86_supports_hardware_single_step (void)
2890 {
2891 return 1;
2892 }
2893
2894 /* This is initialized assuming an amd64 target.
2895 x86_arch_setup will correct it for i386 or amd64 targets. */
2896
2897 struct linux_target_ops the_low_target =
2898 {
2899 x86_arch_setup,
2900 x86_linux_regs_info,
2901 x86_cannot_fetch_register,
2902 x86_cannot_store_register,
2903 NULL, /* fetch_register */
2904 x86_get_pc,
2905 x86_set_pc,
2906 NULL, /* breakpoint_kind_from_pc */
2907 x86_sw_breakpoint_from_kind,
2908 NULL,
2909 1,
2910 x86_breakpoint_at,
2911 x86_supports_z_point_type,
2912 x86_insert_point,
2913 x86_remove_point,
2914 x86_stopped_by_watchpoint,
2915 x86_stopped_data_address,
2916 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2917 native i386 case (no registers smaller than an xfer unit), and are not
2918 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2919 NULL,
2920 NULL,
2921 /* need to fix up i386 siginfo if host is amd64 */
2922 x86_siginfo_fixup,
2923 x86_linux_new_process,
2924 x86_linux_new_thread,
2925 x86_linux_new_fork,
2926 x86_linux_prepare_to_resume,
2927 x86_linux_process_qsupported,
2928 x86_supports_tracepoints,
2929 x86_get_thread_area,
2930 x86_install_fast_tracepoint_jump_pad,
2931 x86_emit_ops,
2932 x86_get_min_fast_tracepoint_insn_len,
2933 x86_supports_range_stepping,
2934 NULL, /* breakpoint_kind_from_current_state */
2935 x86_supports_hardware_single_step,
2936 x86_get_syscall_trapinfo,
2937 };
2938
2939 void
2940 initialize_low_arch (void)
2941 {
2942 /* Initialize the Linux target descriptions. */
2943 #ifdef __x86_64__
2944 init_registers_amd64_linux ();
2945 init_registers_amd64_avx_linux ();
2946 init_registers_amd64_avx512_linux ();
2947 init_registers_amd64_mpx_linux ();
2948
2949 init_registers_x32_linux ();
2950 init_registers_x32_avx_linux ();
2951 init_registers_x32_avx512_linux ();
2952
2953 tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
2954 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
2955 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2956 #endif
2957 init_registers_i386_linux ();
2958 init_registers_i386_mmx_linux ();
2959 init_registers_i386_avx_linux ();
2960 init_registers_i386_avx512_linux ();
2961 init_registers_i386_mpx_linux ();
2962
2963 tdesc_i386_linux_no_xml = XNEW (struct target_desc);
2964 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
2965 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2966
2967 initialize_regsets_info (&x86_regsets_info);
2968 }