1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
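/* JUMP_INSN is a 5-byte "jmp rel32" (opcode 0xe9); SMALL_JUMP_INSN is a
   4-byte "jmp rel16" (operand-size prefix 0x66 plus 0xe9), used when only
   four bytes may be overwritten at the tracepoint address.  */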
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
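/* With these, fetching a 64-bit thread's FS base reduces to a single
   arch_prctl ptrace request, roughly (a sketch; see ps_get_thread_area
   and low_get_thread_area below for the real calls):

     unsigned long base;
     if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
       ... base now holds the FS segment base ...  */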
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (char **features, int count) override;
110
111 bool supports_tracepoints () override;
112
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
125 struct emit_ops *emit_ops () override;
126
127 protected:
128
129 void low_arch_setup () override;
130
131 bool low_cannot_fetch_register (int regno) override;
132
133 bool low_cannot_store_register (int regno) override;
134
135 bool low_supports_breakpoints () override;
136
137 CORE_ADDR low_get_pc (regcache *regcache) override;
138
139 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
140
141 int low_decr_pc_after_break () override;
142
143 bool low_breakpoint_at (CORE_ADDR pc) override;
144
145 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
146 int size, raw_breakpoint *bp) override;
147
148 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
149 int size, raw_breakpoint *bp) override;
150
151 bool low_stopped_by_watchpoint () override;
152
153 CORE_ADDR low_stopped_data_address () override;
154
155 /* collect_ptrace_register/supply_ptrace_register are not needed in the
156 native i386 case (no registers smaller than an xfer unit), and are not
157 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
158
159 /* Need to fix up i386 siginfo if host is amd64. */
160 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
161 int direction) override;
162
163 arch_process_info *low_new_process () override;
164
165 void low_delete_process (arch_process_info *info) override;
166
167 void low_new_thread (lwp_info *) override;
168
169 void low_delete_thread (arch_lwp_info *) override;
170
171 void low_new_fork (process_info *parent, process_info *child) override;
172
173 void low_prepare_to_resume (lwp_info *lwp) override;
174
175 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
176
177 bool low_supports_range_stepping () override;
178
179 private:
180
181 /* Update the target description of all processes; a new GDB has
182 connected, and it may or may not support XML target descriptions. */
183 void update_xmltarget ();
184 };
185
186 /* The singleton target ops object. */
187
188 static x86_target the_x86_target;
189
190 /* Per-process arch-specific data we want to keep. */
191
192 struct arch_process_info
193 {
194 struct x86_debug_reg_state debug_reg_state;
195 };
196
197 #ifdef __x86_64__
198
199 /* Mapping between the general-purpose registers in `struct user'
200 format and GDB's register array layout.
201 Note that the transfer layout uses 64-bit regs. */
202 static /*const*/ int i386_regmap[] =
203 {
204 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
205 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
206 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
207 DS * 8, ES * 8, FS * 8, GS * 8
208 };
209
210 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
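/* For illustration: GDB's register 0 (%eax) is transferred at byte offset
   RAX * 8 within the 64-bit `struct user' layout, so x86_fill_gregset
   below ends up doing the equivalent of
     collect_register (regcache, 0, (char *) buf + i386_regmap[0]);
   for a 32-bit inferior running under a 64-bit gdbserver.  */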
211
212 /* So that the code below doesn't have to care whether it's i386 or amd64. */
213 #define ORIG_EAX ORIG_RAX
214 #define REGSIZE 8
215
216 static const int x86_64_regmap[] =
217 {
218 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
219 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
220 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
221 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
222 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
223 DS * 8, ES * 8, FS * 8, GS * 8,
224 -1, -1, -1, -1, -1, -1, -1, -1,
225 -1, -1, -1, -1, -1, -1, -1, -1,
226 -1, -1, -1, -1, -1, -1, -1, -1,
227 -1,
228 -1, -1, -1, -1, -1, -1, -1, -1,
229 ORIG_RAX * 8,
230 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
231 21 * 8, 22 * 8,
232 #else
233 -1, -1,
234 #endif
235 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
236 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
237 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
238 -1, -1, -1, -1, -1, -1, -1, -1,
239 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
240 -1, -1, -1, -1, -1, -1, -1, -1,
241 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
242 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
243 -1, -1, -1, -1, -1, -1, -1, -1,
244 -1, -1, -1, -1, -1, -1, -1, -1,
245 -1, -1, -1, -1, -1, -1, -1, -1,
246 -1 /* pkru */
247 };
248
249 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
250 #define X86_64_USER_REGS (GS + 1)
251
252 #else /* ! __x86_64__ */
253
254 /* Mapping between the general-purpose registers in `struct user'
255 format and GDB's register array layout. */
256 static /*const*/ int i386_regmap[] =
257 {
258 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
259 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
260 EIP * 4, EFL * 4, CS * 4, SS * 4,
261 DS * 4, ES * 4, FS * 4, GS * 4
262 };
263
264 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
265
266 #define REGSIZE 4
267
268 #endif
269
270 #ifdef __x86_64__
271
272 /* Returns true if the current inferior belongs to an x86-64 process,
273 per the tdesc. */
274
275 static int
276 is_64bit_tdesc (void)
277 {
278 struct regcache *regcache = get_thread_regcache (current_thread, 0);
279
280 return register_size (regcache->tdesc, 0) == 8;
281 }
282
283 #endif
284
285 \f
286 /* Called by libthread_db. */
287
288 ps_err_e
289 ps_get_thread_area (struct ps_prochandle *ph,
290 lwpid_t lwpid, int idx, void **base)
291 {
292 #ifdef __x86_64__
293 int use_64bit = is_64bit_tdesc ();
294
295 if (use_64bit)
296 {
297 switch (idx)
298 {
299 case FS:
300 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
301 return PS_OK;
302 break;
303 case GS:
304 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
305 return PS_OK;
306 break;
307 default:
308 return PS_BADADDR;
309 }
310 return PS_ERR;
311 }
312 #endif
313
314 {
315 unsigned int desc[4];
316
317 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
318 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
319 return PS_ERR;
320
321 /* Ensure we properly extend the value to 64-bits for x86_64. */
322 *base = (void *) (uintptr_t) desc[1];
323 return PS_OK;
324 }
325 }
326
327 /* Get the thread area address. This is used to recognize which
328 thread is which when tracing with the in-process agent library. We
329 don't read anything from the address, and treat it as opaque; it's
330 the address itself that we assume is unique per-thread. */
331
332 int
333 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
334 {
335 #ifdef __x86_64__
336 int use_64bit = is_64bit_tdesc ();
337
338 if (use_64bit)
339 {
340 void *base;
341 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
342 {
343 *addr = (CORE_ADDR) (uintptr_t) base;
344 return 0;
345 }
346
347 return -1;
348 }
349 #endif
350
351 {
352 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
353 struct thread_info *thr = get_lwp_thread (lwp);
354 struct regcache *regcache = get_thread_regcache (thr, 1);
355 unsigned int desc[4];
356 ULONGEST gs = 0;
357 const int reg_thread_area = 3; /* Shift the GS selector right 3 bits to get the GDT index. */
358 int idx;
359
360 collect_register_by_name (regcache, "gs", &gs);
361
362 idx = gs >> reg_thread_area;
363
364 if (ptrace (PTRACE_GET_THREAD_AREA,
365 lwpid_of (thr),
366 (void *) (long) idx, (unsigned long) &desc) < 0)
367 return -1;
368
369 *addr = desc[1];
370 return 0;
371 }
372 }
373
374
375 \f
376 bool
377 x86_target::low_cannot_store_register (int regno)
378 {
379 #ifdef __x86_64__
380 if (is_64bit_tdesc ())
381 return false;
382 #endif
383
384 return regno >= I386_NUM_REGS;
385 }
386
387 bool
388 x86_target::low_cannot_fetch_register (int regno)
389 {
390 #ifdef __x86_64__
391 if (is_64bit_tdesc ())
392 return false;
393 #endif
394
395 return regno >= I386_NUM_REGS;
396 }
397
398 static void
399 x86_fill_gregset (struct regcache *regcache, void *buf)
400 {
401 int i;
402
403 #ifdef __x86_64__
404 if (register_size (regcache->tdesc, 0) == 8)
405 {
406 for (i = 0; i < X86_64_NUM_REGS; i++)
407 if (x86_64_regmap[i] != -1)
408 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
409
410 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
411 {
412 unsigned long base;
413 int lwpid = lwpid_of (current_thread);
414
415 collect_register_by_name (regcache, "fs_base", &base);
416 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
417
418 collect_register_by_name (regcache, "gs_base", &base);
419 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
420 }
421 #endif
422
423 return;
424 }
425
426 /* 32-bit inferior registers need to be zero-extended.
427 Callers would read uninitialized memory otherwise. */
428 memset (buf, 0x00, X86_64_USER_REGS * 8);
429 #endif
430
431 for (i = 0; i < I386_NUM_REGS; i++)
432 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
433
434 collect_register_by_name (regcache, "orig_eax",
435 ((char *) buf) + ORIG_EAX * REGSIZE);
436
437 #ifdef __x86_64__
438 /* Sign extend EAX value to avoid potential syscall restart
439 problems.
440
441 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
442 for a detailed explanation. */
443 if (register_size (regcache->tdesc, 0) == 4)
444 {
445 void *ptr = ((gdb_byte *) buf
446 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
447
448 *(int64_t *) ptr = *(int32_t *) ptr;
449 }
450 #endif
451 }
452
453 static void
454 x86_store_gregset (struct regcache *regcache, const void *buf)
455 {
456 int i;
457
458 #ifdef __x86_64__
459 if (register_size (regcache->tdesc, 0) == 8)
460 {
461 for (i = 0; i < X86_64_NUM_REGS; i++)
462 if (x86_64_regmap[i] != -1)
463 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
464
465 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
466 {
467 unsigned long base;
468 int lwpid = lwpid_of (current_thread);
469
470 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
471 supply_register_by_name (regcache, "fs_base", &base);
472
473 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
474 supply_register_by_name (regcache, "gs_base", &base);
475 }
476 #endif
477 return;
478 }
479 #endif
480
481 for (i = 0; i < I386_NUM_REGS; i++)
482 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
483
484 supply_register_by_name (regcache, "orig_eax",
485 ((char *) buf) + ORIG_EAX * REGSIZE);
486 }
487
488 static void
489 x86_fill_fpregset (struct regcache *regcache, void *buf)
490 {
491 #ifdef __x86_64__
492 i387_cache_to_fxsave (regcache, buf);
493 #else
494 i387_cache_to_fsave (regcache, buf);
495 #endif
496 }
497
498 static void
499 x86_store_fpregset (struct regcache *regcache, const void *buf)
500 {
501 #ifdef __x86_64__
502 i387_fxsave_to_cache (regcache, buf);
503 #else
504 i387_fsave_to_cache (regcache, buf);
505 #endif
506 }
507
508 #ifndef __x86_64__
509
510 static void
511 x86_fill_fpxregset (struct regcache *regcache, void *buf)
512 {
513 i387_cache_to_fxsave (regcache, buf);
514 }
515
516 static void
517 x86_store_fpxregset (struct regcache *regcache, const void *buf)
518 {
519 i387_fxsave_to_cache (regcache, buf);
520 }
521
522 #endif
523
524 static void
525 x86_fill_xstateregset (struct regcache *regcache, void *buf)
526 {
527 i387_cache_to_xsave (regcache, buf);
528 }
529
530 static void
531 x86_store_xstateregset (struct regcache *regcache, const void *buf)
532 {
533 i387_xsave_to_cache (regcache, buf);
534 }
535
536 /* ??? The non-biarch i386 case stores all the i387 regs twice.
537 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
538 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
539 doesn't work. It would be nice to avoid the duplication in the case where it
540 does work. Maybe the arch_setup routine could check whether it works
541 and update the supported regsets accordingly. */
542
543 static struct regset_info x86_regsets[] =
544 {
545 #ifdef HAVE_PTRACE_GETREGS
546 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
547 GENERAL_REGS,
548 x86_fill_gregset, x86_store_gregset },
549 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
550 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
551 # ifndef __x86_64__
552 # ifdef HAVE_PTRACE_GETFPXREGS
553 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
554 EXTENDED_REGS,
555 x86_fill_fpxregset, x86_store_fpxregset },
556 # endif
557 # endif
558 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
559 FP_REGS,
560 x86_fill_fpregset, x86_store_fpregset },
561 #endif /* HAVE_PTRACE_GETREGS */
562 NULL_REGSET
563 };
564
565 bool
566 x86_target::low_supports_breakpoints ()
567 {
568 return true;
569 }
570
571 CORE_ADDR
572 x86_target::low_get_pc (regcache *regcache)
573 {
574 int use_64bit = register_size (regcache->tdesc, 0) == 8;
575
576 if (use_64bit)
577 {
578 uint64_t pc;
579
580 collect_register_by_name (regcache, "rip", &pc);
581 return (CORE_ADDR) pc;
582 }
583 else
584 {
585 uint32_t pc;
586
587 collect_register_by_name (regcache, "eip", &pc);
588 return (CORE_ADDR) pc;
589 }
590 }
591
592 void
593 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
594 {
595 int use_64bit = register_size (regcache->tdesc, 0) == 8;
596
597 if (use_64bit)
598 {
599 uint64_t newpc = pc;
600
601 supply_register_by_name (regcache, "rip", &newpc);
602 }
603 else
604 {
605 uint32_t newpc = pc;
606
607 supply_register_by_name (regcache, "eip", &newpc);
608 }
609 }
610
611 int
612 x86_target::low_decr_pc_after_break ()
613 {
614 return 1;
615 }
616
617 \f
618 static const gdb_byte x86_breakpoint[] = { 0xCC };
619 #define x86_breakpoint_len 1
620
621 bool
622 x86_target::low_breakpoint_at (CORE_ADDR pc)
623 {
624 unsigned char c;
625
626 read_memory (pc, &c, 1);
627 if (c == 0xCC)
628 return true;
629
630 return false;
631 }
632 \f
633 /* Low-level function vector. */
634 struct x86_dr_low_type x86_dr_low =
635 {
636 x86_linux_dr_set_control,
637 x86_linux_dr_set_addr,
638 x86_linux_dr_get_addr,
639 x86_linux_dr_get_status,
640 x86_linux_dr_get_control,
641 sizeof (void *),
642 };
643 \f
644 /* Breakpoint/Watchpoint support. */
645
646 bool
647 x86_target::supports_z_point_type (char z_type)
648 {
649 switch (z_type)
650 {
651 case Z_PACKET_SW_BP:
652 case Z_PACKET_HW_BP:
653 case Z_PACKET_WRITE_WP:
654 case Z_PACKET_ACCESS_WP:
655 return true;
656 default:
657 return false;
658 }
659 }
660
661 int
662 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
663 int size, raw_breakpoint *bp)
664 {
665 struct process_info *proc = current_process ();
666
667 switch (type)
668 {
669 case raw_bkpt_type_hw:
670 case raw_bkpt_type_write_wp:
671 case raw_bkpt_type_access_wp:
672 {
673 enum target_hw_bp_type hw_type
674 = raw_bkpt_type_to_target_hw_bp_type (type);
675 struct x86_debug_reg_state *state
676 = &proc->priv->arch_private->debug_reg_state;
677
678 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
679 }
680
681 default:
682 /* Unsupported. */
683 return 1;
684 }
685 }
686
687 int
688 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
689 int size, raw_breakpoint *bp)
690 {
691 struct process_info *proc = current_process ();
692
693 switch (type)
694 {
695 case raw_bkpt_type_hw:
696 case raw_bkpt_type_write_wp:
697 case raw_bkpt_type_access_wp:
698 {
699 enum target_hw_bp_type hw_type
700 = raw_bkpt_type_to_target_hw_bp_type (type);
701 struct x86_debug_reg_state *state
702 = &proc->priv->arch_private->debug_reg_state;
703
704 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
705 }
706 default:
707 /* Unsupported. */
708 return 1;
709 }
710 }
711
712 bool
713 x86_target::low_stopped_by_watchpoint ()
714 {
715 struct process_info *proc = current_process ();
716 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
717 }
718
719 CORE_ADDR
720 x86_target::low_stopped_data_address ()
721 {
722 struct process_info *proc = current_process ();
723 CORE_ADDR addr;
724 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
725 &addr))
726 return addr;
727 return 0;
728 }
729 \f
730 /* Called when a new process is created. */
731
732 arch_process_info *
733 x86_target::low_new_process ()
734 {
735 struct arch_process_info *info = XCNEW (struct arch_process_info);
736
737 x86_low_init_dregs (&info->debug_reg_state);
738
739 return info;
740 }
741
742 /* Called when a process is being deleted. */
743
744 void
745 x86_target::low_delete_process (arch_process_info *info)
746 {
747 xfree (info);
748 }
749
750 void
751 x86_target::low_new_thread (lwp_info *lwp)
752 {
753 /* This comes from nat/. */
754 x86_linux_new_thread (lwp);
755 }
756
757 void
758 x86_target::low_delete_thread (arch_lwp_info *alwp)
759 {
760 /* This comes from nat/. */
761 x86_linux_delete_thread (alwp);
762 }
763
764 /* Target routine for new_fork. */
765
766 void
767 x86_target::low_new_fork (process_info *parent, process_info *child)
768 {
769 /* These are allocated by linux_add_process. */
770 gdb_assert (parent->priv != NULL
771 && parent->priv->arch_private != NULL);
772 gdb_assert (child->priv != NULL
773 && child->priv->arch_private != NULL);
774
775 /* Linux kernel before 2.6.33 commit
776 72f674d203cd230426437cdcf7dd6f681dad8b0d
777 will inherit hardware debug registers from parent
778 on fork/vfork/clone. Newer Linux kernels create such tasks with
779 zeroed debug registers.
780
781 GDB core assumes the child inherits the watchpoints/hw
782 breakpoints of the parent, and will remove them all from the
783 forked off process. Copy the debug registers mirrors into the
784 new process so that all breakpoints and watchpoints can be
785 removed together. The debug registers mirror will become zeroed
786 in the end before detaching the forked off process, thus making
787 this compatible with older Linux kernels too. */
788
789 *child->priv->arch_private = *parent->priv->arch_private;
790 }
791
792 void
793 x86_target::low_prepare_to_resume (lwp_info *lwp)
794 {
795 /* This comes from nat/. */
796 x86_linux_prepare_to_resume (lwp);
797 }
798
799 /* See nat/x86-dregs.h. */
800
801 struct x86_debug_reg_state *
802 x86_debug_reg_state (pid_t pid)
803 {
804 struct process_info *proc = find_process_pid (pid);
805
806 return &proc->priv->arch_private->debug_reg_state;
807 }
808 \f
809 /* When GDBSERVER is built as a 64-bit application on linux, the
810 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
811 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
812 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
813 conversion in-place ourselves. */
814
815 /* Convert a ptrace/host siginfo object into/from the siginfo in the
816 layout of the inferior's architecture. Returns true if any
817 conversion was done; false otherwise. If DIRECTION is 1, then copy
818 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
819 INF. */
820
821 bool
822 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
823 {
824 #ifdef __x86_64__
825 unsigned int machine;
826 int tid = lwpid_of (current_thread);
827 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
828
829 /* Is the inferior 32-bit? If so, then fix up the siginfo object. */
830 if (!is_64bit_tdesc ())
831 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
832 FIXUP_32);
833 /* No fixup for native x32 GDB. */
834 else if (!is_elf64 && sizeof (void *) == 8)
835 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
836 FIXUP_X32);
837 #endif
838
839 return false;
840 }
841 \f
842 static int use_xml;
843
844 /* Format of XSAVE extended state is:
845 struct
846 {
847 fxsave_bytes[0..463]
848 sw_usable_bytes[464..511]
849 xstate_hdr_bytes[512..575]
850 avx_bytes[576..831]
851 future_state etc
852 };
853
854 Same memory layout will be used for the coredump NT_X86_XSTATE
855 representing the XSAVE extended state registers.
856
857 The first 8 bytes of the sw_usable_bytes[464..471] are the OS-enabled
858 extended state mask, which is the same as the extended control register
859 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
860 together with the mask saved in the xstate_hdr_bytes to determine what
861 states the processor/OS supports and what state, used or initialized,
862 the process/thread is in. */
863 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
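/* So, given the XSAVE extended state fetched via PTRACE_GETREGSET with
   NT_X86_XSTATE into a uint64_t array XSTATEREGS, XCR0 is simply (as
   x86_linux_read_description does below):

     xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];  */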
864
865 /* Does the current host support the GETFPXREGS request? The header
866 file may or may not define it, and even if it is defined, the
867 kernel will return EIO if it's running on a pre-SSE processor. */
868 int have_ptrace_getfpxregs =
869 #ifdef HAVE_PTRACE_GETFPXREGS
870 -1
871 #else
872 0
873 #endif
874 ;
875
876 /* Get the Linux/x86 target description from the running target. */
877
878 static const struct target_desc *
879 x86_linux_read_description (void)
880 {
881 unsigned int machine;
882 int is_elf64;
883 int xcr0_features;
884 int tid;
885 static uint64_t xcr0;
886 struct regset_info *regset;
887
888 tid = lwpid_of (current_thread);
889
890 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
891
892 if (sizeof (void *) == 4)
893 {
894 if (is_elf64 > 0)
895 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
896 #ifndef __x86_64__
897 else if (machine == EM_X86_64)
898 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
899 #endif
900 }
901
902 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
903 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
904 {
905 elf_fpxregset_t fpxregs;
906
907 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
908 {
909 have_ptrace_getfpxregs = 0;
910 have_ptrace_getregset = 0;
911 return i386_linux_read_description (X86_XSTATE_X87);
912 }
913 else
914 have_ptrace_getfpxregs = 1;
915 }
916 #endif
917
918 if (!use_xml)
919 {
920 x86_xcr0 = X86_XSTATE_SSE_MASK;
921
922 /* Don't use XML. */
923 #ifdef __x86_64__
924 if (machine == EM_X86_64)
925 return tdesc_amd64_linux_no_xml;
926 else
927 #endif
928 return tdesc_i386_linux_no_xml;
929 }
930
931 if (have_ptrace_getregset == -1)
932 {
933 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
934 struct iovec iov;
935
936 iov.iov_base = xstateregs;
937 iov.iov_len = sizeof (xstateregs);
938
939 /* Check if PTRACE_GETREGSET works. */
940 if (ptrace (PTRACE_GETREGSET, tid,
941 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
942 have_ptrace_getregset = 0;
943 else
944 {
945 have_ptrace_getregset = 1;
946
947 /* Get XCR0 from XSAVE extended state. */
948 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
949 / sizeof (uint64_t))];
950
951 /* Use PTRACE_GETREGSET if it is available. */
952 for (regset = x86_regsets;
953 regset->fill_function != NULL; regset++)
954 if (regset->get_request == PTRACE_GETREGSET)
955 regset->size = X86_XSTATE_SIZE (xcr0);
956 else if (regset->type != GENERAL_REGS)
957 regset->size = 0;
958 }
959 }
960
961 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
962 xcr0_features = (have_ptrace_getregset
963 && (xcr0 & X86_XSTATE_ALL_MASK));
964
965 if (xcr0_features)
966 x86_xcr0 = xcr0;
967
968 if (machine == EM_X86_64)
969 {
970 #ifdef __x86_64__
971 const target_desc *tdesc = NULL;
972
973 if (xcr0_features)
974 {
975 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
976 !is_elf64);
977 }
978
979 if (tdesc == NULL)
980 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
981 return tdesc;
982 #endif
983 }
984 else
985 {
986 const target_desc *tdesc = NULL;
987
988 if (xcr0_features)
989 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
990
991 if (tdesc == NULL)
992 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
993
994 return tdesc;
995 }
996
997 gdb_assert_not_reached ("failed to return tdesc");
998 }
999
1000 /* Update the target description of all processes; a new GDB has
1001 connected, and it may or may not support XML target descriptions. */
1002
1003 void
1004 x86_target::update_xmltarget ()
1005 {
1006 struct thread_info *saved_thread = current_thread;
1007
1008 /* Before changing the register cache's internal layout, flush the
1009 contents of the current valid caches back to the threads, and
1010 release the current regcache objects. */
1011 regcache_release ();
1012
1013 for_each_process ([this] (process_info *proc) {
1014 int pid = proc->pid;
1015
1016 /* Look up any thread of this process. */
1017 current_thread = find_any_thread_of_pid (pid);
1018
1019 low_arch_setup ();
1020 });
1021
1022 current_thread = saved_thread;
1023 }
1024
1025 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1026 PTRACE_GETREGSET. */
1027
1028 void
1029 x86_target::process_qsupported (char **features, int count)
1030 {
1031 int i;
1032
1033 /* Assume GDB doesn't support XML unless it sends "xmlRegisters="
1034 with "i386" in its qSupported query, in which case it supports x86
1035 XML target descriptions. */
1036 use_xml = 0;
1037 for (i = 0; i < count; i++)
1038 {
1039 const char *feature = features[i];
1040
1041 if (startswith (feature, "xmlRegisters="))
1042 {
1043 char *copy = xstrdup (feature + 13);
1044
1045 char *saveptr;
1046 for (char *p = strtok_r (copy, ",", &saveptr);
1047 p != NULL;
1048 p = strtok_r (NULL, ",", &saveptr))
1049 {
1050 if (strcmp (p, "i386") == 0)
1051 {
1052 use_xml = 1;
1053 break;
1054 }
1055 }
1056
1057 free (copy);
1058 }
1059 }
1060 update_xmltarget ();
1061 }
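/* For example, an x86-aware GDB advertises this with a qSupported feature
   of the form "xmlRegisters=i386" (possibly listing further architectures,
   comma-separated); spotting the "i386" token above is what turns USE_XML
   on.  */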
1062
1063 /* Common for x86/x86-64. */
1064
1065 static struct regsets_info x86_regsets_info =
1066 {
1067 x86_regsets, /* regsets */
1068 0, /* num_regsets */
1069 NULL, /* disabled_regsets */
1070 };
1071
1072 #ifdef __x86_64__
1073 static struct regs_info amd64_linux_regs_info =
1074 {
1075 NULL, /* regset_bitmap */
1076 NULL, /* usrregs_info */
1077 &x86_regsets_info
1078 };
1079 #endif
1080 static struct usrregs_info i386_linux_usrregs_info =
1081 {
1082 I386_NUM_REGS,
1083 i386_regmap,
1084 };
1085
1086 static struct regs_info i386_linux_regs_info =
1087 {
1088 NULL, /* regset_bitmap */
1089 &i386_linux_usrregs_info,
1090 &x86_regsets_info
1091 };
1092
1093 const regs_info *
1094 x86_target::get_regs_info ()
1095 {
1096 #ifdef __x86_64__
1097 if (is_64bit_tdesc ())
1098 return &amd64_linux_regs_info;
1099 else
1100 #endif
1101 return &i386_linux_regs_info;
1102 }
1103
1104 /* Initialize the target description for the architecture of the
1105 inferior. */
1106
1107 void
1108 x86_target::low_arch_setup ()
1109 {
1110 current_process ()->tdesc = x86_linux_read_description ();
1111 }
1112
1113 /* Fill *SYSNO with the number of the syscall that was trapped. This
1114 should only be called if LWP got a SYSCALL_SIGTRAP. */
1115
1116 static void
1117 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1118 {
1119 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1120
1121 if (use_64bit)
1122 {
1123 long l_sysno;
1124
1125 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1126 *sysno = (int) l_sysno;
1127 }
1128 else
1129 collect_register_by_name (regcache, "orig_eax", sysno);
1130 }
1131
1132 bool
1133 x86_target::supports_tracepoints ()
1134 {
1135 return true;
1136 }
1137
1138 static void
1139 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1140 {
1141 target_write_memory (*to, buf, len);
1142 *to += len;
1143 }
1144
1145 static int
1146 push_opcode (unsigned char *buf, const char *op)
1147 {
1148 unsigned char *buf_org = buf;
1149
1150 while (1)
1151 {
1152 char *endptr;
1153 unsigned long ul = strtoul (op, &endptr, 16);
1154
1155 if (endptr == op)
1156 break;
1157
1158 *buf++ = ul;
1159 op = endptr;
1160 }
1161
1162 return buf - buf_org;
1163 }
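/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 (mov %rsp,%rsi) into BUF and returns 3, which is why the
   jump pad builders below chain calls as
     i += push_opcode (&buf[i], "...");  */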
1164
1165 #ifdef __x86_64__
1166
1167 /* Build a jump pad that saves registers and calls a collection
1168 function. Writes the jump instruction that jumps to the jump pad
1169 into JJUMPAD_INSN. The caller is responsible for writing it in at
1170 the tracepoint address. */
1171
1172 static int
1173 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1174 CORE_ADDR collector,
1175 CORE_ADDR lockaddr,
1176 ULONGEST orig_size,
1177 CORE_ADDR *jump_entry,
1178 CORE_ADDR *trampoline,
1179 ULONGEST *trampoline_size,
1180 unsigned char *jjump_pad_insn,
1181 ULONGEST *jjump_pad_insn_size,
1182 CORE_ADDR *adjusted_insn_addr,
1183 CORE_ADDR *adjusted_insn_addr_end,
1184 char *err)
1185 {
1186 unsigned char buf[40];
1187 int i, offset;
1188 int64_t loffset;
1189
1190 CORE_ADDR buildaddr = *jump_entry;
1191
1192 /* Build the jump pad. */
1193
1194 /* First, do tracepoint data collection. Save registers. */
1195 i = 0;
1196 /* Need to ensure stack pointer saved first. */
1197 buf[i++] = 0x54; /* push %rsp */
1198 buf[i++] = 0x55; /* push %rbp */
1199 buf[i++] = 0x57; /* push %rdi */
1200 buf[i++] = 0x56; /* push %rsi */
1201 buf[i++] = 0x52; /* push %rdx */
1202 buf[i++] = 0x51; /* push %rcx */
1203 buf[i++] = 0x53; /* push %rbx */
1204 buf[i++] = 0x50; /* push %rax */
1205 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1206 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1207 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1208 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1209 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1210 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1211 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1212 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1213 buf[i++] = 0x9c; /* pushfq */
1214 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1215 buf[i++] = 0xbf;
1216 memcpy (buf + i, &tpaddr, 8);
1217 i += 8;
1218 buf[i++] = 0x57; /* push %rdi */
1219 append_insns (&buildaddr, i, buf);
1220
1221 /* Stack space for the collecting_t object. */
1222 i = 0;
1223 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1224 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1225 memcpy (buf + i, &tpoint, 8);
1226 i += 8;
1227 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1228 i += push_opcode (&buf[i],
1229 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1230 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1231 append_insns (&buildaddr, i, buf);
1232
1233 /* spin-lock. */
1234 i = 0;
1235 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1236 memcpy (&buf[i], (void *) &lockaddr, 8);
1237 i += 8;
1238 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1239 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1240 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1241 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1242 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1243 append_insns (&buildaddr, i, buf);
1244
1245 /* Set up the gdb_collect call. */
1246 /* At this point, (stack pointer + 0x18) is the base of our saved
1247 register block. */
1248
1249 i = 0;
1250 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1251 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1252
1253 /* tpoint address may be 64-bit wide. */
1254 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1255 memcpy (buf + i, &tpoint, 8);
1256 i += 8;
1257 append_insns (&buildaddr, i, buf);
1258
1259 /* The collector function, being in the shared library, may be more
1260 than 31 bits away from the jump pad. */
1261 i = 0;
1262 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1263 memcpy (buf + i, &collector, 8);
1264 i += 8;
1265 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1266 append_insns (&buildaddr, i, buf);
1267
1268 /* Clear the spin-lock. */
1269 i = 0;
1270 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1271 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1272 memcpy (buf + i, &lockaddr, 8);
1273 i += 8;
1274 append_insns (&buildaddr, i, buf);
1275
1276 /* Remove the stack space that had been used for the collecting_t object. */
1277 i = 0;
1278 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1279 append_insns (&buildaddr, i, buf);
1280
1281 /* Restore register state. */
1282 i = 0;
1283 buf[i++] = 0x48; /* add $0x8,%rsp */
1284 buf[i++] = 0x83;
1285 buf[i++] = 0xc4;
1286 buf[i++] = 0x08;
1287 buf[i++] = 0x9d; /* popfq */
1288 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1289 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1290 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1291 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1292 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1293 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1294 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1295 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1296 buf[i++] = 0x58; /* pop %rax */
1297 buf[i++] = 0x5b; /* pop %rbx */
1298 buf[i++] = 0x59; /* pop %rcx */
1299 buf[i++] = 0x5a; /* pop %rdx */
1300 buf[i++] = 0x5e; /* pop %rsi */
1301 buf[i++] = 0x5f; /* pop %rdi */
1302 buf[i++] = 0x5d; /* pop %rbp */
1303 buf[i++] = 0x5c; /* pop %rsp */
1304 append_insns (&buildaddr, i, buf);
1305
1306 /* Now, adjust the original instruction to execute in the jump
1307 pad. */
1308 *adjusted_insn_addr = buildaddr;
1309 relocate_instruction (&buildaddr, tpaddr);
1310 *adjusted_insn_addr_end = buildaddr;
1311
1312 /* Finally, write a jump back to the program. */
1313
1314 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1315 if (loffset > INT_MAX || loffset < INT_MIN)
1316 {
1317 sprintf (err,
1318 "E.Jump back from jump pad too far from tracepoint "
1319 "(offset 0x%" PRIx64 " > int32).", loffset);
1320 return 1;
1321 }
1322
1323 offset = (int) loffset;
1324 memcpy (buf, jump_insn, sizeof (jump_insn));
1325 memcpy (buf + 1, &offset, 4);
1326 append_insns (&buildaddr, sizeof (jump_insn), buf);
1327
1328 /* The jump pad is now built. Wire in a jump to our jump pad. This
1329 is always done last (by our caller actually), so that we can
1330 install fast tracepoints with threads running. This relies on
1331 the agent's atomic write support. */
1332 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1333 if (loffset > INT_MAX || loffset < INT_MIN)
1334 {
1335 sprintf (err,
1336 "E.Jump pad too far from tracepoint "
1337 "(offset 0x%" PRIx64 " > int32).", loffset);
1338 return 1;
1339 }
1340
1341 offset = (int) loffset;
1342
1343 memcpy (buf, jump_insn, sizeof (jump_insn));
1344 memcpy (buf + 1, &offset, 4);
1345 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1346 *jjump_pad_insn_size = sizeof (jump_insn);
1347
1348 /* Return the end address of our pad. */
1349 *jump_entry = buildaddr;
1350
1351 return 0;
1352 }
1353
1354 #endif /* __x86_64__ */
1355
1356 /* Build a jump pad that saves registers and calls a collection
1357 function. Writes the jump instruction that jumps to the jump pad
1358 into JJUMPAD_INSN. The caller is responsible for writing it in at
1359 the tracepoint address. */
1360
1361 static int
1362 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1363 CORE_ADDR collector,
1364 CORE_ADDR lockaddr,
1365 ULONGEST orig_size,
1366 CORE_ADDR *jump_entry,
1367 CORE_ADDR *trampoline,
1368 ULONGEST *trampoline_size,
1369 unsigned char *jjump_pad_insn,
1370 ULONGEST *jjump_pad_insn_size,
1371 CORE_ADDR *adjusted_insn_addr,
1372 CORE_ADDR *adjusted_insn_addr_end,
1373 char *err)
1374 {
1375 unsigned char buf[0x100];
1376 int i, offset;
1377 CORE_ADDR buildaddr = *jump_entry;
1378
1379 /* Build the jump pad. */
1380
1381 /* First, do tracepoint data collection. Save registers. */
1382 i = 0;
1383 buf[i++] = 0x60; /* pushad */
1384 buf[i++] = 0x68; /* push tpaddr aka $pc */
1385 *((int *)(buf + i)) = (int) tpaddr;
1386 i += 4;
1387 buf[i++] = 0x9c; /* pushf */
1388 buf[i++] = 0x1e; /* push %ds */
1389 buf[i++] = 0x06; /* push %es */
1390 buf[i++] = 0x0f; /* push %fs */
1391 buf[i++] = 0xa0;
1392 buf[i++] = 0x0f; /* push %gs */
1393 buf[i++] = 0xa8;
1394 buf[i++] = 0x16; /* push %ss */
1395 buf[i++] = 0x0e; /* push %cs */
1396 append_insns (&buildaddr, i, buf);
1397
1398 /* Stack space for the collecting_t object. */
1399 i = 0;
1400 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1401
1402 /* Build the object. */
1403 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1404 memcpy (buf + i, &tpoint, 4);
1405 i += 4;
1406 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1407
1408 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1409 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1410 append_insns (&buildaddr, i, buf);
1411
1412 /* Spin-lock. Note this uses cmpxchg, which is not available on the
1413 original i386. If we cared about that, this could use xchg instead. */
1414
1415 i = 0;
1416 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1417 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1418 %esp,<lockaddr> */
1419 memcpy (&buf[i], (void *) &lockaddr, 4);
1420 i += 4;
1421 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1422 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1423 append_insns (&buildaddr, i, buf);
1424
1425
1426 /* Set up arguments to the gdb_collect call. */
1427 i = 0;
1428 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1429 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1430 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1431 append_insns (&buildaddr, i, buf);
1432
1433 i = 0;
1434 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1435 append_insns (&buildaddr, i, buf);
1436
1437 i = 0;
1438 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1439 memcpy (&buf[i], (void *) &tpoint, 4);
1440 i += 4;
1441 append_insns (&buildaddr, i, buf);
1442
1443 buf[0] = 0xe8; /* call <reladdr> */
1444 offset = collector - (buildaddr + sizeof (jump_insn));
1445 memcpy (buf + 1, &offset, 4);
1446 append_insns (&buildaddr, 5, buf);
1447 /* Clean up after the call. */
1448 buf[0] = 0x83; /* add $0x8,%esp */
1449 buf[1] = 0xc4;
1450 buf[2] = 0x08;
1451 append_insns (&buildaddr, 3, buf);
1452
1453
1454 /* Clear the spin-lock. This would need the LOCK prefix on older
1455 broken archs. */
1456 i = 0;
1457 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1458 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1459 memcpy (buf + i, &lockaddr, 4);
1460 i += 4;
1461 append_insns (&buildaddr, i, buf);
1462
1463
1464 /* Remove the stack space that had been used for the collecting_t object. */
1465 i = 0;
1466 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1467 append_insns (&buildaddr, i, buf);
1468
1469 i = 0;
1470 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1471 buf[i++] = 0xc4;
1472 buf[i++] = 0x04;
1473 buf[i++] = 0x17; /* pop %ss */
1474 buf[i++] = 0x0f; /* pop %gs */
1475 buf[i++] = 0xa9;
1476 buf[i++] = 0x0f; /* pop %fs */
1477 buf[i++] = 0xa1;
1478 buf[i++] = 0x07; /* pop %es */
1479 buf[i++] = 0x1f; /* pop %ds */
1480 buf[i++] = 0x9d; /* popf */
1481 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1482 buf[i++] = 0xc4;
1483 buf[i++] = 0x04;
1484 buf[i++] = 0x61; /* popad */
1485 append_insns (&buildaddr, i, buf);
1486
1487 /* Now, adjust the original instruction to execute in the jump
1488 pad. */
1489 *adjusted_insn_addr = buildaddr;
1490 relocate_instruction (&buildaddr, tpaddr);
1491 *adjusted_insn_addr_end = buildaddr;
1492
1493 /* Write the jump back to the program. */
1494 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1495 memcpy (buf, jump_insn, sizeof (jump_insn));
1496 memcpy (buf + 1, &offset, 4);
1497 append_insns (&buildaddr, sizeof (jump_insn), buf);
1498
1499 /* The jump pad is now built. Wire in a jump to our jump pad. This
1500 is always done last (by our caller actually), so that we can
1501 install fast tracepoints with threads running. This relies on
1502 the agent's atomic write support. */
1503 if (orig_size == 4)
1504 {
1505 /* Create a trampoline. */
1506 *trampoline_size = sizeof (jump_insn);
1507 if (!claim_trampoline_space (*trampoline_size, trampoline))
1508 {
1509 /* No trampoline space available. */
1510 strcpy (err,
1511 "E.Cannot allocate trampoline space needed for fast "
1512 "tracepoints on 4-byte instructions.");
1513 return 1;
1514 }
1515
1516 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1517 memcpy (buf, jump_insn, sizeof (jump_insn));
1518 memcpy (buf + 1, &offset, 4);
1519 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1520
1521 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1522 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1523 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1524 memcpy (buf + 2, &offset, 2);
1525 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1526 *jjump_pad_insn_size = sizeof (small_jump_insn);
1527 }
1528 else
1529 {
1530 /* Else use a 32-bit relative jump instruction. */
1531 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1532 memcpy (buf, jump_insn, sizeof (jump_insn));
1533 memcpy (buf + 1, &offset, 4);
1534 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1535 *jjump_pad_insn_size = sizeof (jump_insn);
1536 }
1537
1538 /* Return the end address of our pad. */
1539 *jump_entry = buildaddr;
1540
1541 return 0;
1542 }
1543
1544 bool
1545 x86_target::supports_fast_tracepoints ()
1546 {
1547 return true;
1548 }
1549
1550 int
1551 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1552 CORE_ADDR tpaddr,
1553 CORE_ADDR collector,
1554 CORE_ADDR lockaddr,
1555 ULONGEST orig_size,
1556 CORE_ADDR *jump_entry,
1557 CORE_ADDR *trampoline,
1558 ULONGEST *trampoline_size,
1559 unsigned char *jjump_pad_insn,
1560 ULONGEST *jjump_pad_insn_size,
1561 CORE_ADDR *adjusted_insn_addr,
1562 CORE_ADDR *adjusted_insn_addr_end,
1563 char *err)
1564 {
1565 #ifdef __x86_64__
1566 if (is_64bit_tdesc ())
1567 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1568 collector, lockaddr,
1569 orig_size, jump_entry,
1570 trampoline, trampoline_size,
1571 jjump_pad_insn,
1572 jjump_pad_insn_size,
1573 adjusted_insn_addr,
1574 adjusted_insn_addr_end,
1575 err);
1576 #endif
1577
1578 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1579 collector, lockaddr,
1580 orig_size, jump_entry,
1581 trampoline, trampoline_size,
1582 jjump_pad_insn,
1583 jjump_pad_insn_size,
1584 adjusted_insn_addr,
1585 adjusted_insn_addr_end,
1586 err);
1587 }
1588
1589 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1590 architectures. */
1591
1592 int
1593 x86_target::get_min_fast_tracepoint_insn_len ()
1594 {
1595 static int warned_about_fast_tracepoints = 0;
1596
1597 #ifdef __x86_64__
1598 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1599 used for fast tracepoints. */
1600 if (is_64bit_tdesc ())
1601 return 5;
1602 #endif
1603
1604 if (agent_loaded_p ())
1605 {
1606 char errbuf[IPA_BUFSIZ];
1607
1608 errbuf[0] = '\0';
1609
1610 /* On x86, if trampolines are available, then 4-byte jump instructions
1611 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1612 with a 4-byte offset are used instead. */
1613 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1614 return 4;
1615 else
1616 {
1617 /* GDB has no channel to explain to the user why a shorter fast
1618 tracepoint is not possible, but at least make GDBserver
1619 mention that something has gone awry. */
1620 if (!warned_about_fast_tracepoints)
1621 {
1622 warning ("4-byte fast tracepoints not available; %s", errbuf);
1623 warned_about_fast_tracepoints = 1;
1624 }
1625 return 5;
1626 }
1627 }
1628 else
1629 {
1630 /* Indicate that the minimum length is currently unknown since the IPA
1631 has not loaded yet. */
1632 return 0;
1633 }
1634 }
1635
1636 static void
1637 add_insns (unsigned char *start, int len)
1638 {
1639 CORE_ADDR buildaddr = current_insn_ptr;
1640
1641 if (debug_threads)
1642 debug_printf ("Adding %d bytes of insn at %s\n",
1643 len, paddress (buildaddr));
1644
1645 append_insns (&buildaddr, len, start);
1646 current_insn_ptr = buildaddr;
1647 }
1648
1649 /* Our general strategy for emitting code is to avoid specifying raw
1650 bytes whenever possible, and instead copy a block of inline asm
1651 that is embedded in the function. This is a little messy, because
1652 we need to keep the compiler from discarding what looks like dead
1653 code, plus suppress various warnings. */
1654
1655 #define EMIT_ASM(NAME, INSNS) \
1656 do \
1657 { \
1658 extern unsigned char start_ ## NAME, end_ ## NAME; \
1659 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1660 __asm__ ("jmp end_" #NAME "\n" \
1661 "\t" "start_" #NAME ":" \
1662 "\t" INSNS "\n" \
1663 "\t" "end_" #NAME ":"); \
1664 } while (0)
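/* As an illustration, EMIT_ASM (amd64_pop, "pop %rax") assembles the
   "pop %rax" bytes into gdbserver's own text between the local labels
   start_amd64_pop and end_amd64_pop (jumping over them so they are never
   executed here), and add_insns then copies those bytes into the
   inferior's jump pad at current_insn_ptr.  */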
1665
1666 #ifdef __x86_64__
1667
1668 #define EMIT_ASM32(NAME,INSNS) \
1669 do \
1670 { \
1671 extern unsigned char start_ ## NAME, end_ ## NAME; \
1672 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1673 __asm__ (".code32\n" \
1674 "\t" "jmp end_" #NAME "\n" \
1675 "\t" "start_" #NAME ":\n" \
1676 "\t" INSNS "\n" \
1677 "\t" "end_" #NAME ":\n" \
1678 ".code64\n"); \
1679 } while (0)
1680
1681 #else
1682
1683 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1684
1685 #endif
1686
1687 #ifdef __x86_64__
1688
1689 static void
1690 amd64_emit_prologue (void)
1691 {
1692 EMIT_ASM (amd64_prologue,
1693 "pushq %rbp\n\t"
1694 "movq %rsp,%rbp\n\t"
1695 "sub $0x20,%rsp\n\t"
1696 "movq %rdi,-8(%rbp)\n\t"
1697 "movq %rsi,-16(%rbp)");
1698 }
1699
1700
1701 static void
1702 amd64_emit_epilogue (void)
1703 {
1704 EMIT_ASM (amd64_epilogue,
1705 "movq -16(%rbp),%rdi\n\t"
1706 "movq %rax,(%rdi)\n\t"
1707 "xor %rax,%rax\n\t"
1708 "leave\n\t"
1709 "ret");
1710 }
1711
1712 static void
1713 amd64_emit_add (void)
1714 {
1715 EMIT_ASM (amd64_add,
1716 "add (%rsp),%rax\n\t"
1717 "lea 0x8(%rsp),%rsp");
1718 }
1719
1720 static void
1721 amd64_emit_sub (void)
1722 {
1723 EMIT_ASM (amd64_sub,
1724 "sub %rax,(%rsp)\n\t"
1725 "pop %rax");
1726 }
1727
1728 static void
1729 amd64_emit_mul (void)
1730 {
1731 emit_error = 1;
1732 }
1733
1734 static void
1735 amd64_emit_lsh (void)
1736 {
1737 emit_error = 1;
1738 }
1739
1740 static void
1741 amd64_emit_rsh_signed (void)
1742 {
1743 emit_error = 1;
1744 }
1745
1746 static void
1747 amd64_emit_rsh_unsigned (void)
1748 {
1749 emit_error = 1;
1750 }
1751
1752 static void
1753 amd64_emit_ext (int arg)
1754 {
1755 switch (arg)
1756 {
1757 case 8:
1758 EMIT_ASM (amd64_ext_8,
1759 "cbtw\n\t"
1760 "cwtl\n\t"
1761 "cltq");
1762 break;
1763 case 16:
1764 EMIT_ASM (amd64_ext_16,
1765 "cwtl\n\t"
1766 "cltq");
1767 break;
1768 case 32:
1769 EMIT_ASM (amd64_ext_32,
1770 "cltq");
1771 break;
1772 default:
1773 emit_error = 1;
1774 }
1775 }
1776
1777 static void
1778 amd64_emit_log_not (void)
1779 {
1780 EMIT_ASM (amd64_log_not,
1781 "test %rax,%rax\n\t"
1782 "sete %cl\n\t"
1783 "movzbq %cl,%rax");
1784 }
1785
1786 static void
1787 amd64_emit_bit_and (void)
1788 {
1789 EMIT_ASM (amd64_and,
1790 "and (%rsp),%rax\n\t"
1791 "lea 0x8(%rsp),%rsp");
1792 }
1793
1794 static void
1795 amd64_emit_bit_or (void)
1796 {
1797 EMIT_ASM (amd64_or,
1798 "or (%rsp),%rax\n\t"
1799 "lea 0x8(%rsp),%rsp");
1800 }
1801
1802 static void
1803 amd64_emit_bit_xor (void)
1804 {
1805 EMIT_ASM (amd64_xor,
1806 "xor (%rsp),%rax\n\t"
1807 "lea 0x8(%rsp),%rsp");
1808 }
1809
1810 static void
1811 amd64_emit_bit_not (void)
1812 {
1813 EMIT_ASM (amd64_bit_not,
1814 "xorq $0xffffffffffffffff,%rax");
1815 }
1816
1817 static void
1818 amd64_emit_equal (void)
1819 {
1820 EMIT_ASM (amd64_equal,
1821 "cmp %rax,(%rsp)\n\t"
1822 "je .Lamd64_equal_true\n\t"
1823 "xor %rax,%rax\n\t"
1824 "jmp .Lamd64_equal_end\n\t"
1825 ".Lamd64_equal_true:\n\t"
1826 "mov $0x1,%rax\n\t"
1827 ".Lamd64_equal_end:\n\t"
1828 "lea 0x8(%rsp),%rsp");
1829 }
1830
1831 static void
1832 amd64_emit_less_signed (void)
1833 {
1834 EMIT_ASM (amd64_less_signed,
1835 "cmp %rax,(%rsp)\n\t"
1836 "jl .Lamd64_less_signed_true\n\t"
1837 "xor %rax,%rax\n\t"
1838 "jmp .Lamd64_less_signed_end\n\t"
1839 ".Lamd64_less_signed_true:\n\t"
1840 "mov $1,%rax\n\t"
1841 ".Lamd64_less_signed_end:\n\t"
1842 "lea 0x8(%rsp),%rsp");
1843 }
1844
1845 static void
1846 amd64_emit_less_unsigned (void)
1847 {
1848 EMIT_ASM (amd64_less_unsigned,
1849 "cmp %rax,(%rsp)\n\t"
1850 "jb .Lamd64_less_unsigned_true\n\t"
1851 "xor %rax,%rax\n\t"
1852 "jmp .Lamd64_less_unsigned_end\n\t"
1853 ".Lamd64_less_unsigned_true:\n\t"
1854 "mov $1,%rax\n\t"
1855 ".Lamd64_less_unsigned_end:\n\t"
1856 "lea 0x8(%rsp),%rsp");
1857 }
1858
1859 static void
1860 amd64_emit_ref (int size)
1861 {
1862 switch (size)
1863 {
1864 case 1:
1865 EMIT_ASM (amd64_ref1,
1866 "movb (%rax),%al");
1867 break;
1868 case 2:
1869 EMIT_ASM (amd64_ref2,
1870 "movw (%rax),%ax");
1871 break;
1872 case 4:
1873 EMIT_ASM (amd64_ref4,
1874 "movl (%rax),%eax");
1875 break;
1876 case 8:
1877 EMIT_ASM (amd64_ref8,
1878 "movq (%rax),%rax");
1879 break;
1880 }
1881 }
1882
1883 static void
1884 amd64_emit_if_goto (int *offset_p, int *size_p)
1885 {
1886 EMIT_ASM (amd64_if_goto,
1887 "mov %rax,%rcx\n\t"
1888 "pop %rax\n\t"
1889 "cmp $0,%rcx\n\t"
1890 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1891 if (offset_p)
1892 *offset_p = 10;
1893 if (size_p)
1894 *size_p = 4;
1895 }
1896
1897 static void
1898 amd64_emit_goto (int *offset_p, int *size_p)
1899 {
1900 EMIT_ASM (amd64_goto,
1901 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1902 if (offset_p)
1903 *offset_p = 1;
1904 if (size_p)
1905 *size_p = 4;
1906 }
1907
1908 static void
1909 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1910 {
1911 int diff = (to - (from + size));
1912 unsigned char buf[sizeof (int)];
1913
1914 if (size != 4)
1915 {
1916 emit_error = 1;
1917 return;
1918 }
1919
1920 memcpy (buf, &diff, sizeof (int));
1921 target_write_memory (from, buf, sizeof (int));
1922 }
1923
1924 static void
1925 amd64_emit_const (LONGEST num)
1926 {
1927 unsigned char buf[16];
1928 int i;
1929 CORE_ADDR buildaddr = current_insn_ptr;
1930
1931 i = 0;
1932 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1933 memcpy (&buf[i], &num, sizeof (num));
1934 i += 8;
1935 append_insns (&buildaddr, i, buf);
1936 current_insn_ptr = buildaddr;
1937 }
1938
1939 static void
1940 amd64_emit_call (CORE_ADDR fn)
1941 {
1942 unsigned char buf[16];
1943 int i;
1944 CORE_ADDR buildaddr;
1945 LONGEST offset64;
1946
1947 /* The destination function, being in the shared library, may be more
1948 than 31 bits away from the compiled code pad. */
1949
1950 buildaddr = current_insn_ptr;
1951
1952 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1953
1954 i = 0;
1955
1956 if (offset64 > INT_MAX || offset64 < INT_MIN)
1957 {
1958 /* The offset is too large for a direct call, so call indirectly
1959 through a register. Use %r10; since it is call-clobbered, we
1960 don't have to push/pop it. */
1961 buf[i++] = 0x48; /* mov $fn,%r10 */
1962 buf[i++] = 0xba;
1963 memcpy (buf + i, &fn, 8);
1964 i += 8;
1965 buf[i++] = 0xff; /* callq *%r10 */
1966 buf[i++] = 0xd2;
1967 }
1968 else
1969 {
1970 int offset32 = offset64; /* we know we can't overflow here. */
1971
1972 buf[i++] = 0xe8; /* call <reladdr> */
1973 memcpy (buf + i, &offset32, 4);
1974 i += 4;
1975 }
1976
1977 append_insns (&buildaddr, i, buf);
1978 current_insn_ptr = buildaddr;
1979 }
1980
1981 static void
1982 amd64_emit_reg (int reg)
1983 {
1984 unsigned char buf[16];
1985 int i;
1986 CORE_ADDR buildaddr;
1987
1988 /* Assume raw_regs is still in %rdi. */
1989 buildaddr = current_insn_ptr;
1990 i = 0;
1991 buf[i++] = 0xbe; /* mov $<n>,%esi */
1992 memcpy (&buf[i], &reg, sizeof (reg));
1993 i += 4;
1994 append_insns (&buildaddr, i, buf);
1995 current_insn_ptr = buildaddr;
1996 amd64_emit_call (get_raw_reg_func_addr ());
1997 }
1998
1999 static void
2000 amd64_emit_pop (void)
2001 {
2002 EMIT_ASM (amd64_pop,
2003 "pop %rax");
2004 }
2005
2006 static void
2007 amd64_emit_stack_flush (void)
2008 {
2009 EMIT_ASM (amd64_stack_flush,
2010 "push %rax");
2011 }
2012
2013 static void
2014 amd64_emit_zero_ext (int arg)
2015 {
2016 switch (arg)
2017 {
2018 case 8:
2019 EMIT_ASM (amd64_zero_ext_8,
2020 "and $0xff,%rax");
2021 break;
2022 case 16:
2023 EMIT_ASM (amd64_zero_ext_16,
2024 "and $0xffff,%rax");
2025 break;
2026 case 32:
2027 EMIT_ASM (amd64_zero_ext_32,
2028 "mov $0xffffffff,%rcx\n\t"
2029 "and %rcx,%rax");
2030 break;
2031 default:
2032 emit_error = 1;
2033 }
2034 }
2035
2036 static void
2037 amd64_emit_swap (void)
2038 {
2039 EMIT_ASM (amd64_swap,
2040 "mov %rax,%rcx\n\t"
2041 "pop %rax\n\t"
2042 "push %rcx");
2043 }
2044
2045 static void
2046 amd64_emit_stack_adjust (int n)
2047 {
2048 unsigned char buf[16];
2049 int i;
2050 CORE_ADDR buildaddr = current_insn_ptr;
2051
2052 i = 0;
2053 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2054 buf[i++] = 0x8d;
2055 buf[i++] = 0x64;
2056 buf[i++] = 0x24;
2057 /* This only handles adjustments up to 16, but we don't expect any more. */
2058 buf[i++] = n * 8;
2059 append_insns (&buildaddr, i, buf);
2060 current_insn_ptr = buildaddr;
2061 }
2062
2063 /* FN's prototype is `LONGEST(*fn)(int)'. */
2064
2065 static void
2066 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2067 {
2068 unsigned char buf[16];
2069 int i;
2070 CORE_ADDR buildaddr;
2071
2072 buildaddr = current_insn_ptr;
2073 i = 0;
2074 buf[i++] = 0xbf; /* movl $<n>,%edi */
2075 memcpy (&buf[i], &arg1, sizeof (arg1));
2076 i += 4;
2077 append_insns (&buildaddr, i, buf);
2078 current_insn_ptr = buildaddr;
2079 amd64_emit_call (fn);
2080 }
2081
2082 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2083
2084 static void
2085 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2086 {
2087 unsigned char buf[16];
2088 int i;
2089 CORE_ADDR buildaddr;
2090
2091 buildaddr = current_insn_ptr;
2092 i = 0;
2093 buf[i++] = 0xbf; /* movl $<n>,%edi */
2094 memcpy (&buf[i], &arg1, sizeof (arg1));
2095 i += 4;
2096 append_insns (&buildaddr, i, buf);
2097 current_insn_ptr = buildaddr;
2098 EMIT_ASM (amd64_void_call_2_a,
2099 /* Save away a copy of the stack top. */
2100 "push %rax\n\t"
2101 /* Also pass top as the second argument. */
2102 "mov %rax,%rsi");
2103 amd64_emit_call (fn);
2104 EMIT_ASM (amd64_void_call_2_b,
2105 /* Restore the stack top; %rax may have been trashed. */
2106 "pop %rax");
2107 }
2108
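/* Each conditional-goto emitter below ends its taken path with a jmp
   whose 32-bit displacement is left as zero.  *OFFSET_P receives the
   offset of that displacement within the emitted sequence and *SIZE_P
   its size in bytes, so the branch target can be patched in later via
   the write_goto_address hook.  */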
2109 static void
2110 amd64_emit_eq_goto (int *offset_p, int *size_p)
2111 {
2112 EMIT_ASM (amd64_eq,
2113 "cmp %rax,(%rsp)\n\t"
2114 "jne .Lamd64_eq_fallthru\n\t"
2115 "lea 0x8(%rsp),%rsp\n\t"
2116 "pop %rax\n\t"
2117 /* jmp, but don't trust the assembler to choose the right jump */
2118 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2119 ".Lamd64_eq_fallthru:\n\t"
2120 "lea 0x8(%rsp),%rsp\n\t"
2121 "pop %rax");
2122
2123 if (offset_p)
2124 *offset_p = 13;
2125 if (size_p)
2126 *size_p = 4;
2127 }
2128
2129 static void
2130 amd64_emit_ne_goto (int *offset_p, int *size_p)
2131 {
2132 EMIT_ASM (amd64_ne,
2133 "cmp %rax,(%rsp)\n\t"
2134 "je .Lamd64_ne_fallthru\n\t"
2135 "lea 0x8(%rsp),%rsp\n\t"
2136 "pop %rax\n\t"
2137 /* jmp, but don't trust the assembler to choose the right jump */
2138 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2139 ".Lamd64_ne_fallthru:\n\t"
2140 "lea 0x8(%rsp),%rsp\n\t"
2141 "pop %rax");
2142
2143 if (offset_p)
2144 *offset_p = 13;
2145 if (size_p)
2146 *size_p = 4;
2147 }
2148
2149 static void
2150 amd64_emit_lt_goto (int *offset_p, int *size_p)
2151 {
2152 EMIT_ASM (amd64_lt,
2153 "cmp %rax,(%rsp)\n\t"
2154 "jnl .Lamd64_lt_fallthru\n\t"
2155 "lea 0x8(%rsp),%rsp\n\t"
2156 "pop %rax\n\t"
2157 /* jmp, but don't trust the assembler to choose the right jump */
2158 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2159 ".Lamd64_lt_fallthru:\n\t"
2160 "lea 0x8(%rsp),%rsp\n\t"
2161 "pop %rax");
2162
2163 if (offset_p)
2164 *offset_p = 13;
2165 if (size_p)
2166 *size_p = 4;
2167 }
2168
2169 static void
2170 amd64_emit_le_goto (int *offset_p, int *size_p)
2171 {
2172 EMIT_ASM (amd64_le,
2173 "cmp %rax,(%rsp)\n\t"
2174 "jnle .Lamd64_le_fallthru\n\t"
2175 "lea 0x8(%rsp),%rsp\n\t"
2176 "pop %rax\n\t"
2177 /* jmp, but don't trust the assembler to choose the right jump */
2178 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2179 ".Lamd64_le_fallthru:\n\t"
2180 "lea 0x8(%rsp),%rsp\n\t"
2181 "pop %rax");
2182
2183 if (offset_p)
2184 *offset_p = 13;
2185 if (size_p)
2186 *size_p = 4;
2187 }
2188
2189 static void
2190 amd64_emit_gt_goto (int *offset_p, int *size_p)
2191 {
2192 EMIT_ASM (amd64_gt,
2193 "cmp %rax,(%rsp)\n\t"
2194 "jng .Lamd64_gt_fallthru\n\t"
2195 "lea 0x8(%rsp),%rsp\n\t"
2196 "pop %rax\n\t"
2197 /* jmp, but don't trust the assembler to choose the right jump */
2198 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2199 ".Lamd64_gt_fallthru:\n\t"
2200 "lea 0x8(%rsp),%rsp\n\t"
2201 "pop %rax");
2202
2203 if (offset_p)
2204 *offset_p = 13;
2205 if (size_p)
2206 *size_p = 4;
2207 }
2208
2209 static void
2210 amd64_emit_ge_goto (int *offset_p, int *size_p)
2211 {
2212 EMIT_ASM (amd64_ge,
2213 "cmp %rax,(%rsp)\n\t"
2214 "jnge .Lamd64_ge_fallthru\n\t"
2215 ".Lamd64_ge_jump:\n\t"
2216 "lea 0x8(%rsp),%rsp\n\t"
2217 "pop %rax\n\t"
2218 /* jmp, but don't trust the assembler to choose the right jump */
2219 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2220 ".Lamd64_ge_fallthru:\n\t"
2221 "lea 0x8(%rsp),%rsp\n\t"
2222 "pop %rax");
2223
2224 if (offset_p)
2225 *offset_p = 13;
2226 if (size_p)
2227 *size_p = 4;
2228 }
2229
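/* The amd64 vector of emit operations, used when compiling agent
   expression bytecode to native code.  */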
2230 struct emit_ops amd64_emit_ops =
2231 {
2232 amd64_emit_prologue,
2233 amd64_emit_epilogue,
2234 amd64_emit_add,
2235 amd64_emit_sub,
2236 amd64_emit_mul,
2237 amd64_emit_lsh,
2238 amd64_emit_rsh_signed,
2239 amd64_emit_rsh_unsigned,
2240 amd64_emit_ext,
2241 amd64_emit_log_not,
2242 amd64_emit_bit_and,
2243 amd64_emit_bit_or,
2244 amd64_emit_bit_xor,
2245 amd64_emit_bit_not,
2246 amd64_emit_equal,
2247 amd64_emit_less_signed,
2248 amd64_emit_less_unsigned,
2249 amd64_emit_ref,
2250 amd64_emit_if_goto,
2251 amd64_emit_goto,
2252 amd64_write_goto_address,
2253 amd64_emit_const,
2254 amd64_emit_call,
2255 amd64_emit_reg,
2256 amd64_emit_pop,
2257 amd64_emit_stack_flush,
2258 amd64_emit_zero_ext,
2259 amd64_emit_swap,
2260 amd64_emit_stack_adjust,
2261 amd64_emit_int_call_1,
2262 amd64_emit_void_call_2,
2263 amd64_emit_eq_goto,
2264 amd64_emit_ne_goto,
2265 amd64_emit_lt_goto,
2266 amd64_emit_le_goto,
2267 amd64_emit_gt_goto,
2268 amd64_emit_ge_goto
2269 };
2270
2271 #endif /* __x86_64__ */
2272
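/* The i386 emitters below keep the 64-bit top of the bytecode stack
   split across a register pair: the low word in %eax and the high
   word in %ebx.  The remaining entries live on the machine stack,
   two 32-bit words each.  */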
2273 static void
2274 i386_emit_prologue (void)
2275 {
2276 EMIT_ASM32 (i386_prologue,
2277 "push %ebp\n\t"
2278 "mov %esp,%ebp\n\t"
2279 "push %ebx");
2280 /* At this point, the raw regs base address is at 8(%ebp), and the
2281 value pointer is at 12(%ebp). */
2282 }
2283
2284 static void
2285 i386_emit_epilogue (void)
2286 {
2287 EMIT_ASM32 (i386_epilogue,
2288 "mov 12(%ebp),%ecx\n\t"
2289 "mov %eax,(%ecx)\n\t"
2290 "mov %ebx,0x4(%ecx)\n\t"
2291 "xor %eax,%eax\n\t"
2292 "pop %ebx\n\t"
2293 "pop %ebp\n\t"
2294 "ret");
2295 }
2296
2297 static void
2298 i386_emit_add (void)
2299 {
2300 EMIT_ASM32 (i386_add,
2301 "add (%esp),%eax\n\t"
2302 "adc 0x4(%esp),%ebx\n\t"
2303 "lea 0x8(%esp),%esp");
2304 }
2305
2306 static void
2307 i386_emit_sub (void)
2308 {
2309 EMIT_ASM32 (i386_sub,
2310 "subl %eax,(%esp)\n\t"
2311 "sbbl %ebx,4(%esp)\n\t"
2312 "pop %eax\n\t"
2313 "pop %ebx\n\t");
2314 }
2315
2316 static void
2317 i386_emit_mul (void)
2318 {
2319 emit_error = 1;
2320 }
2321
2322 static void
2323 i386_emit_lsh (void)
2324 {
2325 emit_error = 1;
2326 }
2327
2328 static void
2329 i386_emit_rsh_signed (void)
2330 {
2331 emit_error = 1;
2332 }
2333
2334 static void
2335 i386_emit_rsh_unsigned (void)
2336 {
2337 emit_error = 1;
2338 }
2339
2340 static void
2341 i386_emit_ext (int arg)
2342 {
2343 switch (arg)
2344 {
2345 case 8:
2346 EMIT_ASM32 (i386_ext_8,
2347 "cbtw\n\t"
2348 "cwtl\n\t"
2349 "movl %eax,%ebx\n\t"
2350 "sarl $31,%ebx");
2351 break;
2352 case 16:
2353 EMIT_ASM32 (i386_ext_16,
2354 "cwtl\n\t"
2355 "movl %eax,%ebx\n\t"
2356 "sarl $31,%ebx");
2357 break;
2358 case 32:
2359 EMIT_ASM32 (i386_ext_32,
2360 "movl %eax,%ebx\n\t"
2361 "sarl $31,%ebx");
2362 break;
2363 default:
2364 emit_error = 1;
2365 }
2366 }
2367
2368 static void
2369 i386_emit_log_not (void)
2370 {
2371 EMIT_ASM32 (i386_log_not,
2372 "or %ebx,%eax\n\t"
2373 "test %eax,%eax\n\t"
2374 "sete %cl\n\t"
2375 "xor %ebx,%ebx\n\t"
2376 "movzbl %cl,%eax");
2377 }
2378
2379 static void
2380 i386_emit_bit_and (void)
2381 {
2382 EMIT_ASM32 (i386_and,
2383 "and (%esp),%eax\n\t"
2384 "and 0x4(%esp),%ebx\n\t"
2385 "lea 0x8(%esp),%esp");
2386 }
2387
2388 static void
2389 i386_emit_bit_or (void)
2390 {
2391 EMIT_ASM32 (i386_or,
2392 "or (%esp),%eax\n\t"
2393 "or 0x4(%esp),%ebx\n\t"
2394 "lea 0x8(%esp),%esp");
2395 }
2396
2397 static void
2398 i386_emit_bit_xor (void)
2399 {
2400 EMIT_ASM32 (i386_xor,
2401 "xor (%esp),%eax\n\t"
2402 "xor 0x4(%esp),%ebx\n\t"
2403 "lea 0x8(%esp),%esp");
2404 }
2405
2406 static void
2407 i386_emit_bit_not (void)
2408 {
2409 EMIT_ASM32 (i386_bit_not,
2410 "xor $0xffffffff,%eax\n\t"
2411 "xor $0xffffffff,%ebx\n\t");
2412 }
2413
2414 static void
2415 i386_emit_equal (void)
2416 {
2417 EMIT_ASM32 (i386_equal,
2418 "cmpl %ebx,4(%esp)\n\t"
2419 "jne .Li386_equal_false\n\t"
2420 "cmpl %eax,(%esp)\n\t"
2421 "je .Li386_equal_true\n\t"
2422 ".Li386_equal_false:\n\t"
2423 "xor %eax,%eax\n\t"
2424 "jmp .Li386_equal_end\n\t"
2425 ".Li386_equal_true:\n\t"
2426 "mov $1,%eax\n\t"
2427 ".Li386_equal_end:\n\t"
2428 "xor %ebx,%ebx\n\t"
2429 "lea 0x8(%esp),%esp");
2430 }
2431
2432 static void
2433 i386_emit_less_signed (void)
2434 {
2435 EMIT_ASM32 (i386_less_signed,
2436 "cmpl %ebx,4(%esp)\n\t"
2437 "jl .Li386_less_signed_true\n\t"
2438 "jne .Li386_less_signed_false\n\t"
2439 "cmpl %eax,(%esp)\n\t"
2440 "jl .Li386_less_signed_true\n\t"
2441 ".Li386_less_signed_false:\n\t"
2442 "xor %eax,%eax\n\t"
2443 "jmp .Li386_less_signed_end\n\t"
2444 ".Li386_less_signed_true:\n\t"
2445 "mov $1,%eax\n\t"
2446 ".Li386_less_signed_end:\n\t"
2447 "xor %ebx,%ebx\n\t"
2448 "lea 0x8(%esp),%esp");
2449 }
2450
2451 static void
2452 i386_emit_less_unsigned (void)
2453 {
2454 EMIT_ASM32 (i386_less_unsigned,
2455 "cmpl %ebx,4(%esp)\n\t"
2456 "jb .Li386_less_unsigned_true\n\t"
2457 "jne .Li386_less_unsigned_false\n\t"
2458 "cmpl %eax,(%esp)\n\t"
2459 "jb .Li386_less_unsigned_true\n\t"
2460 ".Li386_less_unsigned_false:\n\t"
2461 "xor %eax,%eax\n\t"
2462 "jmp .Li386_less_unsigned_end\n\t"
2463 ".Li386_less_unsigned_true:\n\t"
2464 "mov $1,%eax\n\t"
2465 ".Li386_less_unsigned_end:\n\t"
2466 "xor %ebx,%ebx\n\t"
2467 "lea 0x8(%esp),%esp");
2468 }
2469
2470 static void
2471 i386_emit_ref (int size)
2472 {
2473 switch (size)
2474 {
2475 case 1:
2476 EMIT_ASM32 (i386_ref1,
2477 "movb (%eax),%al");
2478 break;
2479 case 2:
2480 EMIT_ASM32 (i386_ref2,
2481 "movw (%eax),%ax");
2482 break;
2483 case 4:
2484 EMIT_ASM32 (i386_ref4,
2485 "movl (%eax),%eax");
2486 break;
2487 case 8:
2488 EMIT_ASM32 (i386_ref8,
2489 "movl 4(%eax),%ebx\n\t"
2490 "movl (%eax),%eax");
2491 break;
2492 }
2493 }
2494
2495 static void
2496 i386_emit_if_goto (int *offset_p, int *size_p)
2497 {
2498 EMIT_ASM32 (i386_if_goto,
2499 "mov %eax,%ecx\n\t"
2500 "or %ebx,%ecx\n\t"
2501 "pop %eax\n\t"
2502 "pop %ebx\n\t"
2503 "cmpl $0,%ecx\n\t"
2504 /* Don't trust the assembler to choose the right jump */
2505 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2506
2507 if (offset_p)
2508 *offset_p = 11; /* be sure that this matches the sequence above */
2509 if (size_p)
2510 *size_p = 4;
2511 }
2512
2513 static void
2514 i386_emit_goto (int *offset_p, int *size_p)
2515 {
2516 EMIT_ASM32 (i386_goto,
2517 /* Don't trust the assembler to choose the right jump */
2518 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2519 if (offset_p)
2520 *offset_p = 1;
2521 if (size_p)
2522 *size_p = 4;
2523 }
2524
2525 static void
2526 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2527 {
2528 int diff = (to - (from + size));
2529 unsigned char buf[sizeof (int)];
2530
2531 /* We're only doing 4-byte sizes at the moment. */
2532 if (size != 4)
2533 {
2534 emit_error = 1;
2535 return;
2536 }
2537
2538 memcpy (buf, &diff, sizeof (int));
2539 target_write_memory (from, buf, sizeof (int));
2540 }
2541
2542 static void
2543 i386_emit_const (LONGEST num)
2544 {
2545 unsigned char buf[16];
2546 int i, hi, lo;
2547 CORE_ADDR buildaddr = current_insn_ptr;
2548
2549 i = 0;
2550 buf[i++] = 0xb8; /* mov $<n>,%eax */
2551 lo = num & 0xffffffff;
2552 memcpy (&buf[i], &lo, sizeof (lo));
2553 i += 4;
2554 hi = ((num >> 32) & 0xffffffff);
2555 if (hi)
2556 {
2557 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2558 memcpy (&buf[i], &hi, sizeof (hi));
2559 i += 4;
2560 }
2561 else
2562 {
2563 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2564 }
2565 append_insns (&buildaddr, i, buf);
2566 current_insn_ptr = buildaddr;
2567 }
2568
2569 static void
2570 i386_emit_call (CORE_ADDR fn)
2571 {
2572 unsigned char buf[16];
2573 int i, offset;
2574 CORE_ADDR buildaddr;
2575
2576 buildaddr = current_insn_ptr;
2577 i = 0;
2578 buf[i++] = 0xe8; /* call <reladdr> */
2579 offset = ((int) fn) - (buildaddr + 5);
2580 memcpy (buf + 1, &offset, 4);
2581 append_insns (&buildaddr, 5, buf);
2582 current_insn_ptr = buildaddr;
2583 }
2584
2585 static void
2586 i386_emit_reg (int reg)
2587 {
2588 unsigned char buf[16];
2589 int i;
2590 CORE_ADDR buildaddr;
2591
2592 EMIT_ASM32 (i386_reg_a,
2593 "sub $0x8,%esp");
2594 buildaddr = current_insn_ptr;
2595 i = 0;
2596 buf[i++] = 0xb8; /* mov $<n>,%eax */
2597 memcpy (&buf[i], &reg, sizeof (reg));
2598 i += 4;
2599 append_insns (&buildaddr, i, buf);
2600 current_insn_ptr = buildaddr;
2601 EMIT_ASM32 (i386_reg_b,
2602 "mov %eax,4(%esp)\n\t"
2603 "mov 8(%ebp),%eax\n\t"
2604 "mov %eax,(%esp)");
2605 i386_emit_call (get_raw_reg_func_addr ());
2606 EMIT_ASM32 (i386_reg_c,
2607 "xor %ebx,%ebx\n\t"
2608 "lea 0x8(%esp),%esp");
2609 }
2610
2611 static void
2612 i386_emit_pop (void)
2613 {
2614 EMIT_ASM32 (i386_pop,
2615 "pop %eax\n\t"
2616 "pop %ebx");
2617 }
2618
2619 static void
2620 i386_emit_stack_flush (void)
2621 {
2622 EMIT_ASM32 (i386_stack_flush,
2623 "push %ebx\n\t"
2624 "push %eax");
2625 }
2626
2627 static void
2628 i386_emit_zero_ext (int arg)
2629 {
2630 switch (arg)
2631 {
2632 case 8:
2633 EMIT_ASM32 (i386_zero_ext_8,
2634 "and $0xff,%eax\n\t"
2635 "xor %ebx,%ebx");
2636 break;
2637 case 16:
2638 EMIT_ASM32 (i386_zero_ext_16,
2639 "and $0xffff,%eax\n\t"
2640 "xor %ebx,%ebx");
2641 break;
2642 case 32:
2643 EMIT_ASM32 (i386_zero_ext_32,
2644 "xor %ebx,%ebx");
2645 break;
2646 default:
2647 emit_error = 1;
2648 }
2649 }
2650
2651 static void
2652 i386_emit_swap (void)
2653 {
2654 EMIT_ASM32 (i386_swap,
2655 "mov %eax,%ecx\n\t"
2656 "mov %ebx,%edx\n\t"
2657 "pop %eax\n\t"
2658 "pop %ebx\n\t"
2659 "push %edx\n\t"
2660 "push %ecx");
2661 }
2662
2663 static void
2664 i386_emit_stack_adjust (int n)
2665 {
2666 unsigned char buf[16];
2667 int i;
2668 CORE_ADDR buildaddr = current_insn_ptr;
2669
2670 i = 0;
2671 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2672 buf[i++] = 0x64;
2673 buf[i++] = 0x24;
2674 buf[i++] = n * 8;
2675 append_insns (&buildaddr, i, buf);
2676 current_insn_ptr = buildaddr;
2677 }
2678
2679 /* FN's prototype is `LONGEST(*fn)(int)'. */
2680
2681 static void
2682 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2683 {
2684 unsigned char buf[16];
2685 int i;
2686 CORE_ADDR buildaddr;
2687
2688 EMIT_ASM32 (i386_int_call_1_a,
2689 /* Reserve a bit of stack space. */
2690 "sub $0x8,%esp");
2691 /* Put the one argument on the stack. */
2692 buildaddr = current_insn_ptr;
2693 i = 0;
2694 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2695 buf[i++] = 0x04;
2696 buf[i++] = 0x24;
2697 memcpy (&buf[i], &arg1, sizeof (arg1));
2698 i += 4;
2699 append_insns (&buildaddr, i, buf);
2700 current_insn_ptr = buildaddr;
2701 i386_emit_call (fn);
2702 EMIT_ASM32 (i386_int_call_1_c,
2703 "mov %edx,%ebx\n\t"
2704 "lea 0x8(%esp),%esp");
2705 }
2706
2707 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2708
2709 static void
2710 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2711 {
2712 unsigned char buf[16];
2713 int i;
2714 CORE_ADDR buildaddr;
2715
2716 EMIT_ASM32 (i386_void_call_2_a,
2717 /* Preserve %eax only; we don't have to worry about %ebx. */
2718 "push %eax\n\t"
2719 /* Reserve a bit of stack space for arguments. */
2720 "sub $0x10,%esp\n\t"
2721 /* Copy "top" to the second argument position. (Note that
2722 we can't assume the function won't scribble on its
2723 arguments, so don't try to restore from this.) */
2724 "mov %eax,4(%esp)\n\t"
2725 "mov %ebx,8(%esp)");
2726 /* Put the first argument on the stack. */
2727 buildaddr = current_insn_ptr;
2728 i = 0;
2729 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2730 buf[i++] = 0x04;
2731 buf[i++] = 0x24;
2732 memcpy (&buf[i], &arg1, sizeof (arg1));
2733 i += 4;
2734 append_insns (&buildaddr, i, buf);
2735 current_insn_ptr = buildaddr;
2736 i386_emit_call (fn);
2737 EMIT_ASM32 (i386_void_call_2_b,
2738 "lea 0x10(%esp),%esp\n\t"
2739 /* Restore original stack top. */
2740 "pop %eax");
2741 }
2742
2743
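/* As with the amd64 versions above, the conditional-goto emitters
   report via *OFFSET_P and *SIZE_P where the 32-bit jump displacement
   sits within the emitted sequence, so i386_write_goto_address can
   patch in the branch target later.  */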
2744 static void
2745 i386_emit_eq_goto (int *offset_p, int *size_p)
2746 {
2747 EMIT_ASM32 (eq,
2748 /* Check the low half first; it is more likely to be the decider. */
2749 "cmpl %eax,(%esp)\n\t"
2750 "jne .Leq_fallthru\n\t"
2751 "cmpl %ebx,4(%esp)\n\t"
2752 "jne .Leq_fallthru\n\t"
2753 "lea 0x8(%esp),%esp\n\t"
2754 "pop %eax\n\t"
2755 "pop %ebx\n\t"
2756 /* jmp, but don't trust the assembler to choose the right jump */
2757 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2758 ".Leq_fallthru:\n\t"
2759 "lea 0x8(%esp),%esp\n\t"
2760 "pop %eax\n\t"
2761 "pop %ebx");
2762
2763 if (offset_p)
2764 *offset_p = 18;
2765 if (size_p)
2766 *size_p = 4;
2767 }
2768
2769 static void
2770 i386_emit_ne_goto (int *offset_p, int *size_p)
2771 {
2772 EMIT_ASM32 (ne,
2773 /* Check the low half first; it is more likely to be the decider. */
2774 "cmpl %eax,(%esp)\n\t"
2775 "jne .Lne_jump\n\t"
2776 "cmpl %ebx,4(%esp)\n\t"
2777 "je .Lne_fallthru\n\t"
2778 ".Lne_jump:\n\t"
2779 "lea 0x8(%esp),%esp\n\t"
2780 "pop %eax\n\t"
2781 "pop %ebx\n\t"
2782 /* jmp, but don't trust the assembler to choose the right jump */
2783 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2784 ".Lne_fallthru:\n\t"
2785 "lea 0x8(%esp),%esp\n\t"
2786 "pop %eax\n\t"
2787 "pop %ebx");
2788
2789 if (offset_p)
2790 *offset_p = 18;
2791 if (size_p)
2792 *size_p = 4;
2793 }
2794
2795 static void
2796 i386_emit_lt_goto (int *offset_p, int *size_p)
2797 {
2798 EMIT_ASM32 (lt,
2799 "cmpl %ebx,4(%esp)\n\t"
2800 "jl .Llt_jump\n\t"
2801 "jne .Llt_fallthru\n\t"
2802 "cmpl %eax,(%esp)\n\t"
2803 "jnl .Llt_fallthru\n\t"
2804 ".Llt_jump:\n\t"
2805 "lea 0x8(%esp),%esp\n\t"
2806 "pop %eax\n\t"
2807 "pop %ebx\n\t"
2808 /* jmp, but don't trust the assembler to choose the right jump */
2809 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2810 ".Llt_fallthru:\n\t"
2811 "lea 0x8(%esp),%esp\n\t"
2812 "pop %eax\n\t"
2813 "pop %ebx");
2814
2815 if (offset_p)
2816 *offset_p = 20;
2817 if (size_p)
2818 *size_p = 4;
2819 }
2820
2821 static void
2822 i386_emit_le_goto (int *offset_p, int *size_p)
2823 {
2824 EMIT_ASM32 (le,
2825 "cmpl %ebx,4(%esp)\n\t"
2826 "jle .Lle_jump\n\t"
2827 "jne .Lle_fallthru\n\t"
2828 "cmpl %eax,(%esp)\n\t"
2829 "jnle .Lle_fallthru\n\t"
2830 ".Lle_jump:\n\t"
2831 "lea 0x8(%esp),%esp\n\t"
2832 "pop %eax\n\t"
2833 "pop %ebx\n\t"
2834 /* jmp, but don't trust the assembler to choose the right jump */
2835 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2836 ".Lle_fallthru:\n\t"
2837 "lea 0x8(%esp),%esp\n\t"
2838 "pop %eax\n\t"
2839 "pop %ebx");
2840
2841 if (offset_p)
2842 *offset_p = 20;
2843 if (size_p)
2844 *size_p = 4;
2845 }
2846
2847 static void
2848 i386_emit_gt_goto (int *offset_p, int *size_p)
2849 {
2850 EMIT_ASM32 (gt,
2851 "cmpl %ebx,4(%esp)\n\t"
2852 "jg .Lgt_jump\n\t"
2853 "jne .Lgt_fallthru\n\t"
2854 "cmpl %eax,(%esp)\n\t"
2855 "jng .Lgt_fallthru\n\t"
2856 ".Lgt_jump:\n\t"
2857 "lea 0x8(%esp),%esp\n\t"
2858 "pop %eax\n\t"
2859 "pop %ebx\n\t"
2860 /* jmp, but don't trust the assembler to choose the right jump */
2861 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2862 ".Lgt_fallthru:\n\t"
2863 "lea 0x8(%esp),%esp\n\t"
2864 "pop %eax\n\t"
2865 "pop %ebx");
2866
2867 if (offset_p)
2868 *offset_p = 20;
2869 if (size_p)
2870 *size_p = 4;
2871 }
2872
2873 static void
2874 i386_emit_ge_goto (int *offset_p, int *size_p)
2875 {
2876 EMIT_ASM32 (ge,
2877 "cmpl %ebx,4(%esp)\n\t"
2878 "jge .Lge_jump\n\t"
2879 "jne .Lge_fallthru\n\t"
2880 "cmpl %eax,(%esp)\n\t"
2881 "jnge .Lge_fallthru\n\t"
2882 ".Lge_jump:\n\t"
2883 "lea 0x8(%esp),%esp\n\t"
2884 "pop %eax\n\t"
2885 "pop %ebx\n\t"
2886 /* jmp, but don't trust the assembler to choose the right jump */
2887 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2888 ".Lge_fallthru:\n\t"
2889 "lea 0x8(%esp),%esp\n\t"
2890 "pop %eax\n\t"
2891 "pop %ebx");
2892
2893 if (offset_p)
2894 *offset_p = 20;
2895 if (size_p)
2896 *size_p = 4;
2897 }
2898
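/* The i386 (32-bit) vector of emit operations.  */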
2899 struct emit_ops i386_emit_ops =
2900 {
2901 i386_emit_prologue,
2902 i386_emit_epilogue,
2903 i386_emit_add,
2904 i386_emit_sub,
2905 i386_emit_mul,
2906 i386_emit_lsh,
2907 i386_emit_rsh_signed,
2908 i386_emit_rsh_unsigned,
2909 i386_emit_ext,
2910 i386_emit_log_not,
2911 i386_emit_bit_and,
2912 i386_emit_bit_or,
2913 i386_emit_bit_xor,
2914 i386_emit_bit_not,
2915 i386_emit_equal,
2916 i386_emit_less_signed,
2917 i386_emit_less_unsigned,
2918 i386_emit_ref,
2919 i386_emit_if_goto,
2920 i386_emit_goto,
2921 i386_write_goto_address,
2922 i386_emit_const,
2923 i386_emit_call,
2924 i386_emit_reg,
2925 i386_emit_pop,
2926 i386_emit_stack_flush,
2927 i386_emit_zero_ext,
2928 i386_emit_swap,
2929 i386_emit_stack_adjust,
2930 i386_emit_int_call_1,
2931 i386_emit_void_call_2,
2932 i386_emit_eq_goto,
2933 i386_emit_ne_goto,
2934 i386_emit_lt_goto,
2935 i386_emit_le_goto,
2936 i386_emit_gt_goto,
2937 i386_emit_ge_goto
2938 };
2939
2940
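/* Implementation of target ops method "emit_ops".  */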
2941 emit_ops *
2942 x86_target::emit_ops ()
2943 {
2944 #ifdef __x86_64__
2945 if (is_64bit_tdesc ())
2946 return &amd64_emit_ops;
2947 else
2948 #endif
2949 return &i386_emit_ops;
2950 }
2951
2952 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2953
2954 const gdb_byte *
2955 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2956 {
2957 *size = x86_breakpoint_len;
2958 return x86_breakpoint;
2959 }
2960
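/* Implementation of linux target ops method "low_supports_range_stepping".  */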
2961 bool
2962 x86_target::low_supports_range_stepping ()
2963 {
2964 return true;
2965 }
2966
2967 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2968 */
2969
2970 static int
2971 x86_supports_hardware_single_step (void)
2972 {
2973 return 1;
2974 }
2975
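/* Return the index of the in-process agent's target description that
   matches the current inferior; installed in the_low_target vector
   below.  */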
2976 static int
2977 x86_get_ipa_tdesc_idx (void)
2978 {
2979 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2980 const struct target_desc *tdesc = regcache->tdesc;
2981
2982 #ifdef __x86_64__
2983 return amd64_get_ipa_tdesc_idx (tdesc);
2984 #endif
2985
2986 if (tdesc == tdesc_i386_linux_no_xml)
2987 return X86_TDESC_SSE;
2988
2989 return i386_get_ipa_tdesc_idx (tdesc);
2990 }
2991
2992 /* This is initialized assuming an amd64 target.
2993 x86_arch_setup will correct it for i386 or amd64 targets. */
2994
2995 struct linux_target_ops the_low_target =
2996 {
2997 x86_supports_hardware_single_step,
2998 x86_get_syscall_trapinfo,
2999 x86_get_ipa_tdesc_idx,
3000 };
3001
3002 /* The linux target ops object. */
3003
3004 linux_process_target *the_linux_target = &the_x86_target;
3005
3006 void
3007 initialize_low_arch (void)
3008 {
3009 /* Initialize the Linux target descriptions. */
3010 #ifdef __x86_64__
3011 tdesc_amd64_linux_no_xml = allocate_target_description ();
3012 copy_target_description (tdesc_amd64_linux_no_xml,
3013 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
3014 false));
3015 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3016 #endif
3017
3018 tdesc_i386_linux_no_xml = allocate_target_description ();
3019 copy_target_description (tdesc_i386_linux_no_xml,
3020 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3021 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3022
3023 initialize_regsets_info (&x86_regsets_info);
3024 }