gdb: add target_ops::supports_displaced_step
gdbserver/linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (char **features, int count) override;
110
111 bool supports_tracepoints () override;
112
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
125 struct emit_ops *emit_ops () override;
126
127 int get_ipa_tdesc_idx () override;
128
129 protected:
130
131 void low_arch_setup () override;
132
133 bool low_cannot_fetch_register (int regno) override;
134
135 bool low_cannot_store_register (int regno) override;
136
137 bool low_supports_breakpoints () override;
138
139 CORE_ADDR low_get_pc (regcache *regcache) override;
140
141 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
142
143 int low_decr_pc_after_break () override;
144
145 bool low_breakpoint_at (CORE_ADDR pc) override;
146
147 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
148 int size, raw_breakpoint *bp) override;
149
150 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
151 int size, raw_breakpoint *bp) override;
152
153 bool low_stopped_by_watchpoint () override;
154
155 CORE_ADDR low_stopped_data_address () override;
156
157 /* collect_ptrace_register/supply_ptrace_register are not needed in the
158 native i386 case (no registers smaller than an xfer unit), and are not
159 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
160
161 /* Need to fix up i386 siginfo if host is amd64. */
162 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
163 int direction) override;
164
165 arch_process_info *low_new_process () override;
166
167 void low_delete_process (arch_process_info *info) override;
168
169 void low_new_thread (lwp_info *) override;
170
171 void low_delete_thread (arch_lwp_info *) override;
172
173 void low_new_fork (process_info *parent, process_info *child) override;
174
175 void low_prepare_to_resume (lwp_info *lwp) override;
176
177 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
178
179 bool low_supports_range_stepping () override;
180
181 bool low_supports_catch_syscall () override;
182
183 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
184
185 private:
186
 187   /* Update the target description of all processes; a new GDB has
 188      connected, and it may or may not support XML target descriptions.  */
189 void update_xmltarget ();
190 };
191
192 /* The singleton target ops object. */
193
194 static x86_target the_x86_target;
195
196 /* Per-process arch-specific data we want to keep. */
197
198 struct arch_process_info
199 {
200 struct x86_debug_reg_state debug_reg_state;
201 };
202
203 #ifdef __x86_64__
204
205 /* Mapping between the general-purpose registers in `struct user'
206 format and GDB's register array layout.
207 Note that the transfer layout uses 64-bit regs. */
208 static /*const*/ int i386_regmap[] =
209 {
210 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
211 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
212 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
213 DS * 8, ES * 8, FS * 8, GS * 8
214 };
215
216 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
217
 218 /* So the code below doesn't have to care whether it's i386 or amd64.  */
219 #define ORIG_EAX ORIG_RAX
220 #define REGSIZE 8
221
222 static const int x86_64_regmap[] =
223 {
224 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
225 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
226 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
227 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
228 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
229 DS * 8, ES * 8, FS * 8, GS * 8,
230 -1, -1, -1, -1, -1, -1, -1, -1,
231 -1, -1, -1, -1, -1, -1, -1, -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
233 -1,
234 -1, -1, -1, -1, -1, -1, -1, -1,
235 ORIG_RAX * 8,
236 21 * 8, 22 * 8,
237 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
238 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
239 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
240 -1, -1, -1, -1, -1, -1, -1, -1,
241 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
242 -1, -1, -1, -1, -1, -1, -1, -1,
243 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
244 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
245 -1, -1, -1, -1, -1, -1, -1, -1,
246 -1, -1, -1, -1, -1, -1, -1, -1,
247 -1, -1, -1, -1, -1, -1, -1, -1,
248 -1 /* pkru */
249 };
250
251 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
252 #define X86_64_USER_REGS (GS + 1)
253
254 #else /* ! __x86_64__ */
255
256 /* Mapping between the general-purpose registers in `struct user'
257 format and GDB's register array layout. */
258 static /*const*/ int i386_regmap[] =
259 {
260 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
261 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
262 EIP * 4, EFL * 4, CS * 4, SS * 4,
263 DS * 4, ES * 4, FS * 4, GS * 4
264 };
265
266 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
267
268 #define REGSIZE 4
269
270 #endif
271
272 #ifdef __x86_64__
273
 274 /* Returns true if the current inferior belongs to an x86-64 process,
275 per the tdesc. */
276
277 static int
278 is_64bit_tdesc (void)
279 {
280 struct regcache *regcache = get_thread_regcache (current_thread, 0);
281
282 return register_size (regcache->tdesc, 0) == 8;
283 }
284
285 #endif
286
287 \f
288 /* Called by libthread_db. */
289
290 ps_err_e
291 ps_get_thread_area (struct ps_prochandle *ph,
292 lwpid_t lwpid, int idx, void **base)
293 {
294 #ifdef __x86_64__
295 int use_64bit = is_64bit_tdesc ();
296
297 if (use_64bit)
298 {
299 switch (idx)
300 {
301 case FS:
302 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
303 return PS_OK;
304 break;
305 case GS:
306 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
307 return PS_OK;
308 break;
309 default:
310 return PS_BADADDR;
311 }
312 return PS_ERR;
313 }
314 #endif
315
316 {
317 unsigned int desc[4];
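    /* A sketch of what PTRACE_GET_THREAD_AREA stores in DESC: the selected
       GDT entry in the kernel's struct user_desc layout (per asm/ldt.h),
       roughly:

         desc[0]  entry_number
         desc[1]  base_addr   <- the thread-area address we want
         desc[2]  limit
         desc[3]  packed flag bits

       which is why only desc[1] is read below.  */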
318
319 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
320 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
321 return PS_ERR;
322
323 /* Ensure we properly extend the value to 64-bits for x86_64. */
324 *base = (void *) (uintptr_t) desc[1];
325 return PS_OK;
326 }
327 }
328
329 /* Get the thread area address. This is used to recognize which
330 thread is which when tracing with the in-process agent library. We
331 don't read anything from the address, and treat it as opaque; it's
332 the address itself that we assume is unique per-thread. */
333
334 int
335 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
336 {
337 #ifdef __x86_64__
338 int use_64bit = is_64bit_tdesc ();
339
340 if (use_64bit)
341 {
342 void *base;
343 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
344 {
345 *addr = (CORE_ADDR) (uintptr_t) base;
346 return 0;
347 }
348
349 return -1;
350 }
351 #endif
352
353 {
354 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
355 struct thread_info *thr = get_lwp_thread (lwp);
356 struct regcache *regcache = get_thread_regcache (thr, 1);
357 unsigned int desc[4];
358 ULONGEST gs = 0;
 359     const int reg_thread_area = 3; /* A selector's low 3 bits are the RPL
                                           and table bit; shift them off to get
                                           the GDT index.  */
360 int idx;
361
362 collect_register_by_name (regcache, "gs", &gs);
363
364 idx = gs >> reg_thread_area;
365
366 if (ptrace (PTRACE_GET_THREAD_AREA,
367 lwpid_of (thr),
368 (void *) (long) idx, (unsigned long) &desc) < 0)
369 return -1;
370
371 *addr = desc[1];
372 return 0;
373 }
374 }
375
376
377 \f
378 bool
379 x86_target::low_cannot_store_register (int regno)
380 {
381 #ifdef __x86_64__
382 if (is_64bit_tdesc ())
383 return false;
384 #endif
385
386 return regno >= I386_NUM_REGS;
387 }
388
389 bool
390 x86_target::low_cannot_fetch_register (int regno)
391 {
392 #ifdef __x86_64__
393 if (is_64bit_tdesc ())
394 return false;
395 #endif
396
397 return regno >= I386_NUM_REGS;
398 }
399
400 static void
401 x86_fill_gregset (struct regcache *regcache, void *buf)
402 {
403 int i;
404
405 #ifdef __x86_64__
406 if (register_size (regcache->tdesc, 0) == 8)
407 {
408 for (i = 0; i < X86_64_NUM_REGS; i++)
409 if (x86_64_regmap[i] != -1)
410 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
411
412 return;
413 }
414
415 /* 32-bit inferior registers need to be zero-extended.
416 Callers would read uninitialized memory otherwise. */
417 memset (buf, 0x00, X86_64_USER_REGS * 8);
418 #endif
419
420 for (i = 0; i < I386_NUM_REGS; i++)
421 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
422
423 collect_register_by_name (regcache, "orig_eax",
424 ((char *) buf) + ORIG_EAX * REGSIZE);
425
426 #ifdef __x86_64__
427 /* Sign extend EAX value to avoid potential syscall restart
428 problems.
429
430 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
431 for a detailed explanation. */
432 if (register_size (regcache->tdesc, 0) == 4)
433 {
434 void *ptr = ((gdb_byte *) buf
435 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
436
437 *(int64_t *) ptr = *(int32_t *) ptr;
438 }
439 #endif
440 }
441
442 static void
443 x86_store_gregset (struct regcache *regcache, const void *buf)
444 {
445 int i;
446
447 #ifdef __x86_64__
448 if (register_size (regcache->tdesc, 0) == 8)
449 {
450 for (i = 0; i < X86_64_NUM_REGS; i++)
451 if (x86_64_regmap[i] != -1)
452 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
453
454 return;
455 }
456 #endif
457
458 for (i = 0; i < I386_NUM_REGS; i++)
459 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
460
461 supply_register_by_name (regcache, "orig_eax",
462 ((char *) buf) + ORIG_EAX * REGSIZE);
463 }
464
465 static void
466 x86_fill_fpregset (struct regcache *regcache, void *buf)
467 {
468 #ifdef __x86_64__
469 i387_cache_to_fxsave (regcache, buf);
470 #else
471 i387_cache_to_fsave (regcache, buf);
472 #endif
473 }
474
475 static void
476 x86_store_fpregset (struct regcache *regcache, const void *buf)
477 {
478 #ifdef __x86_64__
479 i387_fxsave_to_cache (regcache, buf);
480 #else
481 i387_fsave_to_cache (regcache, buf);
482 #endif
483 }
484
485 #ifndef __x86_64__
486
487 static void
488 x86_fill_fpxregset (struct regcache *regcache, void *buf)
489 {
490 i387_cache_to_fxsave (regcache, buf);
491 }
492
493 static void
494 x86_store_fpxregset (struct regcache *regcache, const void *buf)
495 {
496 i387_fxsave_to_cache (regcache, buf);
497 }
498
499 #endif
500
501 static void
502 x86_fill_xstateregset (struct regcache *regcache, void *buf)
503 {
504 i387_cache_to_xsave (regcache, buf);
505 }
506
507 static void
508 x86_store_xstateregset (struct regcache *regcache, const void *buf)
509 {
510 i387_xsave_to_cache (regcache, buf);
511 }
512
513 /* ??? The non-biarch i386 case stores all the i387 regs twice.
514 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
515 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
 516    doesn't work.  It would be nice to avoid the duplication in the case where it
517 does work. Maybe the arch_setup routine could check whether it works
518 and update the supported regsets accordingly. */
519
520 static struct regset_info x86_regsets[] =
521 {
522 #ifdef HAVE_PTRACE_GETREGS
523 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
524 GENERAL_REGS,
525 x86_fill_gregset, x86_store_gregset },
526 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
527 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
528 # ifndef __x86_64__
529 # ifdef HAVE_PTRACE_GETFPXREGS
530 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
531 EXTENDED_REGS,
532 x86_fill_fpxregset, x86_store_fpxregset },
533 # endif
534 # endif
535 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
536 FP_REGS,
537 x86_fill_fpregset, x86_store_fpregset },
538 #endif /* HAVE_PTRACE_GETREGS */
539 NULL_REGSET
540 };
541
542 bool
543 x86_target::low_supports_breakpoints ()
544 {
545 return true;
546 }
547
548 CORE_ADDR
549 x86_target::low_get_pc (regcache *regcache)
550 {
551 int use_64bit = register_size (regcache->tdesc, 0) == 8;
552
553 if (use_64bit)
554 {
555 uint64_t pc;
556
557 collect_register_by_name (regcache, "rip", &pc);
558 return (CORE_ADDR) pc;
559 }
560 else
561 {
562 uint32_t pc;
563
564 collect_register_by_name (regcache, "eip", &pc);
565 return (CORE_ADDR) pc;
566 }
567 }
568
569 void
570 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
571 {
572 int use_64bit = register_size (regcache->tdesc, 0) == 8;
573
574 if (use_64bit)
575 {
576 uint64_t newpc = pc;
577
578 supply_register_by_name (regcache, "rip", &newpc);
579 }
580 else
581 {
582 uint32_t newpc = pc;
583
584 supply_register_by_name (regcache, "eip", &newpc);
585 }
586 }
587
588 int
589 x86_target::low_decr_pc_after_break ()
590 {
591 return 1;
592 }
593
594 \f
595 static const gdb_byte x86_breakpoint[] = { 0xCC };
596 #define x86_breakpoint_len 1
597
598 bool
599 x86_target::low_breakpoint_at (CORE_ADDR pc)
600 {
601 unsigned char c;
602
603 read_memory (pc, &c, 1);
604 if (c == 0xCC)
605 return true;
606
607 return false;
608 }
609 \f
610 /* Low-level function vector. */
611 struct x86_dr_low_type x86_dr_low =
612 {
613 x86_linux_dr_set_control,
614 x86_linux_dr_set_addr,
615 x86_linux_dr_get_addr,
616 x86_linux_dr_get_status,
617 x86_linux_dr_get_control,
618 sizeof (void *),
619 };
620 \f
621 /* Breakpoint/Watchpoint support. */
622
623 bool
624 x86_target::supports_z_point_type (char z_type)
625 {
626 switch (z_type)
627 {
628 case Z_PACKET_SW_BP:
629 case Z_PACKET_HW_BP:
630 case Z_PACKET_WRITE_WP:
631 case Z_PACKET_ACCESS_WP:
632 return true;
633 default:
634 return false;
635 }
636 }
637
638 int
639 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
640 int size, raw_breakpoint *bp)
641 {
642 struct process_info *proc = current_process ();
643
644 switch (type)
645 {
646 case raw_bkpt_type_hw:
647 case raw_bkpt_type_write_wp:
648 case raw_bkpt_type_access_wp:
649 {
650 enum target_hw_bp_type hw_type
651 = raw_bkpt_type_to_target_hw_bp_type (type);
652 struct x86_debug_reg_state *state
653 = &proc->priv->arch_private->debug_reg_state;
654
655 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
656 }
657
658 default:
659 /* Unsupported. */
660 return 1;
661 }
662 }
663
664 int
665 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
666 int size, raw_breakpoint *bp)
667 {
668 struct process_info *proc = current_process ();
669
670 switch (type)
671 {
672 case raw_bkpt_type_hw:
673 case raw_bkpt_type_write_wp:
674 case raw_bkpt_type_access_wp:
675 {
676 enum target_hw_bp_type hw_type
677 = raw_bkpt_type_to_target_hw_bp_type (type);
678 struct x86_debug_reg_state *state
679 = &proc->priv->arch_private->debug_reg_state;
680
681 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
682 }
683 default:
684 /* Unsupported. */
685 return 1;
686 }
687 }
688
689 bool
690 x86_target::low_stopped_by_watchpoint ()
691 {
692 struct process_info *proc = current_process ();
693 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
694 }
695
696 CORE_ADDR
697 x86_target::low_stopped_data_address ()
698 {
699 struct process_info *proc = current_process ();
700 CORE_ADDR addr;
701 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
702 &addr))
703 return addr;
704 return 0;
705 }
706 \f
707 /* Called when a new process is created. */
708
709 arch_process_info *
710 x86_target::low_new_process ()
711 {
712 struct arch_process_info *info = XCNEW (struct arch_process_info);
713
714 x86_low_init_dregs (&info->debug_reg_state);
715
716 return info;
717 }
718
719 /* Called when a process is being deleted. */
720
721 void
722 x86_target::low_delete_process (arch_process_info *info)
723 {
724 xfree (info);
725 }
726
727 void
728 x86_target::low_new_thread (lwp_info *lwp)
729 {
730 /* This comes from nat/. */
731 x86_linux_new_thread (lwp);
732 }
733
734 void
735 x86_target::low_delete_thread (arch_lwp_info *alwp)
736 {
737 /* This comes from nat/. */
738 x86_linux_delete_thread (alwp);
739 }
740
741 /* Target routine for new_fork. */
742
743 void
744 x86_target::low_new_fork (process_info *parent, process_info *child)
745 {
746 /* These are allocated by linux_add_process. */
747 gdb_assert (parent->priv != NULL
748 && parent->priv->arch_private != NULL);
749 gdb_assert (child->priv != NULL
750 && child->priv->arch_private != NULL);
751
 752   /* Linux kernels before 2.6.33 commit
 753      72f674d203cd230426437cdcf7dd6f681dad8b0d
 754      let the child inherit hardware debug registers from the parent
 755      on fork/vfork/clone.  Newer Linux kernels create such tasks with
756 zeroed debug registers.
757
758 GDB core assumes the child inherits the watchpoints/hw
759 breakpoints of the parent, and will remove them all from the
760 forked off process. Copy the debug registers mirrors into the
761 new process so that all breakpoints and watchpoints can be
762 removed together. The debug registers mirror will become zeroed
763 in the end before detaching the forked off process, thus making
764 this compatible with older Linux kernels too. */
765
766 *child->priv->arch_private = *parent->priv->arch_private;
767 }
768
769 void
770 x86_target::low_prepare_to_resume (lwp_info *lwp)
771 {
772 /* This comes from nat/. */
773 x86_linux_prepare_to_resume (lwp);
774 }
775
776 /* See nat/x86-dregs.h. */
777
778 struct x86_debug_reg_state *
779 x86_debug_reg_state (pid_t pid)
780 {
781 struct process_info *proc = find_process_pid (pid);
782
783 return &proc->priv->arch_private->debug_reg_state;
784 }
785 \f
786 /* When GDBSERVER is built as a 64-bit application on linux, the
787 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
788 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
789 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
790 conversion in-place ourselves. */
791
792 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
793 layout of the inferiors' architecture. Returns true if any
794 conversion was done; false otherwise. If DIRECTION is 1, then copy
795 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
796 INF. */
797
798 bool
799 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
800 {
801 #ifdef __x86_64__
802 unsigned int machine;
803 int tid = lwpid_of (current_thread);
804 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
805
806 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
807 if (!is_64bit_tdesc ())
808 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
809 FIXUP_32);
810 /* No fixup for native x32 GDB. */
811 else if (!is_elf64 && sizeof (void *) == 8)
812 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
813 FIXUP_X32);
814 #endif
815
816 return false;
817 }
818 \f
819 static int use_xml;
820
821 /* Format of XSAVE extended state is:
822 struct
823 {
824 fxsave_bytes[0..463]
825 sw_usable_bytes[464..511]
826 xstate_hdr_bytes[512..575]
827 avx_bytes[576..831]
828 future_state etc
829 };
830
831 Same memory layout will be used for the coredump NT_X86_XSTATE
832 representing the XSAVE extended state registers.
833
 834   The first 8 bytes of sw_usable_bytes, i.e. bytes 464..471, hold the OS-enabled
835 extended state mask, which is the same as the extended control register
836 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
837 together with the mask saved in the xstate_hdr_bytes to determine what
838 states the processor/OS supports and what state, used or initialized,
839 the process/thread is in. */
840 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
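/* As an illustrative sketch (the buffer name XSAVE_BUF is hypothetical, and
   this is not literally how the code below is written), the mask could be
   read straight out of a raw XSAVE buffer:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);

   x86_linux_read_description below does the equivalent by indexing an
   array of uint64_t at this offset.  */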
841
842 /* Does the current host support the GETFPXREGS request? The header
843 file may or may not define it, and even if it is defined, the
844 kernel will return EIO if it's running on a pre-SSE processor. */
845 int have_ptrace_getfpxregs =
846 #ifdef HAVE_PTRACE_GETFPXREGS
847 -1
848 #else
849 0
850 #endif
851 ;
852
853 /* Get Linux/x86 target description from running target. */
854
855 static const struct target_desc *
856 x86_linux_read_description (void)
857 {
858 unsigned int machine;
859 int is_elf64;
860 int xcr0_features;
861 int tid;
862 static uint64_t xcr0;
863 struct regset_info *regset;
864
865 tid = lwpid_of (current_thread);
866
867 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
868
869 if (sizeof (void *) == 4)
870 {
871 if (is_elf64 > 0)
872 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
873 #ifndef __x86_64__
874 else if (machine == EM_X86_64)
875 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
876 #endif
877 }
878
879 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
880 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
881 {
882 elf_fpxregset_t fpxregs;
883
884 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
885 {
886 have_ptrace_getfpxregs = 0;
887 have_ptrace_getregset = 0;
888 return i386_linux_read_description (X86_XSTATE_X87);
889 }
890 else
891 have_ptrace_getfpxregs = 1;
892 }
893 #endif
894
895 if (!use_xml)
896 {
897 x86_xcr0 = X86_XSTATE_SSE_MASK;
898
899 /* Don't use XML. */
900 #ifdef __x86_64__
901 if (machine == EM_X86_64)
902 return tdesc_amd64_linux_no_xml;
903 else
904 #endif
905 return tdesc_i386_linux_no_xml;
906 }
907
908 if (have_ptrace_getregset == -1)
909 {
910 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
911 struct iovec iov;
912
913 iov.iov_base = xstateregs;
914 iov.iov_len = sizeof (xstateregs);
915
916 /* Check if PTRACE_GETREGSET works. */
917 if (ptrace (PTRACE_GETREGSET, tid,
918 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
919 have_ptrace_getregset = 0;
920 else
921 {
922 have_ptrace_getregset = 1;
923
924 /* Get XCR0 from XSAVE extended state. */
925 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
926 / sizeof (uint64_t))];
927
928 /* Use PTRACE_GETREGSET if it is available. */
929 for (regset = x86_regsets;
930 regset->fill_function != NULL; regset++)
931 if (regset->get_request == PTRACE_GETREGSET)
932 regset->size = X86_XSTATE_SIZE (xcr0);
933 else if (regset->type != GENERAL_REGS)
934 regset->size = 0;
935 }
936 }
937
938 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
939 xcr0_features = (have_ptrace_getregset
940 && (xcr0 & X86_XSTATE_ALL_MASK));
941
942 if (xcr0_features)
943 x86_xcr0 = xcr0;
944
945 if (machine == EM_X86_64)
946 {
947 #ifdef __x86_64__
948 const target_desc *tdesc = NULL;
949
950 if (xcr0_features)
951 {
952 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
953 !is_elf64);
954 }
955
956 if (tdesc == NULL)
957 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
958 return tdesc;
959 #endif
960 }
961 else
962 {
963 const target_desc *tdesc = NULL;
964
965 if (xcr0_features)
966 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
967
968 if (tdesc == NULL)
969 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
970
971 return tdesc;
972 }
973
974 gdb_assert_not_reached ("failed to return tdesc");
975 }
976
 977 /* Update the target description of all processes; a new GDB has
 978    connected, and it may or may not support XML target descriptions.  */
979
980 void
981 x86_target::update_xmltarget ()
982 {
983 struct thread_info *saved_thread = current_thread;
984
985 /* Before changing the register cache's internal layout, flush the
986 contents of the current valid caches back to the threads, and
987 release the current regcache objects. */
988 regcache_release ();
989
990 for_each_process ([this] (process_info *proc) {
991 int pid = proc->pid;
992
993 /* Look up any thread of this process. */
994 current_thread = find_any_thread_of_pid (pid);
995
996 low_arch_setup ();
997 });
998
999 current_thread = saved_thread;
1000 }
1001
1002 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1003 PTRACE_GETREGSET. */
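/* For example, a qSupported feature list containing something like
   "xmlRegisters=i386,arm" (the exact list GDB sends is hypothetical here)
   is split on commas below; finding "i386" among the values sets use_xml.  */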
1004
1005 void
1006 x86_target::process_qsupported (char **features, int count)
1007 {
1008 int i;
1009
1010   /* Assume GDB doesn't support XML unless it says otherwise: if GDB sends
1011      "xmlRegisters=" with "i386" in its qSupported query, it supports x86
1012      XML target descriptions.  */
1013 use_xml = 0;
1014 for (i = 0; i < count; i++)
1015 {
1016 const char *feature = features[i];
1017
1018 if (startswith (feature, "xmlRegisters="))
1019 {
1020 char *copy = xstrdup (feature + 13);
1021
1022 char *saveptr;
1023 for (char *p = strtok_r (copy, ",", &saveptr);
1024 p != NULL;
1025 p = strtok_r (NULL, ",", &saveptr))
1026 {
1027 if (strcmp (p, "i386") == 0)
1028 {
1029 use_xml = 1;
1030 break;
1031 }
1032 }
1033
1034 free (copy);
1035 }
1036 }
1037 update_xmltarget ();
1038 }
1039
1040 /* Common for x86/x86-64. */
1041
1042 static struct regsets_info x86_regsets_info =
1043 {
1044 x86_regsets, /* regsets */
1045 0, /* num_regsets */
1046 NULL, /* disabled_regsets */
1047 };
1048
1049 #ifdef __x86_64__
1050 static struct regs_info amd64_linux_regs_info =
1051 {
1052 NULL, /* regset_bitmap */
1053 NULL, /* usrregs_info */
1054 &x86_regsets_info
1055 };
1056 #endif
1057 static struct usrregs_info i386_linux_usrregs_info =
1058 {
1059 I386_NUM_REGS,
1060 i386_regmap,
1061 };
1062
1063 static struct regs_info i386_linux_regs_info =
1064 {
1065 NULL, /* regset_bitmap */
1066 &i386_linux_usrregs_info,
1067 &x86_regsets_info
1068 };
1069
1070 const regs_info *
1071 x86_target::get_regs_info ()
1072 {
1073 #ifdef __x86_64__
1074 if (is_64bit_tdesc ())
1075 return &amd64_linux_regs_info;
1076 else
1077 #endif
1078 return &i386_linux_regs_info;
1079 }
1080
1081 /* Initialize the target description for the architecture of the
1082 inferior. */
1083
1084 void
1085 x86_target::low_arch_setup ()
1086 {
1087 current_process ()->tdesc = x86_linux_read_description ();
1088 }
1089
1090 bool
1091 x86_target::low_supports_catch_syscall ()
1092 {
1093 return true;
1094 }
1095
1096 /* Fill *SYSNO with the number of the syscall the inferior stopped at.
1097    This should only be called if LWP got a SYSCALL_SIGTRAP.  */
1098
1099 void
1100 x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
1101 {
1102 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1103
1104 if (use_64bit)
1105 {
1106 long l_sysno;
1107
1108 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1109 *sysno = (int) l_sysno;
1110 }
1111 else
1112 collect_register_by_name (regcache, "orig_eax", sysno);
1113 }
1114
1115 bool
1116 x86_target::supports_tracepoints ()
1117 {
1118 return true;
1119 }
1120
1121 static void
1122 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1123 {
1124 target_write_memory (*to, buf, len);
1125 *to += len;
1126 }
1127
1128 static int
1129 push_opcode (unsigned char *buf, const char *op)
1130 {
1131 unsigned char *buf_org = buf;
1132
1133 while (1)
1134 {
1135 char *endptr;
1136 unsigned long ul = strtoul (op, &endptr, 16);
1137
1138 if (endptr == op)
1139 break;
1140
1141 *buf++ = ul;
1142 op = endptr;
1143 }
1144
1145 return buf - buf_org;
1146 }
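/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 (mov %rsp,%rsi) into BUF and returns 3.  The jump-pad
   builders below use it to assemble short instruction sequences from
   strings of hex bytes.  */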
1147
1148 #ifdef __x86_64__
1149
1150 /* Build a jump pad that saves registers and calls a collection
1151 function. Writes a jump instruction to the jump pad to
1152    JJUMPAD_INSN.  The caller is responsible for writing it at the
1153 tracepoint address. */
1154
1155 static int
1156 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1157 CORE_ADDR collector,
1158 CORE_ADDR lockaddr,
1159 ULONGEST orig_size,
1160 CORE_ADDR *jump_entry,
1161 CORE_ADDR *trampoline,
1162 ULONGEST *trampoline_size,
1163 unsigned char *jjump_pad_insn,
1164 ULONGEST *jjump_pad_insn_size,
1165 CORE_ADDR *adjusted_insn_addr,
1166 CORE_ADDR *adjusted_insn_addr_end,
1167 char *err)
1168 {
1169 unsigned char buf[40];
1170 int i, offset;
1171 int64_t loffset;
1172
1173 CORE_ADDR buildaddr = *jump_entry;
1174
1175 /* Build the jump pad. */
1176
1177 /* First, do tracepoint data collection. Save registers. */
1178 i = 0;
1179 /* Need to ensure stack pointer saved first. */
1180 buf[i++] = 0x54; /* push %rsp */
1181 buf[i++] = 0x55; /* push %rbp */
1182 buf[i++] = 0x57; /* push %rdi */
1183 buf[i++] = 0x56; /* push %rsi */
1184 buf[i++] = 0x52; /* push %rdx */
1185 buf[i++] = 0x51; /* push %rcx */
1186 buf[i++] = 0x53; /* push %rbx */
1187 buf[i++] = 0x50; /* push %rax */
1188 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1189 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1190 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1191 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1192 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1193 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1194 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1195 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1196 buf[i++] = 0x9c; /* pushfq */
1197 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1198 buf[i++] = 0xbf;
1199 memcpy (buf + i, &tpaddr, 8);
1200 i += 8;
1201 buf[i++] = 0x57; /* push %rdi */
1202 append_insns (&buildaddr, i, buf);
1203
1204 /* Stack space for the collecting_t object. */
1205 i = 0;
1206 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1207 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1208 memcpy (buf + i, &tpoint, 8);
1209 i += 8;
1210 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1211 i += push_opcode (&buf[i],
1212 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1213 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1214 append_insns (&buildaddr, i, buf);
1215
1216 /* spin-lock. */
1217 i = 0;
1218 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1219 memcpy (&buf[i], (void *) &lockaddr, 8);
1220 i += 8;
1221 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1222 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1223 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1224 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1225 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1226 append_insns (&buildaddr, i, buf);
1227
1228 /* Set up the gdb_collect call. */
1229 /* At this point, (stack pointer + 0x18) is the base of our saved
1230 register block. */
1231
1232 i = 0;
1233 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1234 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1235
1236 /* tpoint address may be 64-bit wide. */
1237 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1238 memcpy (buf + i, &tpoint, 8);
1239 i += 8;
1240 append_insns (&buildaddr, i, buf);
1241
1242   /* The collector function, being in the shared library, may be
1243      more than 31 bits away from the jump pad.  */
1244 i = 0;
1245 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1246 memcpy (buf + i, &collector, 8);
1247 i += 8;
1248 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1249 append_insns (&buildaddr, i, buf);
1250
1251 /* Clear the spin-lock. */
1252 i = 0;
1253 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1254 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1255 memcpy (buf + i, &lockaddr, 8);
1256 i += 8;
1257 append_insns (&buildaddr, i, buf);
1258
1259 /* Remove stack that had been used for the collect_t object. */
1260 i = 0;
1261 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1262 append_insns (&buildaddr, i, buf);
1263
1264 /* Restore register state. */
1265 i = 0;
1266 buf[i++] = 0x48; /* add $0x8,%rsp */
1267 buf[i++] = 0x83;
1268 buf[i++] = 0xc4;
1269 buf[i++] = 0x08;
1270 buf[i++] = 0x9d; /* popfq */
1271 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1272 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1273 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1274 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1275 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1276 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1277 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1278 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1279 buf[i++] = 0x58; /* pop %rax */
1280 buf[i++] = 0x5b; /* pop %rbx */
1281 buf[i++] = 0x59; /* pop %rcx */
1282 buf[i++] = 0x5a; /* pop %rdx */
1283 buf[i++] = 0x5e; /* pop %rsi */
1284 buf[i++] = 0x5f; /* pop %rdi */
1285 buf[i++] = 0x5d; /* pop %rbp */
1286 buf[i++] = 0x5c; /* pop %rsp */
1287 append_insns (&buildaddr, i, buf);
1288
1289 /* Now, adjust the original instruction to execute in the jump
1290 pad. */
1291 *adjusted_insn_addr = buildaddr;
1292 relocate_instruction (&buildaddr, tpaddr);
1293 *adjusted_insn_addr_end = buildaddr;
1294
1295 /* Finally, write a jump back to the program. */
1296
1297 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1298 if (loffset > INT_MAX || loffset < INT_MIN)
1299 {
1300 sprintf (err,
1301 "E.Jump back from jump pad too far from tracepoint "
1302 "(offset 0x%" PRIx64 " > int32).", loffset);
1303 return 1;
1304 }
1305
1306 offset = (int) loffset;
1307 memcpy (buf, jump_insn, sizeof (jump_insn));
1308 memcpy (buf + 1, &offset, 4);
1309 append_insns (&buildaddr, sizeof (jump_insn), buf);
1310
1311 /* The jump pad is now built. Wire in a jump to our jump pad. This
1312 is always done last (by our caller actually), so that we can
1313 install fast tracepoints with threads running. This relies on
1314 the agent's atomic write support. */
1315 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1316 if (loffset > INT_MAX || loffset < INT_MIN)
1317 {
1318 sprintf (err,
1319 "E.Jump pad too far from tracepoint "
1320 "(offset 0x%" PRIx64 " > int32).", loffset);
1321 return 1;
1322 }
1323
1324 offset = (int) loffset;
1325
1326 memcpy (buf, jump_insn, sizeof (jump_insn));
1327 memcpy (buf + 1, &offset, 4);
1328 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1329 *jjump_pad_insn_size = sizeof (jump_insn);
1330
1331 /* Return the end address of our pad. */
1332 *jump_entry = buildaddr;
1333
1334 return 0;
1335 }
1336
1337 #endif /* __x86_64__ */
1338
1339 /* Build a jump pad that saves registers and calls a collection
1340 function. Writes a jump instruction to the jump pad to
1341    JJUMPAD_INSN.  The caller is responsible for writing it at the
1342 tracepoint address. */
1343
1344 static int
1345 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1346 CORE_ADDR collector,
1347 CORE_ADDR lockaddr,
1348 ULONGEST orig_size,
1349 CORE_ADDR *jump_entry,
1350 CORE_ADDR *trampoline,
1351 ULONGEST *trampoline_size,
1352 unsigned char *jjump_pad_insn,
1353 ULONGEST *jjump_pad_insn_size,
1354 CORE_ADDR *adjusted_insn_addr,
1355 CORE_ADDR *adjusted_insn_addr_end,
1356 char *err)
1357 {
1358 unsigned char buf[0x100];
1359 int i, offset;
1360 CORE_ADDR buildaddr = *jump_entry;
1361
1362 /* Build the jump pad. */
1363
1364 /* First, do tracepoint data collection. Save registers. */
1365 i = 0;
1366 buf[i++] = 0x60; /* pushad */
1367 buf[i++] = 0x68; /* push tpaddr aka $pc */
1368 *((int *)(buf + i)) = (int) tpaddr;
1369 i += 4;
1370 buf[i++] = 0x9c; /* pushf */
1371 buf[i++] = 0x1e; /* push %ds */
1372 buf[i++] = 0x06; /* push %es */
1373 buf[i++] = 0x0f; /* push %fs */
1374 buf[i++] = 0xa0;
1375 buf[i++] = 0x0f; /* push %gs */
1376 buf[i++] = 0xa8;
1377 buf[i++] = 0x16; /* push %ss */
1378 buf[i++] = 0x0e; /* push %cs */
1379 append_insns (&buildaddr, i, buf);
1380
1381 /* Stack space for the collecting_t object. */
1382 i = 0;
1383 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1384
1385 /* Build the object. */
1386 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1387 memcpy (buf + i, &tpoint, 4);
1388 i += 4;
1389 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1390
1391 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1392 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1393 append_insns (&buildaddr, i, buf);
1394
1395   /* spin-lock.  Note this uses cmpxchg, which is not available on the
1396      original i386; if we cared about that, this could use xchg instead.  */
1397
1398 i = 0;
1399 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1400 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1401 %esp,<lockaddr> */
1402 memcpy (&buf[i], (void *) &lockaddr, 4);
1403 i += 4;
1404 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1405 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1406 append_insns (&buildaddr, i, buf);
1407
1408
1409 /* Set up arguments to the gdb_collect call. */
1410 i = 0;
1411 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1412 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1413 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1414 append_insns (&buildaddr, i, buf);
1415
1416 i = 0;
1417 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1418 append_insns (&buildaddr, i, buf);
1419
1420 i = 0;
1421 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1422 memcpy (&buf[i], (void *) &tpoint, 4);
1423 i += 4;
1424 append_insns (&buildaddr, i, buf);
1425
1426 buf[0] = 0xe8; /* call <reladdr> */
1427 offset = collector - (buildaddr + sizeof (jump_insn));
1428 memcpy (buf + 1, &offset, 4);
1429 append_insns (&buildaddr, 5, buf);
1430 /* Clean up after the call. */
1431 buf[0] = 0x83; /* add $0x8,%esp */
1432 buf[1] = 0xc4;
1433 buf[2] = 0x08;
1434 append_insns (&buildaddr, 3, buf);
1435
1436
1437 /* Clear the spin-lock. This would need the LOCK prefix on older
1438 broken archs. */
1439 i = 0;
1440 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1441 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1442 memcpy (buf + i, &lockaddr, 4);
1443 i += 4;
1444 append_insns (&buildaddr, i, buf);
1445
1446
1447 /* Remove stack that had been used for the collect_t object. */
1448 i = 0;
1449 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1450 append_insns (&buildaddr, i, buf);
1451
1452 i = 0;
1453 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1454 buf[i++] = 0xc4;
1455 buf[i++] = 0x04;
1456 buf[i++] = 0x17; /* pop %ss */
1457 buf[i++] = 0x0f; /* pop %gs */
1458 buf[i++] = 0xa9;
1459 buf[i++] = 0x0f; /* pop %fs */
1460 buf[i++] = 0xa1;
1461 buf[i++] = 0x07; /* pop %es */
1462 buf[i++] = 0x1f; /* pop %ds */
1463 buf[i++] = 0x9d; /* popf */
1464 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1465 buf[i++] = 0xc4;
1466 buf[i++] = 0x04;
1467 buf[i++] = 0x61; /* popad */
1468 append_insns (&buildaddr, i, buf);
1469
1470 /* Now, adjust the original instruction to execute in the jump
1471 pad. */
1472 *adjusted_insn_addr = buildaddr;
1473 relocate_instruction (&buildaddr, tpaddr);
1474 *adjusted_insn_addr_end = buildaddr;
1475
1476 /* Write the jump back to the program. */
1477 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1478 memcpy (buf, jump_insn, sizeof (jump_insn));
1479 memcpy (buf + 1, &offset, 4);
1480 append_insns (&buildaddr, sizeof (jump_insn), buf);
1481
1482 /* The jump pad is now built. Wire in a jump to our jump pad. This
1483 is always done last (by our caller actually), so that we can
1484 install fast tracepoints with threads running. This relies on
1485 the agent's atomic write support. */
1486 if (orig_size == 4)
1487 {
1488 /* Create a trampoline. */
1489 *trampoline_size = sizeof (jump_insn);
1490 if (!claim_trampoline_space (*trampoline_size, trampoline))
1491 {
1492 /* No trampoline space available. */
1493 strcpy (err,
1494 "E.Cannot allocate trampoline space needed for fast "
1495 "tracepoints on 4-byte instructions.");
1496 return 1;
1497 }
1498
1499 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1500 memcpy (buf, jump_insn, sizeof (jump_insn));
1501 memcpy (buf + 1, &offset, 4);
1502 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1503
1504 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1505 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1506 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1507 memcpy (buf + 2, &offset, 2);
1508 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1509 *jjump_pad_insn_size = sizeof (small_jump_insn);
1510 }
1511 else
1512 {
1513 /* Else use a 32-bit relative jump instruction. */
1514 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1515 memcpy (buf, jump_insn, sizeof (jump_insn));
1516 memcpy (buf + 1, &offset, 4);
1517 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1518 *jjump_pad_insn_size = sizeof (jump_insn);
1519 }
1520
1521 /* Return the end address of our pad. */
1522 *jump_entry = buildaddr;
1523
1524 return 0;
1525 }
1526
1527 bool
1528 x86_target::supports_fast_tracepoints ()
1529 {
1530 return true;
1531 }
1532
1533 int
1534 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1535 CORE_ADDR tpaddr,
1536 CORE_ADDR collector,
1537 CORE_ADDR lockaddr,
1538 ULONGEST orig_size,
1539 CORE_ADDR *jump_entry,
1540 CORE_ADDR *trampoline,
1541 ULONGEST *trampoline_size,
1542 unsigned char *jjump_pad_insn,
1543 ULONGEST *jjump_pad_insn_size,
1544 CORE_ADDR *adjusted_insn_addr,
1545 CORE_ADDR *adjusted_insn_addr_end,
1546 char *err)
1547 {
1548 #ifdef __x86_64__
1549 if (is_64bit_tdesc ())
1550 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1551 collector, lockaddr,
1552 orig_size, jump_entry,
1553 trampoline, trampoline_size,
1554 jjump_pad_insn,
1555 jjump_pad_insn_size,
1556 adjusted_insn_addr,
1557 adjusted_insn_addr_end,
1558 err);
1559 #endif
1560
1561 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1562 collector, lockaddr,
1563 orig_size, jump_entry,
1564 trampoline, trampoline_size,
1565 jjump_pad_insn,
1566 jjump_pad_insn_size,
1567 adjusted_insn_addr,
1568 adjusted_insn_addr_end,
1569 err);
1570 }
1571
1572 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1573 architectures. */
1574
1575 int
1576 x86_target::get_min_fast_tracepoint_insn_len ()
1577 {
1578 static int warned_about_fast_tracepoints = 0;
1579
1580 #ifdef __x86_64__
1581 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1582 used for fast tracepoints. */
1583 if (is_64bit_tdesc ())
1584 return 5;
1585 #endif
1586
1587 if (agent_loaded_p ())
1588 {
1589 char errbuf[IPA_BUFSIZ];
1590
1591 errbuf[0] = '\0';
1592
1593 /* On x86, if trampolines are available, then 4-byte jump instructions
1594 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1595 with a 4-byte offset are used instead. */
1596 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1597 return 4;
1598 else
1599 {
1600       /* GDB has no channel to explain to the user why a shorter fast
1601 tracepoint is not possible, but at least make GDBserver
1602 mention that something has gone awry. */
1603 if (!warned_about_fast_tracepoints)
1604 {
1605 warning ("4-byte fast tracepoints not available; %s", errbuf);
1606 warned_about_fast_tracepoints = 1;
1607 }
1608 return 5;
1609 }
1610 }
1611 else
1612 {
1613 /* Indicate that the minimum length is currently unknown since the IPA
1614 has not loaded yet. */
1615 return 0;
1616 }
1617 }
1618
1619 static void
1620 add_insns (unsigned char *start, int len)
1621 {
1622 CORE_ADDR buildaddr = current_insn_ptr;
1623
1624 if (debug_threads)
1625 debug_printf ("Adding %d bytes of insn at %s\n",
1626 len, paddress (buildaddr));
1627
1628 append_insns (&buildaddr, len, start);
1629 current_insn_ptr = buildaddr;
1630 }
1631
1632 /* Our general strategy for emitting code is to avoid specifying raw
1633 bytes whenever possible, and instead copy a block of inline asm
1634 that is embedded in the function. This is a little messy, because
1635 we need to keep the compiler from discarding what looks like dead
1636 code, plus suppress various warnings. */
1637
1638 #define EMIT_ASM(NAME, INSNS) \
1639 do \
1640 { \
1641 extern unsigned char start_ ## NAME, end_ ## NAME; \
1642 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1643 __asm__ ("jmp end_" #NAME "\n" \
1644 "\t" "start_" #NAME ":" \
1645 "\t" INSNS "\n" \
1646 "\t" "end_" #NAME ":"); \
1647 } while (0)
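/* For instance, EMIT_ASM (amd64_pop, "pop %rax") has the compiler assemble
   the insn between the start_amd64_pop/end_amd64_pop labels, and add_insns
   then copies those bytes into the jump pad at code-generation time;
   amd64_emit_pop below is exactly this pattern.  */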
1648
1649 #ifdef __x86_64__
1650
1651 #define EMIT_ASM32(NAME,INSNS) \
1652 do \
1653 { \
1654 extern unsigned char start_ ## NAME, end_ ## NAME; \
1655 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1656 __asm__ (".code32\n" \
1657 "\t" "jmp end_" #NAME "\n" \
1658 "\t" "start_" #NAME ":\n" \
1659 "\t" INSNS "\n" \
1660 "\t" "end_" #NAME ":\n" \
1661 ".code64\n"); \
1662 } while (0)
1663
1664 #else
1665
1666 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1667
1668 #endif
1669
1670 #ifdef __x86_64__
1671
1672 static void
1673 amd64_emit_prologue (void)
1674 {
1675 EMIT_ASM (amd64_prologue,
1676 "pushq %rbp\n\t"
1677 "movq %rsp,%rbp\n\t"
1678 "sub $0x20,%rsp\n\t"
1679 "movq %rdi,-8(%rbp)\n\t"
1680 "movq %rsi,-16(%rbp)");
1681 }
1682
1683
1684 static void
1685 amd64_emit_epilogue (void)
1686 {
1687 EMIT_ASM (amd64_epilogue,
1688 "movq -16(%rbp),%rdi\n\t"
1689 "movq %rax,(%rdi)\n\t"
1690 "xor %rax,%rax\n\t"
1691 "leave\n\t"
1692 "ret");
1693 }
1694
1695 static void
1696 amd64_emit_add (void)
1697 {
1698 EMIT_ASM (amd64_add,
1699 "add (%rsp),%rax\n\t"
1700 "lea 0x8(%rsp),%rsp");
1701 }
1702
1703 static void
1704 amd64_emit_sub (void)
1705 {
1706 EMIT_ASM (amd64_sub,
1707 "sub %rax,(%rsp)\n\t"
1708 "pop %rax");
1709 }
1710
1711 static void
1712 amd64_emit_mul (void)
1713 {
1714 emit_error = 1;
1715 }
1716
1717 static void
1718 amd64_emit_lsh (void)
1719 {
1720 emit_error = 1;
1721 }
1722
1723 static void
1724 amd64_emit_rsh_signed (void)
1725 {
1726 emit_error = 1;
1727 }
1728
1729 static void
1730 amd64_emit_rsh_unsigned (void)
1731 {
1732 emit_error = 1;
1733 }
1734
1735 static void
1736 amd64_emit_ext (int arg)
1737 {
1738 switch (arg)
1739 {
1740 case 8:
1741 EMIT_ASM (amd64_ext_8,
1742 "cbtw\n\t"
1743 "cwtl\n\t"
1744 "cltq");
1745 break;
1746 case 16:
1747 EMIT_ASM (amd64_ext_16,
1748 "cwtl\n\t"
1749 "cltq");
1750 break;
1751 case 32:
1752 EMIT_ASM (amd64_ext_32,
1753 "cltq");
1754 break;
1755 default:
1756 emit_error = 1;
1757 }
1758 }
1759
1760 static void
1761 amd64_emit_log_not (void)
1762 {
1763 EMIT_ASM (amd64_log_not,
1764 "test %rax,%rax\n\t"
1765 "sete %cl\n\t"
1766 "movzbq %cl,%rax");
1767 }
1768
1769 static void
1770 amd64_emit_bit_and (void)
1771 {
1772 EMIT_ASM (amd64_and,
1773 "and (%rsp),%rax\n\t"
1774 "lea 0x8(%rsp),%rsp");
1775 }
1776
1777 static void
1778 amd64_emit_bit_or (void)
1779 {
1780 EMIT_ASM (amd64_or,
1781 "or (%rsp),%rax\n\t"
1782 "lea 0x8(%rsp),%rsp");
1783 }
1784
1785 static void
1786 amd64_emit_bit_xor (void)
1787 {
1788 EMIT_ASM (amd64_xor,
1789 "xor (%rsp),%rax\n\t"
1790 "lea 0x8(%rsp),%rsp");
1791 }
1792
1793 static void
1794 amd64_emit_bit_not (void)
1795 {
1796 EMIT_ASM (amd64_bit_not,
1797 "xorq $0xffffffffffffffff,%rax");
1798 }
1799
1800 static void
1801 amd64_emit_equal (void)
1802 {
1803 EMIT_ASM (amd64_equal,
1804 "cmp %rax,(%rsp)\n\t"
1805 "je .Lamd64_equal_true\n\t"
1806 "xor %rax,%rax\n\t"
1807 "jmp .Lamd64_equal_end\n\t"
1808 ".Lamd64_equal_true:\n\t"
1809 "mov $0x1,%rax\n\t"
1810 ".Lamd64_equal_end:\n\t"
1811 "lea 0x8(%rsp),%rsp");
1812 }
1813
1814 static void
1815 amd64_emit_less_signed (void)
1816 {
1817 EMIT_ASM (amd64_less_signed,
1818 "cmp %rax,(%rsp)\n\t"
1819 "jl .Lamd64_less_signed_true\n\t"
1820 "xor %rax,%rax\n\t"
1821 "jmp .Lamd64_less_signed_end\n\t"
1822 ".Lamd64_less_signed_true:\n\t"
1823 "mov $1,%rax\n\t"
1824 ".Lamd64_less_signed_end:\n\t"
1825 "lea 0x8(%rsp),%rsp");
1826 }
1827
1828 static void
1829 amd64_emit_less_unsigned (void)
1830 {
1831 EMIT_ASM (amd64_less_unsigned,
1832 "cmp %rax,(%rsp)\n\t"
1833 "jb .Lamd64_less_unsigned_true\n\t"
1834 "xor %rax,%rax\n\t"
1835 "jmp .Lamd64_less_unsigned_end\n\t"
1836 ".Lamd64_less_unsigned_true:\n\t"
1837 "mov $1,%rax\n\t"
1838 ".Lamd64_less_unsigned_end:\n\t"
1839 "lea 0x8(%rsp),%rsp");
1840 }
1841
1842 static void
1843 amd64_emit_ref (int size)
1844 {
1845 switch (size)
1846 {
1847 case 1:
1848 EMIT_ASM (amd64_ref1,
1849 "movb (%rax),%al");
1850 break;
1851 case 2:
1852 EMIT_ASM (amd64_ref2,
1853 "movw (%rax),%ax");
1854 break;
1855 case 4:
1856 EMIT_ASM (amd64_ref4,
1857 "movl (%rax),%eax");
1858 break;
1859 case 8:
1860 EMIT_ASM (amd64_ref8,
1861 "movq (%rax),%rax");
1862 break;
1863 }
1864 }
1865
1866 static void
1867 amd64_emit_if_goto (int *offset_p, int *size_p)
1868 {
1869 EMIT_ASM (amd64_if_goto,
1870 "mov %rax,%rcx\n\t"
1871 "pop %rax\n\t"
1872 "cmp $0,%rcx\n\t"
1873 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1874 if (offset_p)
1875 *offset_p = 10;
1876 if (size_p)
1877 *size_p = 4;
1878 }
1879
1880 static void
1881 amd64_emit_goto (int *offset_p, int *size_p)
1882 {
1883 EMIT_ASM (amd64_goto,
1884 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1885 if (offset_p)
1886 *offset_p = 1;
1887 if (size_p)
1888 *size_p = 4;
1889 }
1890
1891 static void
1892 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1893 {
1894 int diff = (to - (from + size));
1895 unsigned char buf[sizeof (int)];
1896
1897 if (size != 4)
1898 {
1899 emit_error = 1;
1900 return;
1901 }
1902
1903 memcpy (buf, &diff, sizeof (int));
1904 target_write_memory (from, buf, sizeof (int));
1905 }
1906
1907 static void
1908 amd64_emit_const (LONGEST num)
1909 {
1910 unsigned char buf[16];
1911 int i;
1912 CORE_ADDR buildaddr = current_insn_ptr;
1913
1914 i = 0;
1915 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1916 memcpy (&buf[i], &num, sizeof (num));
1917 i += 8;
1918 append_insns (&buildaddr, i, buf);
1919 current_insn_ptr = buildaddr;
1920 }
1921
1922 static void
1923 amd64_emit_call (CORE_ADDR fn)
1924 {
1925 unsigned char buf[16];
1926 int i;
1927 CORE_ADDR buildaddr;
1928 LONGEST offset64;
1929
1930   /* The destination function, being in the shared library, may be
1931      more than 31 bits away from the compiled code pad.  */
1932
1933 buildaddr = current_insn_ptr;
1934
1935 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1936
1937 i = 0;
1938
1939 if (offset64 > INT_MAX || offset64 < INT_MIN)
1940 {
1941       /* Offset is too large for a direct call, so call through a
1942          register instead (we avoid this when possible).  Use %r10: since it is
1943          call-clobbered, we don't have to push/pop it.  */
1944 buf[i++] = 0x48; /* mov $fn,%r10 */
1945 buf[i++] = 0xba;
1946 memcpy (buf + i, &fn, 8);
1947 i += 8;
1948 buf[i++] = 0xff; /* callq *%r10 */
1949 buf[i++] = 0xd2;
1950 }
1951 else
1952 {
1953 int offset32 = offset64; /* we know we can't overflow here. */
1954
1955 buf[i++] = 0xe8; /* call <reladdr> */
1956 memcpy (buf + i, &offset32, 4);
1957 i += 4;
1958 }
1959
1960 append_insns (&buildaddr, i, buf);
1961 current_insn_ptr = buildaddr;
1962 }
1963
1964 static void
1965 amd64_emit_reg (int reg)
1966 {
1967 unsigned char buf[16];
1968 int i;
1969 CORE_ADDR buildaddr;
1970
1971 /* Assume raw_regs is still in %rdi. */
1972 buildaddr = current_insn_ptr;
1973 i = 0;
1974 buf[i++] = 0xbe; /* mov $<n>,%esi */
1975 memcpy (&buf[i], &reg, sizeof (reg));
1976 i += 4;
1977 append_insns (&buildaddr, i, buf);
1978 current_insn_ptr = buildaddr;
1979 amd64_emit_call (get_raw_reg_func_addr ());
1980 }
1981
1982 static void
1983 amd64_emit_pop (void)
1984 {
1985 EMIT_ASM (amd64_pop,
1986 "pop %rax");
1987 }
1988
1989 static void
1990 amd64_emit_stack_flush (void)
1991 {
1992 EMIT_ASM (amd64_stack_flush,
1993 "push %rax");
1994 }
1995
1996 static void
1997 amd64_emit_zero_ext (int arg)
1998 {
1999 switch (arg)
2000 {
2001 case 8:
2002 EMIT_ASM (amd64_zero_ext_8,
2003 "and $0xff,%rax");
2004 break;
2005 case 16:
2006 EMIT_ASM (amd64_zero_ext_16,
2007 "and $0xffff,%rax");
2008 break;
2009 case 32:
2010 EMIT_ASM (amd64_zero_ext_32,
2011 "mov $0xffffffff,%rcx\n\t"
2012 "and %rcx,%rax");
2013 break;
2014 default:
2015 emit_error = 1;
2016 }
2017 }
2018
2019 static void
2020 amd64_emit_swap (void)
2021 {
2022 EMIT_ASM (amd64_swap,
2023 "mov %rax,%rcx\n\t"
2024 "pop %rax\n\t"
2025 "push %rcx");
2026 }
2027
2028 static void
2029 amd64_emit_stack_adjust (int n)
2030 {
2031 unsigned char buf[16];
2032 int i;
2033 CORE_ADDR buildaddr = current_insn_ptr;
2034
2035 i = 0;
2036 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2037 buf[i++] = 0x8d;
2038 buf[i++] = 0x64;
2039 buf[i++] = 0x24;
2040 /* This only handles adjustments up to 16, but we don't expect any more. */
2041 buf[i++] = n * 8;
2042 append_insns (&buildaddr, i, buf);
2043 current_insn_ptr = buildaddr;
2044 }
2045
2046 /* FN's prototype is `LONGEST(*fn)(int)'. */
2047
2048 static void
2049 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2050 {
2051 unsigned char buf[16];
2052 int i;
2053 CORE_ADDR buildaddr;
2054
2055 buildaddr = current_insn_ptr;
2056 i = 0;
2057 buf[i++] = 0xbf; /* movl $<n>,%edi */
2058 memcpy (&buf[i], &arg1, sizeof (arg1));
2059 i += 4;
2060 append_insns (&buildaddr, i, buf);
2061 current_insn_ptr = buildaddr;
2062 amd64_emit_call (fn);
2063 }
2064
2065 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2066
2067 static void
2068 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2069 {
2070 unsigned char buf[16];
2071 int i;
2072 CORE_ADDR buildaddr;
2073
2074 buildaddr = current_insn_ptr;
2075 i = 0;
2076 buf[i++] = 0xbf; /* movl $<n>,%edi */
2077 memcpy (&buf[i], &arg1, sizeof (arg1));
2078 i += 4;
2079 append_insns (&buildaddr, i, buf);
2080 current_insn_ptr = buildaddr;
2081 EMIT_ASM (amd64_void_call_2_a,
2082 /* Save away a copy of the stack top. */
2083 "push %rax\n\t"
2084 /* Also pass top as the second argument. */
2085 "mov %rax,%rsi");
2086 amd64_emit_call (fn);
2087 EMIT_ASM (amd64_void_call_2_b,
2088 /* Restore the stack top, %rax may have been trashed. */
2089 "pop %rax");
2090 }
2091
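/* The conditional-goto emitters below all share one pattern: compare the
   top two stack entries, fall through on "false", and on "true" execute a
   hand-encoded "jmp rel32" (the .byte 0xe9 sequence) whose displacement
   is patched later via amd64_write_goto_address.  *OFFSET_P is the offset
   of that 4-byte displacement within the emitted sequence and *SIZE_P its
   width: 4 (cmp) + 2 (short jne) + 5 (lea) + 1 (pop) = 12 bytes precede
   the 0xe9 opcode, so the displacement starts at offset 13.  */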
2092 static void
2093 amd64_emit_eq_goto (int *offset_p, int *size_p)
2094 {
2095 EMIT_ASM (amd64_eq,
2096 "cmp %rax,(%rsp)\n\t"
2097 "jne .Lamd64_eq_fallthru\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2099 "pop %rax\n\t"
2100 /* jmp, but don't trust the assembler to choose the right jump */
2101 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2102 ".Lamd64_eq_fallthru:\n\t"
2103 "lea 0x8(%rsp),%rsp\n\t"
2104 "pop %rax");
2105
2106 if (offset_p)
2107 *offset_p = 13;
2108 if (size_p)
2109 *size_p = 4;
2110 }
2111
2112 static void
2113 amd64_emit_ne_goto (int *offset_p, int *size_p)
2114 {
2115 EMIT_ASM (amd64_ne,
2116 "cmp %rax,(%rsp)\n\t"
2117 "je .Lamd64_ne_fallthru\n\t"
2118 "lea 0x8(%rsp),%rsp\n\t"
2119 "pop %rax\n\t"
2120 /* jmp, but don't trust the assembler to choose the right jump */
2121 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2122 ".Lamd64_ne_fallthru:\n\t"
2123 "lea 0x8(%rsp),%rsp\n\t"
2124 "pop %rax");
2125
2126 if (offset_p)
2127 *offset_p = 13;
2128 if (size_p)
2129 *size_p = 4;
2130 }
2131
2132 static void
2133 amd64_emit_lt_goto (int *offset_p, int *size_p)
2134 {
2135 EMIT_ASM (amd64_lt,
2136 "cmp %rax,(%rsp)\n\t"
2137 "jnl .Lamd64_lt_fallthru\n\t"
2138 "lea 0x8(%rsp),%rsp\n\t"
2139 "pop %rax\n\t"
2140 /* jmp, but don't trust the assembler to choose the right jump */
2141 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2142 ".Lamd64_lt_fallthru:\n\t"
2143 "lea 0x8(%rsp),%rsp\n\t"
2144 "pop %rax");
2145
2146 if (offset_p)
2147 *offset_p = 13;
2148 if (size_p)
2149 *size_p = 4;
2150 }
2151
2152 static void
2153 amd64_emit_le_goto (int *offset_p, int *size_p)
2154 {
2155 EMIT_ASM (amd64_le,
2156 "cmp %rax,(%rsp)\n\t"
2157 "jnle .Lamd64_le_fallthru\n\t"
2158 "lea 0x8(%rsp),%rsp\n\t"
2159 "pop %rax\n\t"
2160 /* jmp, but don't trust the assembler to choose the right jump */
2161 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2162 ".Lamd64_le_fallthru:\n\t"
2163 "lea 0x8(%rsp),%rsp\n\t"
2164 "pop %rax");
2165
2166 if (offset_p)
2167 *offset_p = 13;
2168 if (size_p)
2169 *size_p = 4;
2170 }
2171
2172 static void
2173 amd64_emit_gt_goto (int *offset_p, int *size_p)
2174 {
2175 EMIT_ASM (amd64_gt,
2176 "cmp %rax,(%rsp)\n\t"
2177 "jng .Lamd64_gt_fallthru\n\t"
2178 "lea 0x8(%rsp),%rsp\n\t"
2179 "pop %rax\n\t"
2180 /* jmp, but don't trust the assembler to choose the right jump */
2181 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2182 ".Lamd64_gt_fallthru:\n\t"
2183 "lea 0x8(%rsp),%rsp\n\t"
2184 "pop %rax");
2185
2186 if (offset_p)
2187 *offset_p = 13;
2188 if (size_p)
2189 *size_p = 4;
2190 }
2191
2192 static void
2193 amd64_emit_ge_goto (int *offset_p, int *size_p)
2194 {
2195 EMIT_ASM (amd64_ge,
2196 "cmp %rax,(%rsp)\n\t"
2197 "jnge .Lamd64_ge_fallthru\n\t"
2198 ".Lamd64_ge_jump:\n\t"
2199 "lea 0x8(%rsp),%rsp\n\t"
2200 "pop %rax\n\t"
2201 /* jmp, but don't trust the assembler to choose the right jump */
2202 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2203 ".Lamd64_ge_fallthru:\n\t"
2204 "lea 0x8(%rsp),%rsp\n\t"
2205 "pop %rax");
2206
2207 if (offset_p)
2208 *offset_p = 13;
2209 if (size_p)
2210 *size_p = 4;
2211 }
2212
2213 struct emit_ops amd64_emit_ops =
2214 {
2215 amd64_emit_prologue,
2216 amd64_emit_epilogue,
2217 amd64_emit_add,
2218 amd64_emit_sub,
2219 amd64_emit_mul,
2220 amd64_emit_lsh,
2221 amd64_emit_rsh_signed,
2222 amd64_emit_rsh_unsigned,
2223 amd64_emit_ext,
2224 amd64_emit_log_not,
2225 amd64_emit_bit_and,
2226 amd64_emit_bit_or,
2227 amd64_emit_bit_xor,
2228 amd64_emit_bit_not,
2229 amd64_emit_equal,
2230 amd64_emit_less_signed,
2231 amd64_emit_less_unsigned,
2232 amd64_emit_ref,
2233 amd64_emit_if_goto,
2234 amd64_emit_goto,
2235 amd64_write_goto_address,
2236 amd64_emit_const,
2237 amd64_emit_call,
2238 amd64_emit_reg,
2239 amd64_emit_pop,
2240 amd64_emit_stack_flush,
2241 amd64_emit_zero_ext,
2242 amd64_emit_swap,
2243 amd64_emit_stack_adjust,
2244 amd64_emit_int_call_1,
2245 amd64_emit_void_call_2,
2246 amd64_emit_eq_goto,
2247 amd64_emit_ne_goto,
2248 amd64_emit_lt_goto,
2249 amd64_emit_le_goto,
2250 amd64_emit_gt_goto,
2251 amd64_emit_ge_goto
2252 };
2253
2254 #endif /* __x86_64__ */
2255
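/* The i386 emitters keep the 64-bit top of the expression stack split
   across two registers: %eax holds the low 32 bits and %ebx the high 32
   bits.  The remaining entries live on the hardware stack as 8-byte
   pairs, low word at the lower address.  */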
2256 static void
2257 i386_emit_prologue (void)
2258 {
2259 EMIT_ASM32 (i386_prologue,
2260 "push %ebp\n\t"
2261 "mov %esp,%ebp\n\t"
2262 "push %ebx");
2263 /* At this point, the raw regs base address is at 8(%ebp), and the
2264 value pointer is at 12(%ebp). */
2265 }
2266
2267 static void
2268 i386_emit_epilogue (void)
2269 {
2270 EMIT_ASM32 (i386_epilogue,
2271 "mov 12(%ebp),%ecx\n\t"
2272 "mov %eax,(%ecx)\n\t"
2273 "mov %ebx,0x4(%ecx)\n\t"
2274 "xor %eax,%eax\n\t"
2275 "pop %ebx\n\t"
2276 "pop %ebp\n\t"
2277 "ret");
2278 }
2279
2280 static void
2281 i386_emit_add (void)
2282 {
2283 EMIT_ASM32 (i386_add,
2284 "add (%esp),%eax\n\t"
2285 "adc 0x4(%esp),%ebx\n\t"
2286 "lea 0x8(%esp),%esp");
2287 }
2288
2289 static void
2290 i386_emit_sub (void)
2291 {
2292 EMIT_ASM32 (i386_sub,
2293 "subl %eax,(%esp)\n\t"
2294 "sbbl %ebx,4(%esp)\n\t"
2295 "pop %eax\n\t"
2296 "pop %ebx\n\t");
2297 }
2298
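/* 64-bit multiply and the shift operations are not implemented in the
   i386 emitters; setting emit_error makes the caller give up on compiling
   this expression to native code.  */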
2299 static void
2300 i386_emit_mul (void)
2301 {
2302 emit_error = 1;
2303 }
2304
2305 static void
2306 i386_emit_lsh (void)
2307 {
2308 emit_error = 1;
2309 }
2310
2311 static void
2312 i386_emit_rsh_signed (void)
2313 {
2314 emit_error = 1;
2315 }
2316
2317 static void
2318 i386_emit_rsh_unsigned (void)
2319 {
2320 emit_error = 1;
2321 }
2322
2323 static void
2324 i386_emit_ext (int arg)
2325 {
2326 switch (arg)
2327 {
2328 case 8:
2329 EMIT_ASM32 (i386_ext_8,
2330 "cbtw\n\t"
2331 "cwtl\n\t"
2332 "movl %eax,%ebx\n\t"
2333 "sarl $31,%ebx");
2334 break;
2335 case 16:
2336 EMIT_ASM32 (i386_ext_16,
2337 "cwtl\n\t"
2338 "movl %eax,%ebx\n\t"
2339 "sarl $31,%ebx");
2340 break;
2341 case 32:
2342 EMIT_ASM32 (i386_ext_32,
2343 "movl %eax,%ebx\n\t"
2344 "sarl $31,%ebx");
2345 break;
2346 default:
2347 emit_error = 1;
2348 }
2349 }
2350
2351 static void
2352 i386_emit_log_not (void)
2353 {
2354 EMIT_ASM32 (i386_log_not,
2355 "or %ebx,%eax\n\t"
2356 "test %eax,%eax\n\t"
2357 "sete %cl\n\t"
2358 "xor %ebx,%ebx\n\t"
2359 "movzbl %cl,%eax");
2360 }
2361
2362 static void
2363 i386_emit_bit_and (void)
2364 {
2365 EMIT_ASM32 (i386_and,
2366 "and (%esp),%eax\n\t"
2367 "and 0x4(%esp),%ebx\n\t"
2368 "lea 0x8(%esp),%esp");
2369 }
2370
2371 static void
2372 i386_emit_bit_or (void)
2373 {
2374 EMIT_ASM32 (i386_or,
2375 "or (%esp),%eax\n\t"
2376 "or 0x4(%esp),%ebx\n\t"
2377 "lea 0x8(%esp),%esp");
2378 }
2379
2380 static void
2381 i386_emit_bit_xor (void)
2382 {
2383 EMIT_ASM32 (i386_xor,
2384 "xor (%esp),%eax\n\t"
2385 "xor 0x4(%esp),%ebx\n\t"
2386 "lea 0x8(%esp),%esp");
2387 }
2388
2389 static void
2390 i386_emit_bit_not (void)
2391 {
2392 EMIT_ASM32 (i386_bit_not,
2393 "xor $0xffffffff,%eax\n\t"
2394 "xor $0xffffffff,%ebx\n\t");
2395 }
2396
2397 static void
2398 i386_emit_equal (void)
2399 {
2400 EMIT_ASM32 (i386_equal,
2401 "cmpl %ebx,4(%esp)\n\t"
2402 "jne .Li386_equal_false\n\t"
2403 "cmpl %eax,(%esp)\n\t"
2404 "je .Li386_equal_true\n\t"
2405 ".Li386_equal_false:\n\t"
2406 "xor %eax,%eax\n\t"
2407 "jmp .Li386_equal_end\n\t"
2408 ".Li386_equal_true:\n\t"
2409 "mov $1,%eax\n\t"
2410 ".Li386_equal_end:\n\t"
2411 "xor %ebx,%ebx\n\t"
2412 "lea 0x8(%esp),%esp");
2413 }
2414
2415 static void
2416 i386_emit_less_signed (void)
2417 {
2418 EMIT_ASM32 (i386_less_signed,
2419 "cmpl %ebx,4(%esp)\n\t"
2420 "jl .Li386_less_signed_true\n\t"
2421 "jne .Li386_less_signed_false\n\t"
2422 "cmpl %eax,(%esp)\n\t"
2423 "jl .Li386_less_signed_true\n\t"
2424 ".Li386_less_signed_false:\n\t"
2425 "xor %eax,%eax\n\t"
2426 "jmp .Li386_less_signed_end\n\t"
2427 ".Li386_less_signed_true:\n\t"
2428 "mov $1,%eax\n\t"
2429 ".Li386_less_signed_end:\n\t"
2430 "xor %ebx,%ebx\n\t"
2431 "lea 0x8(%esp),%esp");
2432 }
2433
2434 static void
2435 i386_emit_less_unsigned (void)
2436 {
2437 EMIT_ASM32 (i386_less_unsigned,
2438 "cmpl %ebx,4(%esp)\n\t"
2439 "jb .Li386_less_unsigned_true\n\t"
2440 "jne .Li386_less_unsigned_false\n\t"
2441 "cmpl %eax,(%esp)\n\t"
2442 "jb .Li386_less_unsigned_true\n\t"
2443 ".Li386_less_unsigned_false:\n\t"
2444 "xor %eax,%eax\n\t"
2445 "jmp .Li386_less_unsigned_end\n\t"
2446 ".Li386_less_unsigned_true:\n\t"
2447 "mov $1,%eax\n\t"
2448 ".Li386_less_unsigned_end:\n\t"
2449 "xor %ebx,%ebx\n\t"
2450 "lea 0x8(%esp),%esp");
2451 }
2452
2453 static void
2454 i386_emit_ref (int size)
2455 {
2456 switch (size)
2457 {
2458 case 1:
2459 EMIT_ASM32 (i386_ref1,
2460 "movb (%eax),%al");
2461 break;
2462 case 2:
2463 EMIT_ASM32 (i386_ref2,
2464 "movw (%eax),%ax");
2465 break;
2466 case 4:
2467 EMIT_ASM32 (i386_ref4,
2468 "movl (%eax),%eax");
2469 break;
2470 case 8:
2471 EMIT_ASM32 (i386_ref8,
2472 "movl 4(%eax),%ebx\n\t"
2473 "movl (%eax),%eax");
2474 break;
2475 }
2476 }
2477
2478 static void
2479 i386_emit_if_goto (int *offset_p, int *size_p)
2480 {
2481 EMIT_ASM32 (i386_if_goto,
2482 "mov %eax,%ecx\n\t"
2483 "or %ebx,%ecx\n\t"
2484 "pop %eax\n\t"
2485 "pop %ebx\n\t"
2486 "cmpl $0,%ecx\n\t"
2487 /* Don't trust the assembler to choose the right jump */
2488 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2489
2490 if (offset_p)
2491 *offset_p = 11; /* This must match the sequence above: 2+2+1+1+3 = 9 bytes precede the 0x0f 0x85 opcode, so the 4-byte displacement starts at offset 11. */
2492 if (size_p)
2493 *size_p = 4;
2494 }
2495
2496 static void
2497 i386_emit_goto (int *offset_p, int *size_p)
2498 {
2499 EMIT_ASM32 (i386_goto,
2500 /* Don't trust the assembler to choose the right jump */
2501 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2502 if (offset_p)
2503 *offset_p = 1;
2504 if (size_p)
2505 *size_p = 4;
2506 }
2507
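/* Patch a previously emitted jump: FROM is the address of its SIZE-byte
   displacement field and TO the desired target, so the displacement is
   computed relative to the end of the field (FROM + SIZE).  */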
2508 static void
2509 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2510 {
2511 int diff = (to - (from + size));
2512 unsigned char buf[sizeof (int)];
2513
2514 /* We're only doing 4-byte sizes at the moment. */
2515 if (size != 4)
2516 {
2517 emit_error = 1;
2518 return;
2519 }
2520
2521 memcpy (buf, &diff, sizeof (int));
2522 target_write_memory (from, buf, sizeof (int));
2523 }
2524
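/* Emit code to load the 64-bit constant NUM into the %eax/%ebx pair,
   skipping the high-word move when the upper 32 bits are zero.  */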
2525 static void
2526 i386_emit_const (LONGEST num)
2527 {
2528 unsigned char buf[16];
2529 int i, hi, lo;
2530 CORE_ADDR buildaddr = current_insn_ptr;
2531
2532 i = 0;
2533 buf[i++] = 0xb8; /* mov $<n>,%eax */
2534 lo = num & 0xffffffff;
2535 memcpy (&buf[i], &lo, sizeof (lo));
2536 i += 4;
2537 hi = ((num >> 32) & 0xffffffff);
2538 if (hi)
2539 {
2540 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2541 memcpy (&buf[i], &hi, sizeof (hi));
2542 i += 4;
2543 }
2544 else
2545 {
2546 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2547 }
2548 append_insns (&buildaddr, i, buf);
2549 current_insn_ptr = buildaddr;
2550 }
2551
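/* Emit a 5-byte "call rel32" to FN; the displacement is relative to the
   end of the instruction, hence the "buildaddr + 5".  */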
2552 static void
2553 i386_emit_call (CORE_ADDR fn)
2554 {
2555 unsigned char buf[16];
2556 int i, offset;
2557 CORE_ADDR buildaddr;
2558
2559 buildaddr = current_insn_ptr;
2560 i = 0;
2561 buf[i++] = 0xe8; /* call <reladdr> */
2562 offset = ((int) fn) - (buildaddr + 5);
2563 memcpy (buf + 1, &offset, 4);
2564 append_insns (&buildaddr, 5, buf);
2565 current_insn_ptr = buildaddr;
2566 }
2567
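/* Emit code to fetch raw register REG: pass the raw register block
   (saved at 8(%ebp) by the prologue) and REG to the get_raw_reg helper
   on the stack, then zero-extend the 32-bit result into %eax/%ebx.  */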
2568 static void
2569 i386_emit_reg (int reg)
2570 {
2571 unsigned char buf[16];
2572 int i;
2573 CORE_ADDR buildaddr;
2574
2575 EMIT_ASM32 (i386_reg_a,
2576 "sub $0x8,%esp");
2577 buildaddr = current_insn_ptr;
2578 i = 0;
2579 buf[i++] = 0xb8; /* mov $<n>,%eax */
2580 memcpy (&buf[i], &reg, sizeof (reg));
2581 i += 4;
2582 append_insns (&buildaddr, i, buf);
2583 current_insn_ptr = buildaddr;
2584 EMIT_ASM32 (i386_reg_b,
2585 "mov %eax,4(%esp)\n\t"
2586 "mov 8(%ebp),%eax\n\t"
2587 "mov %eax,(%esp)");
2588 i386_emit_call (get_raw_reg_func_addr ());
2589 EMIT_ASM32 (i386_reg_c,
2590 "xor %ebx,%ebx\n\t"
2591 "lea 0x8(%esp),%esp");
2592 }
2593
2594 static void
2595 i386_emit_pop (void)
2596 {
2597 EMIT_ASM32 (i386_pop,
2598 "pop %eax\n\t"
2599 "pop %ebx");
2600 }
2601
2602 static void
2603 i386_emit_stack_flush (void)
2604 {
2605 EMIT_ASM32 (i386_stack_flush,
2606 "push %ebx\n\t"
2607 "push %eax");
2608 }
2609
2610 static void
2611 i386_emit_zero_ext (int arg)
2612 {
2613 switch (arg)
2614 {
2615 case 8:
2616 EMIT_ASM32 (i386_zero_ext_8,
2617 "and $0xff,%eax\n\t"
2618 "xor %ebx,%ebx");
2619 break;
2620 case 16:
2621 EMIT_ASM32 (i386_zero_ext_16,
2622 "and $0xffff,%eax\n\t"
2623 "xor %ebx,%ebx");
2624 break;
2625 case 32:
2626 EMIT_ASM32 (i386_zero_ext_32,
2627 "xor %ebx,%ebx");
2628 break;
2629 default:
2630 emit_error = 1;
2631 }
2632 }
2633
2634 static void
2635 i386_emit_swap (void)
2636 {
2637 EMIT_ASM32 (i386_swap,
2638 "mov %eax,%ecx\n\t"
2639 "mov %ebx,%edx\n\t"
2640 "pop %eax\n\t"
2641 "pop %ebx\n\t"
2642 "push %edx\n\t"
2643 "push %ecx");
2644 }
2645
2646 static void
2647 i386_emit_stack_adjust (int n)
2648 {
2649 unsigned char buf[16];
2650 int i;
2651 CORE_ADDR buildaddr = current_insn_ptr;
2652
2653 i = 0;
2654 buf[i++] = 0x8d; /* lea <n>(%esp),%esp */
2655 buf[i++] = 0x64;
2656 buf[i++] = 0x24;
2657 buf[i++] = n * 8;
2658 append_insns (&buildaddr, i, buf);
2659 current_insn_ptr = buildaddr;
2660 }
2661
2662 /* FN's prototype is `LONGEST(*fn)(int)'. */
2663
2664 static void
2665 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2666 {
2667 unsigned char buf[16];
2668 int i;
2669 CORE_ADDR buildaddr;
2670
2671 EMIT_ASM32 (i386_int_call_1_a,
2672 /* Reserve a bit of stack space. */
2673 "sub $0x8,%esp");
2674 /* Put the one argument on the stack. */
2675 buildaddr = current_insn_ptr;
2676 i = 0;
2677 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2678 buf[i++] = 0x04;
2679 buf[i++] = 0x24;
2680 memcpy (&buf[i], &arg1, sizeof (arg1));
2681 i += 4;
2682 append_insns (&buildaddr, i, buf);
2683 current_insn_ptr = buildaddr;
2684 i386_emit_call (fn);
2685 EMIT_ASM32 (i386_int_call_1_c,
2686 "mov %edx,%ebx\n\t"
2687 "lea 0x8(%esp),%esp");
2688 }
2689
2690 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2691
2692 static void
2693 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2694 {
2695 unsigned char buf[16];
2696 int i;
2697 CORE_ADDR buildaddr;
2698
2699 EMIT_ASM32 (i386_void_call_2_a,
2700 /* Preserve %eax only; we don't have to worry about %ebx. */
2701 "push %eax\n\t"
2702 /* Reserve a bit of stack space for arguments. */
2703 "sub $0x10,%esp\n\t"
2704 /* Copy "top" to the second argument position. (Note that
2705 we can't assume the function won't scribble on its
2706 arguments, so don't try to restore the value from these slots.) */
2707 "mov %eax,4(%esp)\n\t"
2708 "mov %ebx,8(%esp)");
2709 /* Put the first argument on the stack. */
2710 buildaddr = current_insn_ptr;
2711 i = 0;
2712 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2713 buf[i++] = 0x04;
2714 buf[i++] = 0x24;
2715 memcpy (&buf[i], &arg1, sizeof (arg1));
2716 i += 4;
2717 append_insns (&buildaddr, i, buf);
2718 current_insn_ptr = buildaddr;
2719 i386_emit_call (fn);
2720 EMIT_ASM32 (i386_void_call_2_b,
2721 "lea 0x10(%esp),%esp\n\t"
2722 /* Restore original stack top. */
2723 "pop %eax");
2724 }
2725
2726
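/* The i386 conditional-goto emitters mirror the amd64 ones, except that
   both halves of the 64-bit pair have to be compared.  As before,
   *OFFSET_P (18 or 20 here) is the offset of the hand-encoded jump's
   4-byte displacement, later patched by i386_write_goto_address.  */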
2727 static void
2728 i386_emit_eq_goto (int *offset_p, int *size_p)
2729 {
2730 EMIT_ASM32 (eq,
2731 /* Check the low half first; it is more likely to decide the result. */
2732 "cmpl %eax,(%esp)\n\t"
2733 "jne .Leq_fallthru\n\t"
2734 "cmpl %ebx,4(%esp)\n\t"
2735 "jne .Leq_fallthru\n\t"
2736 "lea 0x8(%esp),%esp\n\t"
2737 "pop %eax\n\t"
2738 "pop %ebx\n\t"
2739 /* jmp, but don't trust the assembler to choose the right jump */
2740 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2741 ".Leq_fallthru:\n\t"
2742 "lea 0x8(%esp),%esp\n\t"
2743 "pop %eax\n\t"
2744 "pop %ebx");
2745
2746 if (offset_p)
2747 *offset_p = 18;
2748 if (size_p)
2749 *size_p = 4;
2750 }
2751
2752 static void
2753 i386_emit_ne_goto (int *offset_p, int *size_p)
2754 {
2755 EMIT_ASM32 (ne,
2756 /* Check the low half first; it is more likely to decide the result. */
2757 "cmpl %eax,(%esp)\n\t"
2758 "jne .Lne_jump\n\t"
2759 "cmpl %ebx,4(%esp)\n\t"
2760 "je .Lne_fallthru\n\t"
2761 ".Lne_jump:\n\t"
2762 "lea 0x8(%esp),%esp\n\t"
2763 "pop %eax\n\t"
2764 "pop %ebx\n\t"
2765 /* jmp, but don't trust the assembler to choose the right jump */
2766 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2767 ".Lne_fallthru:\n\t"
2768 "lea 0x8(%esp),%esp\n\t"
2769 "pop %eax\n\t"
2770 "pop %ebx");
2771
2772 if (offset_p)
2773 *offset_p = 18;
2774 if (size_p)
2775 *size_p = 4;
2776 }
2777
2778 static void
2779 i386_emit_lt_goto (int *offset_p, int *size_p)
2780 {
2781 EMIT_ASM32 (lt,
2782 "cmpl %ebx,4(%esp)\n\t"
2783 "jl .Llt_jump\n\t"
2784 "jne .Llt_fallthru\n\t"
2785 "cmpl %eax,(%esp)\n\t"
2786 "jnl .Llt_fallthru\n\t"
2787 ".Llt_jump:\n\t"
2788 "lea 0x8(%esp),%esp\n\t"
2789 "pop %eax\n\t"
2790 "pop %ebx\n\t"
2791 /* jmp, but don't trust the assembler to choose the right jump */
2792 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2793 ".Llt_fallthru:\n\t"
2794 "lea 0x8(%esp),%esp\n\t"
2795 "pop %eax\n\t"
2796 "pop %ebx");
2797
2798 if (offset_p)
2799 *offset_p = 20;
2800 if (size_p)
2801 *size_p = 4;
2802 }
2803
2804 static void
2805 i386_emit_le_goto (int *offset_p, int *size_p)
2806 {
2807 EMIT_ASM32 (le,
2808 "cmpl %ebx,4(%esp)\n\t"
2809 "jle .Lle_jump\n\t"
2810 "jne .Lle_fallthru\n\t"
2811 "cmpl %eax,(%esp)\n\t"
2812 "jnle .Lle_fallthru\n\t"
2813 ".Lle_jump:\n\t"
2814 "lea 0x8(%esp),%esp\n\t"
2815 "pop %eax\n\t"
2816 "pop %ebx\n\t"
2817 /* jmp, but don't trust the assembler to choose the right jump */
2818 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2819 ".Lle_fallthru:\n\t"
2820 "lea 0x8(%esp),%esp\n\t"
2821 "pop %eax\n\t"
2822 "pop %ebx");
2823
2824 if (offset_p)
2825 *offset_p = 20;
2826 if (size_p)
2827 *size_p = 4;
2828 }
2829
2830 static void
2831 i386_emit_gt_goto (int *offset_p, int *size_p)
2832 {
2833 EMIT_ASM32 (gt,
2834 "cmpl %ebx,4(%esp)\n\t"
2835 "jg .Lgt_jump\n\t"
2836 "jne .Lgt_fallthru\n\t"
2837 "cmpl %eax,(%esp)\n\t"
2838 "jng .Lgt_fallthru\n\t"
2839 ".Lgt_jump:\n\t"
2840 "lea 0x8(%esp),%esp\n\t"
2841 "pop %eax\n\t"
2842 "pop %ebx\n\t"
2843 /* jmp, but don't trust the assembler to choose the right jump */
2844 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2845 ".Lgt_fallthru:\n\t"
2846 "lea 0x8(%esp),%esp\n\t"
2847 "pop %eax\n\t"
2848 "pop %ebx");
2849
2850 if (offset_p)
2851 *offset_p = 20;
2852 if (size_p)
2853 *size_p = 4;
2854 }
2855
2856 static void
2857 i386_emit_ge_goto (int *offset_p, int *size_p)
2858 {
2859 EMIT_ASM32 (ge,
2860 "cmpl %ebx,4(%esp)\n\t"
2861 "jge .Lge_jump\n\t"
2862 "jne .Lge_fallthru\n\t"
2863 "cmpl %eax,(%esp)\n\t"
2864 "jnge .Lge_fallthru\n\t"
2865 ".Lge_jump:\n\t"
2866 "lea 0x8(%esp),%esp\n\t"
2867 "pop %eax\n\t"
2868 "pop %ebx\n\t"
2869 /* jmp, but don't trust the assembler to choose the right jump */
2870 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2871 ".Lge_fallthru:\n\t"
2872 "lea 0x8(%esp),%esp\n\t"
2873 "pop %eax\n\t"
2874 "pop %ebx");
2875
2876 if (offset_p)
2877 *offset_p = 20;
2878 if (size_p)
2879 *size_p = 4;
2880 }
2881
2882 struct emit_ops i386_emit_ops =
2883 {
2884 i386_emit_prologue,
2885 i386_emit_epilogue,
2886 i386_emit_add,
2887 i386_emit_sub,
2888 i386_emit_mul,
2889 i386_emit_lsh,
2890 i386_emit_rsh_signed,
2891 i386_emit_rsh_unsigned,
2892 i386_emit_ext,
2893 i386_emit_log_not,
2894 i386_emit_bit_and,
2895 i386_emit_bit_or,
2896 i386_emit_bit_xor,
2897 i386_emit_bit_not,
2898 i386_emit_equal,
2899 i386_emit_less_signed,
2900 i386_emit_less_unsigned,
2901 i386_emit_ref,
2902 i386_emit_if_goto,
2903 i386_emit_goto,
2904 i386_write_goto_address,
2905 i386_emit_const,
2906 i386_emit_call,
2907 i386_emit_reg,
2908 i386_emit_pop,
2909 i386_emit_stack_flush,
2910 i386_emit_zero_ext,
2911 i386_emit_swap,
2912 i386_emit_stack_adjust,
2913 i386_emit_int_call_1,
2914 i386_emit_void_call_2,
2915 i386_emit_eq_goto,
2916 i386_emit_ne_goto,
2917 i386_emit_lt_goto,
2918 i386_emit_le_goto,
2919 i386_emit_gt_goto,
2920 i386_emit_ge_goto
2921 };
2922
2923
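/* Implementation of target ops method "emit_ops".  Return the set of
   native-code emitters matching the inferior's architecture, used when
   compiling agent expressions to native code for fast tracepoints.  */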
2924 emit_ops *
2925 x86_target::emit_ops ()
2926 {
2927 #ifdef __x86_64__
2928 if (is_64bit_tdesc ())
2929 return &amd64_emit_ops;
2930 else
2931 #endif
2932 return &i386_emit_ops;
2933 }
2934
2935 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2936
2937 const gdb_byte *
2938 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2939 {
2940 *size = x86_breakpoint_len;
2941 return x86_breakpoint;
2942 }
2943
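/* Implementation of linux target ops method "low_supports_range_stepping".  */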
2944 bool
2945 x86_target::low_supports_range_stepping ()
2946 {
2947 return true;
2948 }
2949
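/* Implementation of target ops method "get_ipa_tdesc_idx".  Return the
   index that identifies the current target description to the in-process
   agent, so both sides agree on the register block layout.  */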
2950 int
2951 x86_target::get_ipa_tdesc_idx ()
2952 {
2953 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2954 const struct target_desc *tdesc = regcache->tdesc;
2955
2956 #ifdef __x86_64__
2957 return amd64_get_ipa_tdesc_idx (tdesc);
2958 #endif
2959
2960 if (tdesc == tdesc_i386_linux_no_xml)
2961 return X86_TDESC_SSE;
2962
2963 return i386_get_ipa_tdesc_idx (tdesc);
2964 }
2965
2966 /* The linux target ops object. */
2967
2968 linux_process_target *the_linux_target = &the_x86_target;
2969
2970 void
2971 initialize_low_arch (void)
2972 {
2973 /* Initialize the Linux target descriptions. */
2974 #ifdef __x86_64__
2975 tdesc_amd64_linux_no_xml = allocate_target_description ();
2976 copy_target_description (tdesc_amd64_linux_no_xml,
2977 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2978 false));
2979 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2980 #endif
2981
2982 tdesc_i386_linux_no_xml = allocate_target_description ();
2983 copy_target_description (tdesc_i386_linux_no_xml,
2984 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2985 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2986
2987 initialize_regsets_info (&x86_regsets_info);
2988 }