1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static target_desc_up tdesc_amd64_linux_no_xml;
52 #endif
53 static target_desc_up tdesc_i386_linux_no_xml;
54
55
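/* Templates for the jump instructions written into the inferior below:
   0xe9 is the opcode of the 5-byte "jmp rel32" instruction, and the
   0x66 operand-size prefix in the second template turns it into the
   4-byte "jmp rel16" form used when only a short jump fits.  The
   zeroed displacement bytes are patched in once the target is known.  */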
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char xmltarget_i386_linux_no_xml[] = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (gdb::array_view<const char * const> features) override;
110
111 bool supports_tracepoints () override;
112
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
125 struct emit_ops *emit_ops () override;
126
127 int get_ipa_tdesc_idx () override;
128
129 protected:
130
131 void low_arch_setup () override;
132
133 bool low_cannot_fetch_register (int regno) override;
134
135 bool low_cannot_store_register (int regno) override;
136
137 bool low_supports_breakpoints () override;
138
139 CORE_ADDR low_get_pc (regcache *regcache) override;
140
141 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
142
143 int low_decr_pc_after_break () override;
144
145 bool low_breakpoint_at (CORE_ADDR pc) override;
146
147 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
148 int size, raw_breakpoint *bp) override;
149
150 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
151 int size, raw_breakpoint *bp) override;
152
153 bool low_stopped_by_watchpoint () override;
154
155 CORE_ADDR low_stopped_data_address () override;
156
157 /* collect_ptrace_register/supply_ptrace_register are not needed in the
158 native i386 case (no registers smaller than an xfer unit), and are not
159 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
160
161 /* Need to fix up i386 siginfo if host is amd64. */
162 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
163 int direction) override;
164
165 arch_process_info *low_new_process () override;
166
167 void low_delete_process (arch_process_info *info) override;
168
169 void low_new_thread (lwp_info *) override;
170
171 void low_delete_thread (arch_lwp_info *) override;
172
173 void low_new_fork (process_info *parent, process_info *child) override;
174
175 void low_prepare_to_resume (lwp_info *lwp) override;
176
177 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
178
179 bool low_supports_range_stepping () override;
180
181 bool low_supports_catch_syscall () override;
182
183 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
184
185 private:
186
 187   /* Update the target descriptions of all processes; a new GDB has
 188      connected, and it may or may not support xml target descriptions.  */
189 void update_xmltarget ();
190 };
191
192 /* The singleton target ops object. */
193
194 static x86_target the_x86_target;
195
196 /* Per-process arch-specific data we want to keep. */
197
198 struct arch_process_info
199 {
200 struct x86_debug_reg_state debug_reg_state;
201 };
202
203 #ifdef __x86_64__
204
205 /* Mapping between the general-purpose registers in `struct user'
206 format and GDB's register array layout.
207 Note that the transfer layout uses 64-bit regs. */
208 static /*const*/ int i386_regmap[] =
209 {
210 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
211 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
212 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
213 DS * 8, ES * 8, FS * 8, GS * 8
214 };
215
216 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
217
 218 /* So that the code below doesn't have to care whether it is i386 or amd64.  */
219 #define ORIG_EAX ORIG_RAX
220 #define REGSIZE 8
221
222 static const int x86_64_regmap[] =
223 {
224 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
225 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
226 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
227 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
228 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
229 DS * 8, ES * 8, FS * 8, GS * 8,
230 -1, -1, -1, -1, -1, -1, -1, -1,
231 -1, -1, -1, -1, -1, -1, -1, -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
233 -1,
234 -1, -1, -1, -1, -1, -1, -1, -1,
235 ORIG_RAX * 8,
236 21 * 8, 22 * 8,
237 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
238 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
239 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
240 -1, -1, -1, -1, -1, -1, -1, -1,
241 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
242 -1, -1, -1, -1, -1, -1, -1, -1,
243 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
244 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
245 -1, -1, -1, -1, -1, -1, -1, -1,
246 -1, -1, -1, -1, -1, -1, -1, -1,
247 -1, -1, -1, -1, -1, -1, -1, -1,
248 -1 /* pkru */
249 };
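/* Entries of -1 in x86_64_regmap above mark GDB registers that have no
   slot in the ptrace general-purpose register block; x86_fill_gregset
   and x86_store_gregset below simply skip them, and those registers are
   transferred through the floating-point/xstate regsets instead.  */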
250
251 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
252 #define X86_64_USER_REGS (GS + 1)
253
254 #else /* ! __x86_64__ */
255
256 /* Mapping between the general-purpose registers in `struct user'
257 format and GDB's register array layout. */
258 static /*const*/ int i386_regmap[] =
259 {
260 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
261 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
262 EIP * 4, EFL * 4, CS * 4, SS * 4,
263 DS * 4, ES * 4, FS * 4, GS * 4
264 };
265
266 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
267
268 #define REGSIZE 4
269
270 #endif
271
272 #ifdef __x86_64__
273
 274 /* Returns true if the current inferior belongs to an x86-64 process,
275 per the tdesc. */
276
277 static int
278 is_64bit_tdesc (void)
279 {
280 struct regcache *regcache = get_thread_regcache (current_thread, 0);
281
282 return register_size (regcache->tdesc, 0) == 8;
283 }
284
285 #endif
286
287 \f
288 /* Called by libthread_db. */
289
290 ps_err_e
291 ps_get_thread_area (struct ps_prochandle *ph,
292 lwpid_t lwpid, int idx, void **base)
293 {
294 #ifdef __x86_64__
295 int use_64bit = is_64bit_tdesc ();
296
297 if (use_64bit)
298 {
299 switch (idx)
300 {
301 case FS:
302 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
303 return PS_OK;
304 break;
305 case GS:
306 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
307 return PS_OK;
308 break;
309 default:
310 return PS_BADADDR;
311 }
312 return PS_ERR;
313 }
314 #endif
315
316 {
317 unsigned int desc[4];
318
319 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
320 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
321 return PS_ERR;
322
323 /* Ensure we properly extend the value to 64-bits for x86_64. */
324 *base = (void *) (uintptr_t) desc[1];
325 return PS_OK;
326 }
327 }
328
329 /* Get the thread area address. This is used to recognize which
330 thread is which when tracing with the in-process agent library. We
331 don't read anything from the address, and treat it as opaque; it's
332 the address itself that we assume is unique per-thread. */
333
334 int
335 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
336 {
337 #ifdef __x86_64__
338 int use_64bit = is_64bit_tdesc ();
339
340 if (use_64bit)
341 {
342 void *base;
343 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
344 {
345 *addr = (CORE_ADDR) (uintptr_t) base;
346 return 0;
347 }
348
349 return -1;
350 }
351 #endif
352
353 {
354 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
355 struct thread_info *thr = get_lwp_thread (lwp);
356 struct regcache *regcache = get_thread_regcache (thr, 1);
357 unsigned int desc[4];
358 ULONGEST gs = 0;
 359     const int reg_thread_area = 3; /* The low 3 bits of a segment selector hold the RPL and table indicator; shift them off to get the GDT index.  */
360 int idx;
361
362 collect_register_by_name (regcache, "gs", &gs);
363
364 idx = gs >> reg_thread_area;
365
366 if (ptrace (PTRACE_GET_THREAD_AREA,
367 lwpid_of (thr),
368 (void *) (long) idx, (unsigned long) &desc) < 0)
369 return -1;
370
371 *addr = desc[1];
372 return 0;
373 }
374 }
375
376
377 \f
378 bool
379 x86_target::low_cannot_store_register (int regno)
380 {
381 #ifdef __x86_64__
382 if (is_64bit_tdesc ())
383 return false;
384 #endif
385
386 return regno >= I386_NUM_REGS;
387 }
388
389 bool
390 x86_target::low_cannot_fetch_register (int regno)
391 {
392 #ifdef __x86_64__
393 if (is_64bit_tdesc ())
394 return false;
395 #endif
396
397 return regno >= I386_NUM_REGS;
398 }
399
400 static void
401 collect_register_i386 (struct regcache *regcache, int regno, void *buf)
402 {
403 collect_register (regcache, regno, buf);
404
405 #ifdef __x86_64__
406 /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
407 space reserved in buf for the register is 8 bytes. Make sure the entire
408 reserved space is initialized. */
409
410 gdb_assert (register_size (regcache->tdesc, regno) == 4);
411
412 if (regno == RAX)
413 {
414 /* Sign extend EAX value to avoid potential syscall restart
415 problems.
416
417 See amd64_linux_collect_native_gregset() in
418 gdb/amd64-linux-nat.c for a detailed explanation. */
419 *(int64_t *) buf = *(int32_t *) buf;
420 }
421 else
422 {
423 /* Zero-extend. */
424 *(uint64_t *) buf = *(uint32_t *) buf;
425 }
426 #endif
427 }
428
429 static void
430 x86_fill_gregset (struct regcache *regcache, void *buf)
431 {
432 int i;
433
434 #ifdef __x86_64__
435 if (register_size (regcache->tdesc, 0) == 8)
436 {
437 for (i = 0; i < X86_64_NUM_REGS; i++)
438 if (x86_64_regmap[i] != -1)
439 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
440
441 return;
442 }
443 #endif
444
445 for (i = 0; i < I386_NUM_REGS; i++)
446 collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);
447
448 /* Handle ORIG_EAX, which is not in i386_regmap. */
449 collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
450 ((char *) buf) + ORIG_EAX * REGSIZE);
451 }
452
453 static void
454 x86_store_gregset (struct regcache *regcache, const void *buf)
455 {
456 int i;
457
458 #ifdef __x86_64__
459 if (register_size (regcache->tdesc, 0) == 8)
460 {
461 for (i = 0; i < X86_64_NUM_REGS; i++)
462 if (x86_64_regmap[i] != -1)
463 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
464
465 return;
466 }
467 #endif
468
469 for (i = 0; i < I386_NUM_REGS; i++)
470 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
471
472 supply_register_by_name (regcache, "orig_eax",
473 ((char *) buf) + ORIG_EAX * REGSIZE);
474 }
475
476 static void
477 x86_fill_fpregset (struct regcache *regcache, void *buf)
478 {
479 #ifdef __x86_64__
480 i387_cache_to_fxsave (regcache, buf);
481 #else
482 i387_cache_to_fsave (regcache, buf);
483 #endif
484 }
485
486 static void
487 x86_store_fpregset (struct regcache *regcache, const void *buf)
488 {
489 #ifdef __x86_64__
490 i387_fxsave_to_cache (regcache, buf);
491 #else
492 i387_fsave_to_cache (regcache, buf);
493 #endif
494 }
495
496 #ifndef __x86_64__
497
498 static void
499 x86_fill_fpxregset (struct regcache *regcache, void *buf)
500 {
501 i387_cache_to_fxsave (regcache, buf);
502 }
503
504 static void
505 x86_store_fpxregset (struct regcache *regcache, const void *buf)
506 {
507 i387_fxsave_to_cache (regcache, buf);
508 }
509
510 #endif
511
512 static void
513 x86_fill_xstateregset (struct regcache *regcache, void *buf)
514 {
515 i387_cache_to_xsave (regcache, buf);
516 }
517
518 static void
519 x86_store_xstateregset (struct regcache *regcache, const void *buf)
520 {
521 i387_xsave_to_cache (regcache, buf);
522 }
523
524 /* ??? The non-biarch i386 case stores all the i387 regs twice.
525 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
526 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
 527    doesn't work.  It would be nice to avoid the duplication in the case where it
528 does work. Maybe the arch_setup routine could check whether it works
529 and update the supported regsets accordingly. */
530
531 static struct regset_info x86_regsets[] =
532 {
533 #ifdef HAVE_PTRACE_GETREGS
534 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
535 GENERAL_REGS,
536 x86_fill_gregset, x86_store_gregset },
537 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
538 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
539 # ifndef __x86_64__
540 # ifdef HAVE_PTRACE_GETFPXREGS
541 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
542 EXTENDED_REGS,
543 x86_fill_fpxregset, x86_store_fpxregset },
544 # endif
545 # endif
546 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
547 FP_REGS,
548 x86_fill_fpregset, x86_store_fpregset },
549 #endif /* HAVE_PTRACE_GETREGS */
550 NULL_REGSET
551 };
552
553 bool
554 x86_target::low_supports_breakpoints ()
555 {
556 return true;
557 }
558
559 CORE_ADDR
560 x86_target::low_get_pc (regcache *regcache)
561 {
562 int use_64bit = register_size (regcache->tdesc, 0) == 8;
563
564 if (use_64bit)
565 {
566 uint64_t pc;
567
568 collect_register_by_name (regcache, "rip", &pc);
569 return (CORE_ADDR) pc;
570 }
571 else
572 {
573 uint32_t pc;
574
575 collect_register_by_name (regcache, "eip", &pc);
576 return (CORE_ADDR) pc;
577 }
578 }
579
580 void
581 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
582 {
583 int use_64bit = register_size (regcache->tdesc, 0) == 8;
584
585 if (use_64bit)
586 {
587 uint64_t newpc = pc;
588
589 supply_register_by_name (regcache, "rip", &newpc);
590 }
591 else
592 {
593 uint32_t newpc = pc;
594
595 supply_register_by_name (regcache, "eip", &newpc);
596 }
597 }
598
599 int
600 x86_target::low_decr_pc_after_break ()
601 {
602 return 1;
603 }
604
605 \f
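/* The single-byte "int3" instruction (0xCC), used as the software
   breakpoint on both i386 and x86-64.  */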
606 static const gdb_byte x86_breakpoint[] = { 0xCC };
607 #define x86_breakpoint_len 1
608
609 bool
610 x86_target::low_breakpoint_at (CORE_ADDR pc)
611 {
612 unsigned char c;
613
614 read_memory (pc, &c, 1);
615 if (c == 0xCC)
616 return true;
617
618 return false;
619 }
620 \f
621 /* Low-level function vector. */
622 struct x86_dr_low_type x86_dr_low =
623 {
624 x86_linux_dr_set_control,
625 x86_linux_dr_set_addr,
626 x86_linux_dr_get_addr,
627 x86_linux_dr_get_status,
628 x86_linux_dr_get_control,
629 sizeof (void *),
630 };
631 \f
632 /* Breakpoint/Watchpoint support. */
633
634 bool
635 x86_target::supports_z_point_type (char z_type)
636 {
637 switch (z_type)
638 {
639 case Z_PACKET_SW_BP:
640 case Z_PACKET_HW_BP:
641 case Z_PACKET_WRITE_WP:
642 case Z_PACKET_ACCESS_WP:
643 return true;
644 default:
645 return false;
646 }
647 }
648
649 int
650 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
651 int size, raw_breakpoint *bp)
652 {
653 struct process_info *proc = current_process ();
654
655 switch (type)
656 {
657 case raw_bkpt_type_hw:
658 case raw_bkpt_type_write_wp:
659 case raw_bkpt_type_access_wp:
660 {
661 enum target_hw_bp_type hw_type
662 = raw_bkpt_type_to_target_hw_bp_type (type);
663 struct x86_debug_reg_state *state
664 = &proc->priv->arch_private->debug_reg_state;
665
666 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
667 }
668
669 default:
670 /* Unsupported. */
671 return 1;
672 }
673 }
674
675 int
676 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
677 int size, raw_breakpoint *bp)
678 {
679 struct process_info *proc = current_process ();
680
681 switch (type)
682 {
683 case raw_bkpt_type_hw:
684 case raw_bkpt_type_write_wp:
685 case raw_bkpt_type_access_wp:
686 {
687 enum target_hw_bp_type hw_type
688 = raw_bkpt_type_to_target_hw_bp_type (type);
689 struct x86_debug_reg_state *state
690 = &proc->priv->arch_private->debug_reg_state;
691
692 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
693 }
694 default:
695 /* Unsupported. */
696 return 1;
697 }
698 }
699
700 bool
701 x86_target::low_stopped_by_watchpoint ()
702 {
703 struct process_info *proc = current_process ();
704 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
705 }
706
707 CORE_ADDR
708 x86_target::low_stopped_data_address ()
709 {
710 struct process_info *proc = current_process ();
711 CORE_ADDR addr;
712 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
713 &addr))
714 return addr;
715 return 0;
716 }
717 \f
718 /* Called when a new process is created. */
719
720 arch_process_info *
721 x86_target::low_new_process ()
722 {
723 struct arch_process_info *info = XCNEW (struct arch_process_info);
724
725 x86_low_init_dregs (&info->debug_reg_state);
726
727 return info;
728 }
729
730 /* Called when a process is being deleted. */
731
732 void
733 x86_target::low_delete_process (arch_process_info *info)
734 {
735 xfree (info);
736 }
737
738 void
739 x86_target::low_new_thread (lwp_info *lwp)
740 {
741 /* This comes from nat/. */
742 x86_linux_new_thread (lwp);
743 }
744
745 void
746 x86_target::low_delete_thread (arch_lwp_info *alwp)
747 {
748 /* This comes from nat/. */
749 x86_linux_delete_thread (alwp);
750 }
751
752 /* Target routine for new_fork. */
753
754 void
755 x86_target::low_new_fork (process_info *parent, process_info *child)
756 {
757 /* These are allocated by linux_add_process. */
758 gdb_assert (parent->priv != NULL
759 && parent->priv->arch_private != NULL);
760 gdb_assert (child->priv != NULL
761 && child->priv->arch_private != NULL);
762
 763   /* Linux kernels before the 2.6.33 commit
 764      72f674d203cd230426437cdcf7dd6f681dad8b0d
 765      let children inherit hardware debug registers from the parent
 766      on fork/vfork/clone.  Newer Linux kernels create such tasks with
767 zeroed debug registers.
768
769 GDB core assumes the child inherits the watchpoints/hw
770 breakpoints of the parent, and will remove them all from the
 771      forked off process.  Copy the debug register mirrors into the
 772      new process so that all breakpoints and watchpoints can be
 773      removed together.  The debug register mirrors will be zeroed
 774      in the end before detaching the forked off process, thus making
775 this compatible with older Linux kernels too. */
776
777 *child->priv->arch_private = *parent->priv->arch_private;
778 }
779
780 void
781 x86_target::low_prepare_to_resume (lwp_info *lwp)
782 {
783 /* This comes from nat/. */
784 x86_linux_prepare_to_resume (lwp);
785 }
786
787 /* See nat/x86-dregs.h. */
788
789 struct x86_debug_reg_state *
790 x86_debug_reg_state (pid_t pid)
791 {
792 struct process_info *proc = find_process_pid (pid);
793
794 return &proc->priv->arch_private->debug_reg_state;
795 }
796 \f
797 /* When GDBSERVER is built as a 64-bit application on linux, the
798 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
799 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
800 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
801 conversion in-place ourselves. */
802
 803 /* Convert a ptrace/host siginfo object into/from the siginfo in the
 804    layout of the inferior's architecture.  Returns true if any
805 conversion was done; false otherwise. If DIRECTION is 1, then copy
806 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
807 INF. */
808
809 bool
810 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
811 {
812 #ifdef __x86_64__
813 unsigned int machine;
814 int tid = lwpid_of (current_thread);
815 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
816
817 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
818 if (!is_64bit_tdesc ())
819 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
820 FIXUP_32);
821 /* No fixup for native x32 GDB. */
822 else if (!is_elf64 && sizeof (void *) == 8)
823 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
824 FIXUP_X32);
825 #endif
826
827 return false;
828 }
829 \f
830 static int use_xml;
831
832 /* Format of XSAVE extended state is:
833 struct
834 {
835 fxsave_bytes[0..463]
836 sw_usable_bytes[464..511]
837 xstate_hdr_bytes[512..575]
838 avx_bytes[576..831]
839 future_state etc
840 };
841
842 Same memory layout will be used for the coredump NT_X86_XSTATE
843 representing the XSAVE extended state registers.
844
 845    The first 8 bytes of sw_usable_bytes[464..471] hold the OS enabled
846 extended state mask, which is the same as the extended control register
847 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
848 together with the mask saved in the xstate_hdr_bytes to determine what
849 states the processor/OS supports and what state, used or initialized,
850 the process/thread is in. */
851 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
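/* As an illustration (a minimal sketch, not used by the code below;
   XSAVE_BUF is just a hypothetical pointer to a raw XSAVE buffer), the
   enabled-feature mask can be pulled out of such a buffer like this:

     uint64_t xcr0;
     memcpy (&xcr0, (const char *) xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	     sizeof (xcr0));

   x86_linux_read_description below obtains the same value by fetching
   the NT_X86_XSTATE regset with PTRACE_GETREGSET and indexing the
   result at this offset.  */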
852
853 /* Does the current host support the GETFPXREGS request? The header
854 file may or may not define it, and even if it is defined, the
855 kernel will return EIO if it's running on a pre-SSE processor. */
856 int have_ptrace_getfpxregs =
857 #ifdef HAVE_PTRACE_GETFPXREGS
858 -1
859 #else
860 0
861 #endif
862 ;
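/* A value of -1 above means "unknown, probe on first use";
   x86_linux_read_description below sets it to 1 or 0 once
   PTRACE_GETFPXREGS has actually been tried on the inferior.  */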
863
864 /* Get Linux/x86 target description from running target. */
865
866 static const struct target_desc *
867 x86_linux_read_description (void)
868 {
869 unsigned int machine;
870 int is_elf64;
871 int xcr0_features;
872 int tid;
873 static uint64_t xcr0;
874 struct regset_info *regset;
875
876 tid = lwpid_of (current_thread);
877
878 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
879
880 if (sizeof (void *) == 4)
881 {
882 if (is_elf64 > 0)
883 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
884 #ifndef __x86_64__
885 else if (machine == EM_X86_64)
886 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
887 #endif
888 }
889
890 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
891 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
892 {
893 elf_fpxregset_t fpxregs;
894
895 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
896 {
897 have_ptrace_getfpxregs = 0;
898 have_ptrace_getregset = 0;
899 return i386_linux_read_description (X86_XSTATE_X87);
900 }
901 else
902 have_ptrace_getfpxregs = 1;
903 }
904 #endif
905
906 if (!use_xml)
907 {
908 x86_xcr0 = X86_XSTATE_SSE_MASK;
909
910 /* Don't use XML. */
911 #ifdef __x86_64__
912 if (machine == EM_X86_64)
913 return tdesc_amd64_linux_no_xml.get ();
914 else
915 #endif
916 return tdesc_i386_linux_no_xml.get ();
917 }
918
919 if (have_ptrace_getregset == -1)
920 {
921 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
922 struct iovec iov;
923
924 iov.iov_base = xstateregs;
925 iov.iov_len = sizeof (xstateregs);
926
927 /* Check if PTRACE_GETREGSET works. */
928 if (ptrace (PTRACE_GETREGSET, tid,
929 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
930 have_ptrace_getregset = 0;
931 else
932 {
933 have_ptrace_getregset = 1;
934
935 /* Get XCR0 from XSAVE extended state. */
936 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
937 / sizeof (uint64_t))];
938
939 /* Use PTRACE_GETREGSET if it is available. */
940 for (regset = x86_regsets;
941 regset->fill_function != NULL; regset++)
942 if (regset->get_request == PTRACE_GETREGSET)
943 regset->size = X86_XSTATE_SIZE (xcr0);
944 else if (regset->type != GENERAL_REGS)
945 regset->size = 0;
946 }
947 }
948
949 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
950 xcr0_features = (have_ptrace_getregset
951 && (xcr0 & X86_XSTATE_ALL_MASK));
952
953 if (xcr0_features)
954 x86_xcr0 = xcr0;
955
956 if (machine == EM_X86_64)
957 {
958 #ifdef __x86_64__
959 const target_desc *tdesc = NULL;
960
961 if (xcr0_features)
962 {
963 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
964 !is_elf64);
965 }
966
967 if (tdesc == NULL)
968 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
969 return tdesc;
970 #endif
971 }
972 else
973 {
974 const target_desc *tdesc = NULL;
975
976 if (xcr0_features)
977 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
978
979 if (tdesc == NULL)
980 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
981
982 return tdesc;
983 }
984
985 gdb_assert_not_reached ("failed to return tdesc");
986 }
987
 988 /* Update the target descriptions of all processes; a new GDB has
 989    connected, and it may or may not support xml target descriptions.  */
990
991 void
992 x86_target::update_xmltarget ()
993 {
994 struct thread_info *saved_thread = current_thread;
995
996 /* Before changing the register cache's internal layout, flush the
997 contents of the current valid caches back to the threads, and
998 release the current regcache objects. */
999 regcache_release ();
1000
1001 for_each_process ([this] (process_info *proc) {
1002 int pid = proc->pid;
1003
1004 /* Look up any thread of this process. */
1005 current_thread = find_any_thread_of_pid (pid);
1006
1007 low_arch_setup ();
1008 });
1009
1010 current_thread = saved_thread;
1011 }
1012
1013 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1014 PTRACE_GETREGSET. */
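/* For example (illustrative; the exact feature list varies between GDB
   builds), an XML-capable GDB includes a feature such as
   "xmlRegisters=i386" in its qSupported packet, possibly naming several
   architectures separated by commas; the code below looks for the
   "i386" token to decide whether XML target descriptions can be used.  */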
1015
1016 void
1017 x86_target::process_qsupported (gdb::array_view<const char * const> features)
1018 {
1019 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1020 with "i386" in qSupported query, it supports x86 XML target
1021 descriptions. */
1022 use_xml = 0;
1023
1024 for (const char *feature : features)
1025 {
1026 if (startswith (feature, "xmlRegisters="))
1027 {
1028 char *copy = xstrdup (feature + 13);
1029
1030 char *saveptr;
1031 for (char *p = strtok_r (copy, ",", &saveptr);
1032 p != NULL;
1033 p = strtok_r (NULL, ",", &saveptr))
1034 {
1035 if (strcmp (p, "i386") == 0)
1036 {
1037 use_xml = 1;
1038 break;
1039 }
1040 }
1041
1042 free (copy);
1043 }
1044 }
1045
1046 update_xmltarget ();
1047 }
1048
1049 /* Common for x86/x86-64. */
1050
1051 static struct regsets_info x86_regsets_info =
1052 {
1053 x86_regsets, /* regsets */
1054 0, /* num_regsets */
1055 NULL, /* disabled_regsets */
1056 };
1057
1058 #ifdef __x86_64__
1059 static struct regs_info amd64_linux_regs_info =
1060 {
1061 NULL, /* regset_bitmap */
1062 NULL, /* usrregs_info */
1063 &x86_regsets_info
1064 };
1065 #endif
1066 static struct usrregs_info i386_linux_usrregs_info =
1067 {
1068 I386_NUM_REGS,
1069 i386_regmap,
1070 };
1071
1072 static struct regs_info i386_linux_regs_info =
1073 {
1074 NULL, /* regset_bitmap */
1075 &i386_linux_usrregs_info,
1076 &x86_regsets_info
1077 };
1078
1079 const regs_info *
1080 x86_target::get_regs_info ()
1081 {
1082 #ifdef __x86_64__
1083 if (is_64bit_tdesc ())
1084 return &amd64_linux_regs_info;
1085 else
1086 #endif
1087 return &i386_linux_regs_info;
1088 }
1089
1090 /* Initialize the target description for the architecture of the
1091 inferior. */
1092
1093 void
1094 x86_target::low_arch_setup ()
1095 {
1096 current_process ()->tdesc = x86_linux_read_description ();
1097 }
1098
1099 bool
1100 x86_target::low_supports_catch_syscall ()
1101 {
1102 return true;
1103 }
1104
 1105 /* Fill *SYSNO with the number of the syscall the inferior stopped at.
 1106    This should only be called if LWP got a SYSCALL_SIGTRAP. */
1107
1108 void
1109 x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
1110 {
1111 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1112
1113 if (use_64bit)
1114 {
1115 long l_sysno;
1116
1117 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1118 *sysno = (int) l_sysno;
1119 }
1120 else
1121 collect_register_by_name (regcache, "orig_eax", sysno);
1122 }
1123
1124 bool
1125 x86_target::supports_tracepoints ()
1126 {
1127 return true;
1128 }
1129
1130 static void
1131 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1132 {
1133 target_write_memory (*to, buf, len);
1134 *to += len;
1135 }
1136
1137 static int
1138 push_opcode (unsigned char *buf, const char *op)
1139 {
1140 unsigned char *buf_org = buf;
1141
1142 while (1)
1143 {
1144 char *endptr;
1145 unsigned long ul = strtoul (op, &endptr, 16);
1146
1147 if (endptr == op)
1148 break;
1149
1150 *buf++ = ul;
1151 op = endptr;
1152 }
1153
1154 return buf - buf_org;
1155 }
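/* For example, push_opcode (buf, "48 83 ec 18") stores the four bytes
   0x48 0x83 0xec 0x18 ("sub $0x18,%rsp") into BUF and returns 4.  */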
1156
1157 #ifdef __x86_64__
1158
1159 /* Build a jump pad that saves registers and calls a collection
1160 function. Writes a jump instruction to the jump pad to
1161 JJUMPAD_INSN. The caller is responsible to write it in at the
1162 tracepoint address. */
1163
1164 static int
1165 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1166 CORE_ADDR collector,
1167 CORE_ADDR lockaddr,
1168 ULONGEST orig_size,
1169 CORE_ADDR *jump_entry,
1170 CORE_ADDR *trampoline,
1171 ULONGEST *trampoline_size,
1172 unsigned char *jjump_pad_insn,
1173 ULONGEST *jjump_pad_insn_size,
1174 CORE_ADDR *adjusted_insn_addr,
1175 CORE_ADDR *adjusted_insn_addr_end,
1176 char *err)
1177 {
1178 unsigned char buf[40];
1179 int i, offset;
1180 int64_t loffset;
1181
1182 CORE_ADDR buildaddr = *jump_entry;
1183
1184 /* Build the jump pad. */
1185
1186 /* First, do tracepoint data collection. Save registers. */
1187 i = 0;
1188 /* Need to ensure stack pointer saved first. */
1189 buf[i++] = 0x54; /* push %rsp */
1190 buf[i++] = 0x55; /* push %rbp */
1191 buf[i++] = 0x57; /* push %rdi */
1192 buf[i++] = 0x56; /* push %rsi */
1193 buf[i++] = 0x52; /* push %rdx */
1194 buf[i++] = 0x51; /* push %rcx */
1195 buf[i++] = 0x53; /* push %rbx */
1196 buf[i++] = 0x50; /* push %rax */
1197 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1198 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1199 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1200 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1201 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1202 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1203 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1204 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1205 buf[i++] = 0x9c; /* pushfq */
1206 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1207 buf[i++] = 0xbf;
1208 memcpy (buf + i, &tpaddr, 8);
1209 i += 8;
1210 buf[i++] = 0x57; /* push %rdi */
1211 append_insns (&buildaddr, i, buf);
1212
1213 /* Stack space for the collecting_t object. */
1214 i = 0;
1215 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1216 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1217 memcpy (buf + i, &tpoint, 8);
1218 i += 8;
1219 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1220 i += push_opcode (&buf[i],
1221 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1222 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1223 append_insns (&buildaddr, i, buf);
1224
1225 /* spin-lock. */
1226 i = 0;
 1227   i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
1228 memcpy (&buf[i], (void *) &lockaddr, 8);
1229 i += 8;
1230 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1231 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1232 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1233 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1234 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1235 append_insns (&buildaddr, i, buf);
1236
1237 /* Set up the gdb_collect call. */
1238 /* At this point, (stack pointer + 0x18) is the base of our saved
1239 register block. */
1240
1241 i = 0;
1242 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1243 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1244
1245 /* tpoint address may be 64-bit wide. */
 1246   i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
1247 memcpy (buf + i, &tpoint, 8);
1248 i += 8;
1249 append_insns (&buildaddr, i, buf);
1250
 1251   /* The collector function, being in the shared library, may be more
 1252      than 31 bits away from the jump pad. */
1253 i = 0;
1254 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1255 memcpy (buf + i, &collector, 8);
1256 i += 8;
1257 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1258 append_insns (&buildaddr, i, buf);
1259
1260 /* Clear the spin-lock. */
1261 i = 0;
1262 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1263 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1264 memcpy (buf + i, &lockaddr, 8);
1265 i += 8;
1266 append_insns (&buildaddr, i, buf);
1267
1268 /* Remove stack that had been used for the collect_t object. */
1269 i = 0;
1270 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1271 append_insns (&buildaddr, i, buf);
1272
1273 /* Restore register state. */
1274 i = 0;
1275 buf[i++] = 0x48; /* add $0x8,%rsp */
1276 buf[i++] = 0x83;
1277 buf[i++] = 0xc4;
1278 buf[i++] = 0x08;
1279 buf[i++] = 0x9d; /* popfq */
1280 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1281 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1282 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1283 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1284 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1285 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1286 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1287 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1288 buf[i++] = 0x58; /* pop %rax */
1289 buf[i++] = 0x5b; /* pop %rbx */
1290 buf[i++] = 0x59; /* pop %rcx */
1291 buf[i++] = 0x5a; /* pop %rdx */
1292 buf[i++] = 0x5e; /* pop %rsi */
1293 buf[i++] = 0x5f; /* pop %rdi */
1294 buf[i++] = 0x5d; /* pop %rbp */
1295 buf[i++] = 0x5c; /* pop %rsp */
1296 append_insns (&buildaddr, i, buf);
1297
1298 /* Now, adjust the original instruction to execute in the jump
1299 pad. */
1300 *adjusted_insn_addr = buildaddr;
1301 relocate_instruction (&buildaddr, tpaddr);
1302 *adjusted_insn_addr_end = buildaddr;
1303
1304 /* Finally, write a jump back to the program. */
1305
1306 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1307 if (loffset > INT_MAX || loffset < INT_MIN)
1308 {
1309 sprintf (err,
1310 "E.Jump back from jump pad too far from tracepoint "
1311 "(offset 0x%" PRIx64 " > int32).", loffset);
1312 return 1;
1313 }
1314
1315 offset = (int) loffset;
1316 memcpy (buf, jump_insn, sizeof (jump_insn));
1317 memcpy (buf + 1, &offset, 4);
1318 append_insns (&buildaddr, sizeof (jump_insn), buf);
1319
1320 /* The jump pad is now built. Wire in a jump to our jump pad. This
1321 is always done last (by our caller actually), so that we can
1322 install fast tracepoints with threads running. This relies on
1323 the agent's atomic write support. */
1324 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1325 if (loffset > INT_MAX || loffset < INT_MIN)
1326 {
1327 sprintf (err,
1328 "E.Jump pad too far from tracepoint "
1329 "(offset 0x%" PRIx64 " > int32).", loffset);
1330 return 1;
1331 }
1332
1333 offset = (int) loffset;
1334
1335 memcpy (buf, jump_insn, sizeof (jump_insn));
1336 memcpy (buf + 1, &offset, 4);
1337 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1338 *jjump_pad_insn_size = sizeof (jump_insn);
1339
1340 /* Return the end address of our pad. */
1341 *jump_entry = buildaddr;
1342
1343 return 0;
1344 }
1345
1346 #endif /* __x86_64__ */
1347
1348 /* Build a jump pad that saves registers and calls a collection
1349 function. Writes a jump instruction to the jump pad to
 1350    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1351 tracepoint address. */
1352
1353 static int
1354 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1355 CORE_ADDR collector,
1356 CORE_ADDR lockaddr,
1357 ULONGEST orig_size,
1358 CORE_ADDR *jump_entry,
1359 CORE_ADDR *trampoline,
1360 ULONGEST *trampoline_size,
1361 unsigned char *jjump_pad_insn,
1362 ULONGEST *jjump_pad_insn_size,
1363 CORE_ADDR *adjusted_insn_addr,
1364 CORE_ADDR *adjusted_insn_addr_end,
1365 char *err)
1366 {
1367 unsigned char buf[0x100];
1368 int i, offset;
1369 CORE_ADDR buildaddr = *jump_entry;
1370
1371 /* Build the jump pad. */
1372
1373 /* First, do tracepoint data collection. Save registers. */
1374 i = 0;
1375 buf[i++] = 0x60; /* pushad */
1376 buf[i++] = 0x68; /* push tpaddr aka $pc */
1377 *((int *)(buf + i)) = (int) tpaddr;
1378 i += 4;
1379 buf[i++] = 0x9c; /* pushf */
1380 buf[i++] = 0x1e; /* push %ds */
1381 buf[i++] = 0x06; /* push %es */
1382 buf[i++] = 0x0f; /* push %fs */
1383 buf[i++] = 0xa0;
1384 buf[i++] = 0x0f; /* push %gs */
1385 buf[i++] = 0xa8;
1386 buf[i++] = 0x16; /* push %ss */
1387 buf[i++] = 0x0e; /* push %cs */
1388 append_insns (&buildaddr, i, buf);
1389
1390 /* Stack space for the collecting_t object. */
1391 i = 0;
1392 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1393
1394 /* Build the object. */
1395 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1396 memcpy (buf + i, &tpoint, 4);
1397 i += 4;
1398 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1399
1400 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1401 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1402 append_insns (&buildaddr, i, buf);
1403
 1404   /* Spin-lock.  Note this uses cmpxchg, which the original i386 lacks
 1405      (it appeared with the i486).  If we cared about that, xchg could be used instead. */
1406
1407 i = 0;
1408 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1409 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1410 %esp,<lockaddr> */
1411 memcpy (&buf[i], (void *) &lockaddr, 4);
1412 i += 4;
1413 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1414 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1415 append_insns (&buildaddr, i, buf);
1416
1417
1418 /* Set up arguments to the gdb_collect call. */
1419 i = 0;
1420 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1421 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1422 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1423 append_insns (&buildaddr, i, buf);
1424
1425 i = 0;
1426 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1427 append_insns (&buildaddr, i, buf);
1428
1429 i = 0;
1430 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1431 memcpy (&buf[i], (void *) &tpoint, 4);
1432 i += 4;
1433 append_insns (&buildaddr, i, buf);
1434
1435 buf[0] = 0xe8; /* call <reladdr> */
1436 offset = collector - (buildaddr + sizeof (jump_insn));
1437 memcpy (buf + 1, &offset, 4);
1438 append_insns (&buildaddr, 5, buf);
1439 /* Clean up after the call. */
1440 buf[0] = 0x83; /* add $0x8,%esp */
1441 buf[1] = 0xc4;
1442 buf[2] = 0x08;
1443 append_insns (&buildaddr, 3, buf);
1444
1445
1446 /* Clear the spin-lock. This would need the LOCK prefix on older
1447 broken archs. */
1448 i = 0;
1449 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1450 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1451 memcpy (buf + i, &lockaddr, 4);
1452 i += 4;
1453 append_insns (&buildaddr, i, buf);
1454
1455
1456 /* Remove stack that had been used for the collect_t object. */
1457 i = 0;
1458 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1459 append_insns (&buildaddr, i, buf);
1460
1461 i = 0;
1462 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1463 buf[i++] = 0xc4;
1464 buf[i++] = 0x04;
1465 buf[i++] = 0x17; /* pop %ss */
1466 buf[i++] = 0x0f; /* pop %gs */
1467 buf[i++] = 0xa9;
1468 buf[i++] = 0x0f; /* pop %fs */
1469 buf[i++] = 0xa1;
1470 buf[i++] = 0x07; /* pop %es */
1471 buf[i++] = 0x1f; /* pop %ds */
1472 buf[i++] = 0x9d; /* popf */
1473 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1474 buf[i++] = 0xc4;
1475 buf[i++] = 0x04;
1476 buf[i++] = 0x61; /* popad */
1477 append_insns (&buildaddr, i, buf);
1478
1479 /* Now, adjust the original instruction to execute in the jump
1480 pad. */
1481 *adjusted_insn_addr = buildaddr;
1482 relocate_instruction (&buildaddr, tpaddr);
1483 *adjusted_insn_addr_end = buildaddr;
1484
1485 /* Write the jump back to the program. */
1486 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1487 memcpy (buf, jump_insn, sizeof (jump_insn));
1488 memcpy (buf + 1, &offset, 4);
1489 append_insns (&buildaddr, sizeof (jump_insn), buf);
1490
1491 /* The jump pad is now built. Wire in a jump to our jump pad. This
1492 is always done last (by our caller actually), so that we can
1493 install fast tracepoints with threads running. This relies on
1494 the agent's atomic write support. */
1495 if (orig_size == 4)
1496 {
1497 /* Create a trampoline. */
1498 *trampoline_size = sizeof (jump_insn);
1499 if (!claim_trampoline_space (*trampoline_size, trampoline))
1500 {
1501 /* No trampoline space available. */
1502 strcpy (err,
1503 "E.Cannot allocate trampoline space needed for fast "
1504 "tracepoints on 4-byte instructions.");
1505 return 1;
1506 }
1507
1508 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1509 memcpy (buf, jump_insn, sizeof (jump_insn));
1510 memcpy (buf + 1, &offset, 4);
1511 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1512
1513 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1514 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1515 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1516 memcpy (buf + 2, &offset, 2);
1517 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1518 *jjump_pad_insn_size = sizeof (small_jump_insn);
1519 }
1520 else
1521 {
1522 /* Else use a 32-bit relative jump instruction. */
1523 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1524 memcpy (buf, jump_insn, sizeof (jump_insn));
1525 memcpy (buf + 1, &offset, 4);
1526 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1527 *jjump_pad_insn_size = sizeof (jump_insn);
1528 }
1529
1530 /* Return the end address of our pad. */
1531 *jump_entry = buildaddr;
1532
1533 return 0;
1534 }
1535
1536 bool
1537 x86_target::supports_fast_tracepoints ()
1538 {
1539 return true;
1540 }
1541
1542 int
1543 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1544 CORE_ADDR tpaddr,
1545 CORE_ADDR collector,
1546 CORE_ADDR lockaddr,
1547 ULONGEST orig_size,
1548 CORE_ADDR *jump_entry,
1549 CORE_ADDR *trampoline,
1550 ULONGEST *trampoline_size,
1551 unsigned char *jjump_pad_insn,
1552 ULONGEST *jjump_pad_insn_size,
1553 CORE_ADDR *adjusted_insn_addr,
1554 CORE_ADDR *adjusted_insn_addr_end,
1555 char *err)
1556 {
1557 #ifdef __x86_64__
1558 if (is_64bit_tdesc ())
1559 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1560 collector, lockaddr,
1561 orig_size, jump_entry,
1562 trampoline, trampoline_size,
1563 jjump_pad_insn,
1564 jjump_pad_insn_size,
1565 adjusted_insn_addr,
1566 adjusted_insn_addr_end,
1567 err);
1568 #endif
1569
1570 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1571 collector, lockaddr,
1572 orig_size, jump_entry,
1573 trampoline, trampoline_size,
1574 jjump_pad_insn,
1575 jjump_pad_insn_size,
1576 adjusted_insn_addr,
1577 adjusted_insn_addr_end,
1578 err);
1579 }
1580
1581 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1582 architectures. */
1583
1584 int
1585 x86_target::get_min_fast_tracepoint_insn_len ()
1586 {
1587 static int warned_about_fast_tracepoints = 0;
1588
1589 #ifdef __x86_64__
1590 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1591 used for fast tracepoints. */
1592 if (is_64bit_tdesc ())
1593 return 5;
1594 #endif
1595
1596 if (agent_loaded_p ())
1597 {
1598 char errbuf[IPA_BUFSIZ];
1599
1600 errbuf[0] = '\0';
1601
1602 /* On x86, if trampolines are available, then 4-byte jump instructions
1603 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1604 with a 4-byte offset are used instead. */
1605 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1606 return 4;
1607 else
1608 {
 1609 	  /* GDB has no channel to explain to the user why a shorter fast
1610 tracepoint is not possible, but at least make GDBserver
1611 mention that something has gone awry. */
1612 if (!warned_about_fast_tracepoints)
1613 {
1614 warning ("4-byte fast tracepoints not available; %s", errbuf);
1615 warned_about_fast_tracepoints = 1;
1616 }
1617 return 5;
1618 }
1619 }
1620 else
1621 {
1622 /* Indicate that the minimum length is currently unknown since the IPA
1623 has not loaded yet. */
1624 return 0;
1625 }
1626 }
1627
1628 static void
1629 add_insns (unsigned char *start, int len)
1630 {
1631 CORE_ADDR buildaddr = current_insn_ptr;
1632
1633 if (debug_threads)
1634 debug_printf ("Adding %d bytes of insn at %s\n",
1635 len, paddress (buildaddr));
1636
1637 append_insns (&buildaddr, len, start);
1638 current_insn_ptr = buildaddr;
1639 }
1640
1641 /* Our general strategy for emitting code is to avoid specifying raw
1642 bytes whenever possible, and instead copy a block of inline asm
1643 that is embedded in the function. This is a little messy, because
1644 we need to keep the compiler from discarding what looks like dead
1645 code, plus suppress various warnings. */
1646
1647 #define EMIT_ASM(NAME, INSNS) \
1648 do \
1649 { \
1650 extern unsigned char start_ ## NAME, end_ ## NAME; \
1651 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1652 __asm__ ("jmp end_" #NAME "\n" \
1653 "\t" "start_" #NAME ":" \
1654 "\t" INSNS "\n" \
1655 "\t" "end_" #NAME ":"); \
1656 } while (0)
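/* As a rough sketch of what the macro does (the do/while wrapper is
   omitted), EMIT_ASM (foo, "pop %rax") expands to approximately:

     extern unsigned char start_foo, end_foo;
     add_insns (&start_foo, &end_foo - &start_foo);
     __asm__ ("jmp end_foo\n\t" "start_foo:\t" "pop %rax\n\t" "end_foo:");

   i.e. the instruction bytes are assembled between the start_foo and
   end_foo labels in gdbserver's own text section, the leading jmp keeps
   gdbserver from ever executing them itself, and add_insns copies them
   into the jump pad being built in the inferior.  */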
1657
1658 #ifdef __x86_64__
1659
1660 #define EMIT_ASM32(NAME,INSNS) \
1661 do \
1662 { \
1663 extern unsigned char start_ ## NAME, end_ ## NAME; \
1664 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1665 __asm__ (".code32\n" \
1666 "\t" "jmp end_" #NAME "\n" \
1667 "\t" "start_" #NAME ":\n" \
1668 "\t" INSNS "\n" \
1669 "\t" "end_" #NAME ":\n" \
1670 ".code64\n"); \
1671 } while (0)
1672
1673 #else
1674
1675 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1676
1677 #endif
1678
1679 #ifdef __x86_64__
1680
1681 static void
1682 amd64_emit_prologue (void)
1683 {
1684 EMIT_ASM (amd64_prologue,
1685 "pushq %rbp\n\t"
1686 "movq %rsp,%rbp\n\t"
1687 "sub $0x20,%rsp\n\t"
1688 "movq %rdi,-8(%rbp)\n\t"
1689 "movq %rsi,-16(%rbp)");
1690 }
1691
1692
1693 static void
1694 amd64_emit_epilogue (void)
1695 {
1696 EMIT_ASM (amd64_epilogue,
1697 "movq -16(%rbp),%rdi\n\t"
1698 "movq %rax,(%rdi)\n\t"
1699 "xor %rax,%rax\n\t"
1700 "leave\n\t"
1701 "ret");
1702 }
1703
1704 static void
1705 amd64_emit_add (void)
1706 {
1707 EMIT_ASM (amd64_add,
1708 "add (%rsp),%rax\n\t"
1709 "lea 0x8(%rsp),%rsp");
1710 }
1711
1712 static void
1713 amd64_emit_sub (void)
1714 {
1715 EMIT_ASM (amd64_sub,
1716 "sub %rax,(%rsp)\n\t"
1717 "pop %rax");
1718 }
1719
1720 static void
1721 amd64_emit_mul (void)
1722 {
1723 emit_error = 1;
1724 }
1725
1726 static void
1727 amd64_emit_lsh (void)
1728 {
1729 emit_error = 1;
1730 }
1731
1732 static void
1733 amd64_emit_rsh_signed (void)
1734 {
1735 emit_error = 1;
1736 }
1737
1738 static void
1739 amd64_emit_rsh_unsigned (void)
1740 {
1741 emit_error = 1;
1742 }
1743
1744 static void
1745 amd64_emit_ext (int arg)
1746 {
1747 switch (arg)
1748 {
1749 case 8:
1750 EMIT_ASM (amd64_ext_8,
1751 "cbtw\n\t"
1752 "cwtl\n\t"
1753 "cltq");
1754 break;
1755 case 16:
1756 EMIT_ASM (amd64_ext_16,
1757 "cwtl\n\t"
1758 "cltq");
1759 break;
1760 case 32:
1761 EMIT_ASM (amd64_ext_32,
1762 "cltq");
1763 break;
1764 default:
1765 emit_error = 1;
1766 }
1767 }
1768
1769 static void
1770 amd64_emit_log_not (void)
1771 {
1772 EMIT_ASM (amd64_log_not,
1773 "test %rax,%rax\n\t"
1774 "sete %cl\n\t"
1775 "movzbq %cl,%rax");
1776 }
1777
1778 static void
1779 amd64_emit_bit_and (void)
1780 {
1781 EMIT_ASM (amd64_and,
1782 "and (%rsp),%rax\n\t"
1783 "lea 0x8(%rsp),%rsp");
1784 }
1785
1786 static void
1787 amd64_emit_bit_or (void)
1788 {
1789 EMIT_ASM (amd64_or,
1790 "or (%rsp),%rax\n\t"
1791 "lea 0x8(%rsp),%rsp");
1792 }
1793
1794 static void
1795 amd64_emit_bit_xor (void)
1796 {
1797 EMIT_ASM (amd64_xor,
1798 "xor (%rsp),%rax\n\t"
1799 "lea 0x8(%rsp),%rsp");
1800 }
1801
1802 static void
1803 amd64_emit_bit_not (void)
1804 {
1805 EMIT_ASM (amd64_bit_not,
1806 "xorq $0xffffffffffffffff,%rax");
1807 }
1808
1809 static void
1810 amd64_emit_equal (void)
1811 {
1812 EMIT_ASM (amd64_equal,
1813 "cmp %rax,(%rsp)\n\t"
1814 "je .Lamd64_equal_true\n\t"
1815 "xor %rax,%rax\n\t"
1816 "jmp .Lamd64_equal_end\n\t"
1817 ".Lamd64_equal_true:\n\t"
1818 "mov $0x1,%rax\n\t"
1819 ".Lamd64_equal_end:\n\t"
1820 "lea 0x8(%rsp),%rsp");
1821 }
1822
1823 static void
1824 amd64_emit_less_signed (void)
1825 {
1826 EMIT_ASM (amd64_less_signed,
1827 "cmp %rax,(%rsp)\n\t"
1828 "jl .Lamd64_less_signed_true\n\t"
1829 "xor %rax,%rax\n\t"
1830 "jmp .Lamd64_less_signed_end\n\t"
1831 ".Lamd64_less_signed_true:\n\t"
1832 "mov $1,%rax\n\t"
1833 ".Lamd64_less_signed_end:\n\t"
1834 "lea 0x8(%rsp),%rsp");
1835 }
1836
1837 static void
1838 amd64_emit_less_unsigned (void)
1839 {
1840 EMIT_ASM (amd64_less_unsigned,
1841 "cmp %rax,(%rsp)\n\t"
1842 "jb .Lamd64_less_unsigned_true\n\t"
1843 "xor %rax,%rax\n\t"
1844 "jmp .Lamd64_less_unsigned_end\n\t"
1845 ".Lamd64_less_unsigned_true:\n\t"
1846 "mov $1,%rax\n\t"
1847 ".Lamd64_less_unsigned_end:\n\t"
1848 "lea 0x8(%rsp),%rsp");
1849 }
1850
1851 static void
1852 amd64_emit_ref (int size)
1853 {
1854 switch (size)
1855 {
1856 case 1:
1857 EMIT_ASM (amd64_ref1,
1858 "movb (%rax),%al");
1859 break;
1860 case 2:
1861 EMIT_ASM (amd64_ref2,
1862 "movw (%rax),%ax");
1863 break;
1864 case 4:
1865 EMIT_ASM (amd64_ref4,
1866 "movl (%rax),%eax");
1867 break;
1868 case 8:
1869 EMIT_ASM (amd64_ref8,
1870 "movq (%rax),%rax");
1871 break;
1872 }
1873 }
1874
1875 static void
1876 amd64_emit_if_goto (int *offset_p, int *size_p)
1877 {
1878 EMIT_ASM (amd64_if_goto,
1879 "mov %rax,%rcx\n\t"
1880 "pop %rax\n\t"
1881 "cmp $0,%rcx\n\t"
1882 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1883 if (offset_p)
1884 *offset_p = 10;
1885 if (size_p)
1886 *size_p = 4;
1887 }
1888
1889 static void
1890 amd64_emit_goto (int *offset_p, int *size_p)
1891 {
1892 EMIT_ASM (amd64_goto,
1893 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1894 if (offset_p)
1895 *offset_p = 1;
1896 if (size_p)
1897 *size_p = 4;
1898 }
1899
1900 static void
1901 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1902 {
1903 int diff = (to - (from + size));
1904 unsigned char buf[sizeof (int)];
1905
1906 if (size != 4)
1907 {
1908 emit_error = 1;
1909 return;
1910 }
1911
1912 memcpy (buf, &diff, sizeof (int));
1913 target_write_memory (from, buf, sizeof (int));
1914 }
1915
1916 static void
1917 amd64_emit_const (LONGEST num)
1918 {
1919 unsigned char buf[16];
1920 int i;
1921 CORE_ADDR buildaddr = current_insn_ptr;
1922
1923 i = 0;
1924 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1925 memcpy (&buf[i], &num, sizeof (num));
1926 i += 8;
1927 append_insns (&buildaddr, i, buf);
1928 current_insn_ptr = buildaddr;
1929 }
1930
1931 static void
1932 amd64_emit_call (CORE_ADDR fn)
1933 {
1934 unsigned char buf[16];
1935 int i;
1936 CORE_ADDR buildaddr;
1937 LONGEST offset64;
1938
 1939   /* The destination function, being in the shared library, may be more
 1940      than 31 bits away from the compiled code pad. */
1941
1942 buildaddr = current_insn_ptr;
1943
1944 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1945
1946 i = 0;
1947
1948 if (offset64 > INT_MAX || offset64 < INT_MIN)
1949 {
1950 /* Offset is too large for a call. Use callq, but that requires
 1951 	 a register, so avoid it if possible.  Use r10; since it is
 1952 	 call-clobbered, we don't have to push/pop it. */
1953 buf[i++] = 0x48; /* mov $fn,%r10 */
1954 buf[i++] = 0xba;
1955 memcpy (buf + i, &fn, 8);
1956 i += 8;
1957 buf[i++] = 0xff; /* callq *%r10 */
1958 buf[i++] = 0xd2;
1959 }
1960 else
1961 {
1962 int offset32 = offset64; /* we know we can't overflow here. */
1963
1964 buf[i++] = 0xe8; /* call <reladdr> */
1965 memcpy (buf + i, &offset32, 4);
1966 i += 4;
1967 }
1968
1969 append_insns (&buildaddr, i, buf);
1970 current_insn_ptr = buildaddr;
1971 }
1972
1973 static void
1974 amd64_emit_reg (int reg)
1975 {
1976 unsigned char buf[16];
1977 int i;
1978 CORE_ADDR buildaddr;
1979
1980 /* Assume raw_regs is still in %rdi. */
1981 buildaddr = current_insn_ptr;
1982 i = 0;
1983 buf[i++] = 0xbe; /* mov $<n>,%esi */
1984 memcpy (&buf[i], &reg, sizeof (reg));
1985 i += 4;
1986 append_insns (&buildaddr, i, buf);
1987 current_insn_ptr = buildaddr;
1988 amd64_emit_call (get_raw_reg_func_addr ());
1989 }
1990
1991 static void
1992 amd64_emit_pop (void)
1993 {
1994 EMIT_ASM (amd64_pop,
1995 "pop %rax");
1996 }
1997
1998 static void
1999 amd64_emit_stack_flush (void)
2000 {
2001 EMIT_ASM (amd64_stack_flush,
2002 "push %rax");
2003 }
2004
2005 static void
2006 amd64_emit_zero_ext (int arg)
2007 {
2008 switch (arg)
2009 {
2010 case 8:
2011 EMIT_ASM (amd64_zero_ext_8,
2012 "and $0xff,%rax");
2013 break;
2014 case 16:
2015 EMIT_ASM (amd64_zero_ext_16,
2016 "and $0xffff,%rax");
2017 break;
2018 case 32:
2019 EMIT_ASM (amd64_zero_ext_32,
2020 "mov $0xffffffff,%rcx\n\t"
2021 "and %rcx,%rax");
2022 break;
2023 default:
2024 emit_error = 1;
2025 }
2026 }
2027
2028 static void
2029 amd64_emit_swap (void)
2030 {
2031 EMIT_ASM (amd64_swap,
2032 "mov %rax,%rcx\n\t"
2033 "pop %rax\n\t"
2034 "push %rcx");
2035 }
2036
2037 static void
2038 amd64_emit_stack_adjust (int n)
2039 {
2040 unsigned char buf[16];
2041 int i;
2042 CORE_ADDR buildaddr = current_insn_ptr;
2043
2044 i = 0;
2045 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2046 buf[i++] = 0x8d;
2047 buf[i++] = 0x64;
2048 buf[i++] = 0x24;
2049 /* This only handles adjustments up to 16, but we don't expect any more. */
2050 buf[i++] = n * 8;
2051 append_insns (&buildaddr, i, buf);
2052 current_insn_ptr = buildaddr;
2053 }
2054
2055 /* FN's prototype is `LONGEST(*fn)(int)'. */
2056
2057 static void
2058 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2059 {
2060 unsigned char buf[16];
2061 int i;
2062 CORE_ADDR buildaddr;
2063
2064 buildaddr = current_insn_ptr;
2065 i = 0;
2066 buf[i++] = 0xbf; /* movl $<n>,%edi */
2067 memcpy (&buf[i], &arg1, sizeof (arg1));
2068 i += 4;
2069 append_insns (&buildaddr, i, buf);
2070 current_insn_ptr = buildaddr;
2071 amd64_emit_call (fn);
2072 }
2073
2074 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2075
2076 static void
2077 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2078 {
2079 unsigned char buf[16];
2080 int i;
2081 CORE_ADDR buildaddr;
2082
2083 buildaddr = current_insn_ptr;
2084 i = 0;
2085 buf[i++] = 0xbf; /* movl $<n>,%edi */
2086 memcpy (&buf[i], &arg1, sizeof (arg1));
2087 i += 4;
2088 append_insns (&buildaddr, i, buf);
2089 current_insn_ptr = buildaddr;
2090 EMIT_ASM (amd64_void_call_2_a,
2091 /* Save away a copy of the stack top. */
2092 "push %rax\n\t"
2093 /* Also pass top as the second argument. */
2094 "mov %rax,%rsi");
2095 amd64_emit_call (fn);
2096 EMIT_ASM (amd64_void_call_2_b,
2097 /* Restore the stack top; %rax may have been trashed.  */
2098 "pop %rax");
2099 }
2100
2101 static void
2102 amd64_emit_eq_goto (int *offset_p, int *size_p)
2103 {
2104 EMIT_ASM (amd64_eq,
2105 "cmp %rax,(%rsp)\n\t"
2106 "jne .Lamd64_eq_fallthru\n\t"
2107 "lea 0x8(%rsp),%rsp\n\t"
2108 "pop %rax\n\t"
2109 /* jmp, but don't trust the assembler to choose the right jump */
2110 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2111 ".Lamd64_eq_fallthru:\n\t"
2112 "lea 0x8(%rsp),%rsp\n\t"
2113 "pop %rax");
2114
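/* By the usual GAS encodings (byte counts assumed, not re-verified
   with an assembler): cmp (4) + jne (2) + lea (5) + pop (1) + the
   0xe9 opcode (1) = 13 bytes precede the 4-byte displacement patched
   by amd64_write_goto_address, hence the values below.  */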
2115 if (offset_p)
2116 *offset_p = 13;
2117 if (size_p)
2118 *size_p = 4;
2119 }
2120
2121 static void
2122 amd64_emit_ne_goto (int *offset_p, int *size_p)
2123 {
2124 EMIT_ASM (amd64_ne,
2125 "cmp %rax,(%rsp)\n\t"
2126 "je .Lamd64_ne_fallthru\n\t"
2127 "lea 0x8(%rsp),%rsp\n\t"
2128 "pop %rax\n\t"
2129 /* jmp, but don't trust the assembler to choose the right jump */
2130 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2131 ".Lamd64_ne_fallthru:\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2133 "pop %rax");
2134
2135 if (offset_p)
2136 *offset_p = 13;
2137 if (size_p)
2138 *size_p = 4;
2139 }
2140
2141 static void
2142 amd64_emit_lt_goto (int *offset_p, int *size_p)
2143 {
2144 EMIT_ASM (amd64_lt,
2145 "cmp %rax,(%rsp)\n\t"
2146 "jnl .Lamd64_lt_fallthru\n\t"
2147 "lea 0x8(%rsp),%rsp\n\t"
2148 "pop %rax\n\t"
2149 /* jmp, but don't trust the assembler to choose the right jump */
2150 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2151 ".Lamd64_lt_fallthru:\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2153 "pop %rax");
2154
2155 if (offset_p)
2156 *offset_p = 13;
2157 if (size_p)
2158 *size_p = 4;
2159 }
2160
2161 static void
2162 amd64_emit_le_goto (int *offset_p, int *size_p)
2163 {
2164 EMIT_ASM (amd64_le,
2165 "cmp %rax,(%rsp)\n\t"
2166 "jnle .Lamd64_le_fallthru\n\t"
2167 "lea 0x8(%rsp),%rsp\n\t"
2168 "pop %rax\n\t"
2169 /* jmp, but don't trust the assembler to choose the right jump */
2170 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2171 ".Lamd64_le_fallthru:\n\t"
2172 "lea 0x8(%rsp),%rsp\n\t"
2173 "pop %rax");
2174
2175 if (offset_p)
2176 *offset_p = 13;
2177 if (size_p)
2178 *size_p = 4;
2179 }
2180
2181 static void
2182 amd64_emit_gt_goto (int *offset_p, int *size_p)
2183 {
2184 EMIT_ASM (amd64_gt,
2185 "cmp %rax,(%rsp)\n\t"
2186 "jng .Lamd64_gt_fallthru\n\t"
2187 "lea 0x8(%rsp),%rsp\n\t"
2188 "pop %rax\n\t"
2189 /* jmp, but don't trust the assembler to choose the right jump */
2190 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2191 ".Lamd64_gt_fallthru:\n\t"
2192 "lea 0x8(%rsp),%rsp\n\t"
2193 "pop %rax");
2194
2195 if (offset_p)
2196 *offset_p = 13;
2197 if (size_p)
2198 *size_p = 4;
2199 }
2200
2201 static void
2202 amd64_emit_ge_goto (int *offset_p, int *size_p)
2203 {
2204 EMIT_ASM (amd64_ge,
2205 "cmp %rax,(%rsp)\n\t"
2206 "jnge .Lamd64_ge_fallthru\n\t"
2207 ".Lamd64_ge_jump:\n\t"
2208 "lea 0x8(%rsp),%rsp\n\t"
2209 "pop %rax\n\t"
2210 /* jmp, but don't trust the assembler to choose the right jump */
2211 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2212 ".Lamd64_ge_fallthru:\n\t"
2213 "lea 0x8(%rsp),%rsp\n\t"
2214 "pop %rax");
2215
2216 if (offset_p)
2217 *offset_p = 13;
2218 if (size_p)
2219 *size_p = 4;
2220 }
2221
2222 static emit_ops amd64_emit_ops =
2223 {
2224 amd64_emit_prologue,
2225 amd64_emit_epilogue,
2226 amd64_emit_add,
2227 amd64_emit_sub,
2228 amd64_emit_mul,
2229 amd64_emit_lsh,
2230 amd64_emit_rsh_signed,
2231 amd64_emit_rsh_unsigned,
2232 amd64_emit_ext,
2233 amd64_emit_log_not,
2234 amd64_emit_bit_and,
2235 amd64_emit_bit_or,
2236 amd64_emit_bit_xor,
2237 amd64_emit_bit_not,
2238 amd64_emit_equal,
2239 amd64_emit_less_signed,
2240 amd64_emit_less_unsigned,
2241 amd64_emit_ref,
2242 amd64_emit_if_goto,
2243 amd64_emit_goto,
2244 amd64_write_goto_address,
2245 amd64_emit_const,
2246 amd64_emit_call,
2247 amd64_emit_reg,
2248 amd64_emit_pop,
2249 amd64_emit_stack_flush,
2250 amd64_emit_zero_ext,
2251 amd64_emit_swap,
2252 amd64_emit_stack_adjust,
2253 amd64_emit_int_call_1,
2254 amd64_emit_void_call_2,
2255 amd64_emit_eq_goto,
2256 amd64_emit_ne_goto,
2257 amd64_emit_lt_goto,
2258 amd64_emit_le_goto,
2259 amd64_emit_gt_goto,
2260 amd64_emit_ge_goto
2261 };
2262
2263 #endif /* __x86_64__ */
2264
2265 static void
2266 i386_emit_prologue (void)
2267 {
2268 EMIT_ASM32 (i386_prologue,
2269 "push %ebp\n\t"
2270 "mov %esp,%ebp\n\t"
2271 "push %ebx");
2272 /* At this point, the raw regs base address is at 8(%ebp), and the
2273 value pointer is at 12(%ebp). */
2274 }
2275
2276 static void
2277 i386_emit_epilogue (void)
2278 {
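/* The emitted code keeps its 64-bit top of stack in the %eax (low) /
   %ebx (high) pair.  The epilogue stores that pair through the value
   pointer at 12(%ebp), zeroes %eax, restores the saved registers,
   and returns.  */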
2279 EMIT_ASM32 (i386_epilogue,
2280 "mov 12(%ebp),%ecx\n\t"
2281 "mov %eax,(%ecx)\n\t"
2282 "mov %ebx,0x4(%ecx)\n\t"
2283 "xor %eax,%eax\n\t"
2284 "pop %ebx\n\t"
2285 "pop %ebp\n\t"
2286 "ret");
2287 }
2288
2289 static void
2290 i386_emit_add (void)
2291 {
2292 EMIT_ASM32 (i386_add,
2293 "add (%esp),%eax\n\t"
2294 "adc 0x4(%esp),%ebx\n\t"
2295 "lea 0x8(%esp),%esp");
2296 }
2297
2298 static void
2299 i386_emit_sub (void)
2300 {
2301 EMIT_ASM32 (i386_sub,
2302 "subl %eax,(%esp)\n\t"
2303 "sbbl %ebx,4(%esp)\n\t"
2304 "pop %eax\n\t"
2305 "pop %ebx\n\t");
2306 }
2307
2308 static void
2309 i386_emit_mul (void)
2310 {
2311 emit_error = 1;
2312 }
2313
2314 static void
2315 i386_emit_lsh (void)
2316 {
2317 emit_error = 1;
2318 }
2319
2320 static void
2321 i386_emit_rsh_signed (void)
2322 {
2323 emit_error = 1;
2324 }
2325
2326 static void
2327 i386_emit_rsh_unsigned (void)
2328 {
2329 emit_error = 1;
2330 }
2331
2332 static void
2333 i386_emit_ext (int arg)
2334 {
2335 switch (arg)
2336 {
2337 case 8:
2338 EMIT_ASM32 (i386_ext_8,
2339 "cbtw\n\t"
2340 "cwtl\n\t"
2341 "movl %eax,%ebx\n\t"
2342 "sarl $31,%ebx");
2343 break;
2344 case 16:
2345 EMIT_ASM32 (i386_ext_16,
2346 "cwtl\n\t"
2347 "movl %eax,%ebx\n\t"
2348 "sarl $31,%ebx");
2349 break;
2350 case 32:
2351 EMIT_ASM32 (i386_ext_32,
2352 "movl %eax,%ebx\n\t"
2353 "sarl $31,%ebx");
2354 break;
2355 default:
2356 emit_error = 1;
2357 }
2358 }
2359
2360 static void
2361 i386_emit_log_not (void)
2362 {
2363 EMIT_ASM32 (i386_log_not,
2364 "or %ebx,%eax\n\t"
2365 "test %eax,%eax\n\t"
2366 "sete %cl\n\t"
2367 "xor %ebx,%ebx\n\t"
2368 "movzbl %cl,%eax");
2369 }
2370
2371 static void
2372 i386_emit_bit_and (void)
2373 {
2374 EMIT_ASM32 (i386_and,
2375 "and (%esp),%eax\n\t"
2376 "and 0x4(%esp),%ebx\n\t"
2377 "lea 0x8(%esp),%esp");
2378 }
2379
2380 static void
2381 i386_emit_bit_or (void)
2382 {
2383 EMIT_ASM32 (i386_or,
2384 "or (%esp),%eax\n\t"
2385 "or 0x4(%esp),%ebx\n\t"
2386 "lea 0x8(%esp),%esp");
2387 }
2388
2389 static void
2390 i386_emit_bit_xor (void)
2391 {
2392 EMIT_ASM32 (i386_xor,
2393 "xor (%esp),%eax\n\t"
2394 "xor 0x4(%esp),%ebx\n\t"
2395 "lea 0x8(%esp),%esp");
2396 }
2397
2398 static void
2399 i386_emit_bit_not (void)
2400 {
2401 EMIT_ASM32 (i386_bit_not,
2402 "xor $0xffffffff,%eax\n\t"
2403 "xor $0xffffffff,%ebx\n\t");
2404 }
2405
2406 static void
2407 i386_emit_equal (void)
2408 {
2409 EMIT_ASM32 (i386_equal,
2410 "cmpl %ebx,4(%esp)\n\t"
2411 "jne .Li386_equal_false\n\t"
2412 "cmpl %eax,(%esp)\n\t"
2413 "je .Li386_equal_true\n\t"
2414 ".Li386_equal_false:\n\t"
2415 "xor %eax,%eax\n\t"
2416 "jmp .Li386_equal_end\n\t"
2417 ".Li386_equal_true:\n\t"
2418 "mov $1,%eax\n\t"
2419 ".Li386_equal_end:\n\t"
2420 "xor %ebx,%ebx\n\t"
2421 "lea 0x8(%esp),%esp");
2422 }
2423
2424 static void
2425 i386_emit_less_signed (void)
2426 {
2427 EMIT_ASM32 (i386_less_signed,
2428 "cmpl %ebx,4(%esp)\n\t"
2429 "jl .Li386_less_signed_true\n\t"
2430 "jne .Li386_less_signed_false\n\t"
2431 "cmpl %eax,(%esp)\n\t"
2432 "jl .Li386_less_signed_true\n\t"
2433 ".Li386_less_signed_false:\n\t"
2434 "xor %eax,%eax\n\t"
2435 "jmp .Li386_less_signed_end\n\t"
2436 ".Li386_less_signed_true:\n\t"
2437 "mov $1,%eax\n\t"
2438 ".Li386_less_signed_end:\n\t"
2439 "xor %ebx,%ebx\n\t"
2440 "lea 0x8(%esp),%esp");
2441 }
2442
2443 static void
2444 i386_emit_less_unsigned (void)
2445 {
2446 EMIT_ASM32 (i386_less_unsigned,
2447 "cmpl %ebx,4(%esp)\n\t"
2448 "jb .Li386_less_unsigned_true\n\t"
2449 "jne .Li386_less_unsigned_false\n\t"
2450 "cmpl %eax,(%esp)\n\t"
2451 "jb .Li386_less_unsigned_true\n\t"
2452 ".Li386_less_unsigned_false:\n\t"
2453 "xor %eax,%eax\n\t"
2454 "jmp .Li386_less_unsigned_end\n\t"
2455 ".Li386_less_unsigned_true:\n\t"
2456 "mov $1,%eax\n\t"
2457 ".Li386_less_unsigned_end:\n\t"
2458 "xor %ebx,%ebx\n\t"
2459 "lea 0x8(%esp),%esp");
2460 }
2461
2462 static void
2463 i386_emit_ref (int size)
2464 {
2465 switch (size)
2466 {
2467 case 1:
2468 EMIT_ASM32 (i386_ref1,
2469 "movb (%eax),%al");
2470 break;
2471 case 2:
2472 EMIT_ASM32 (i386_ref2,
2473 "movw (%eax),%ax");
2474 break;
2475 case 4:
2476 EMIT_ASM32 (i386_ref4,
2477 "movl (%eax),%eax");
2478 break;
2479 case 8:
2480 EMIT_ASM32 (i386_ref8,
2481 "movl 4(%eax),%ebx\n\t"
2482 "movl (%eax),%eax");
2483 break;
2484 }
2485 }
2486
2487 static void
2488 i386_emit_if_goto (int *offset_p, int *size_p)
2489 {
2490 EMIT_ASM32 (i386_if_goto,
2491 "mov %eax,%ecx\n\t"
2492 "or %ebx,%ecx\n\t"
2493 "pop %eax\n\t"
2494 "pop %ebx\n\t"
2495 "cmpl $0,%ecx\n\t"
2496 /* Don't trust the assembler to choose the right jump */
2497 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2498
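/* Byte accounting for the sequence above (assuming the usual GAS
   encodings): mov (2) + or (2) + pop (1) + pop (1) + cmpl (3) + the
   two-byte 0x0f 0x85 opcode = 11 bytes before the 4-byte
   displacement, matching the values below.  */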
2499 if (offset_p)
2500 *offset_p = 11; /* be sure that this matches the sequence above */
2501 if (size_p)
2502 *size_p = 4;
2503 }
2504
2505 static void
2506 i386_emit_goto (int *offset_p, int *size_p)
2507 {
2508 EMIT_ASM32 (i386_goto,
2509 /* Don't trust the assembler to choose the right jump */
2510 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2511 if (offset_p)
2512 *offset_p = 1;
2513 if (size_p)
2514 *size_p = 4;
2515 }
2516
2517 static void
2518 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2519 {
2520 int diff = (to - (from + size));
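/* For example (hypothetical addresses), patching a 4-byte
   displacement at FROM == 0x1000 so it reaches TO == 0x1100 stores
   0x1100 - (0x1000 + 4) = 0xfc.  */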
2521 unsigned char buf[sizeof (int)];
2522
2523 /* We're only doing 4-byte sizes at the moment. */
2524 if (size != 4)
2525 {
2526 emit_error = 1;
2527 return;
2528 }
2529
2530 memcpy (buf, &diff, sizeof (int));
2531 target_write_memory (from, buf, sizeof (int));
2532 }
2533
2534 static void
2535 i386_emit_const (LONGEST num)
2536 {
2537 unsigned char buf[16];
2538 int i, hi, lo;
2539 CORE_ADDR buildaddr = current_insn_ptr;
2540
2541 i = 0;
2542 buf[i++] = 0xb8; /* mov $<n>,%eax */
2543 lo = num & 0xffffffff;
2544 memcpy (&buf[i], &lo, sizeof (lo));
2545 i += 4;
2546 hi = ((num >> 32) & 0xffffffff);
2547 if (hi)
2548 {
2549 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2550 memcpy (&buf[i], &hi, sizeof (hi));
2551 i += 4;
2552 }
2553 else
2554 {
2555 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2556 }
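/* For example (values chosen purely for illustration), num ==
   0x100000002 emits mov $0x2,%eax followed by mov $0x1,%ebx, while
   num == 5 emits mov $0x5,%eax and clears %ebx with the xor.  */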
2557 append_insns (&buildaddr, i, buf);
2558 current_insn_ptr = buildaddr;
2559 }
2560
2561 static void
2562 i386_emit_call (CORE_ADDR fn)
2563 {
2564 unsigned char buf[16];
2565 int i, offset;
2566 CORE_ADDR buildaddr;
2567
2568 buildaddr = current_insn_ptr;
2569 i = 0;
2570 buf[i++] = 0xe8; /* call <reladdr> */
2571 offset = ((int) fn) - (buildaddr + 5);
2572 memcpy (buf + 1, &offset, 4);
2573 append_insns (&buildaddr, 5, buf);
2574 current_insn_ptr = buildaddr;
2575 }
2576
2577 static void
2578 i386_emit_reg (int reg)
2579 {
2580 unsigned char buf[16];
2581 int i;
2582 CORE_ADDR buildaddr;
2583
2584 EMIT_ASM32 (i386_reg_a,
2585 "sub $0x8,%esp");
2586 buildaddr = current_insn_ptr;
2587 i = 0;
2588 buf[i++] = 0xb8; /* mov $<n>,%eax */
2589 memcpy (&buf[i], &reg, sizeof (reg));
2590 i += 4;
2591 append_insns (&buildaddr, i, buf);
2592 current_insn_ptr = buildaddr;
2593 EMIT_ASM32 (i386_reg_b,
2594 "mov %eax,4(%esp)\n\t"
2595 "mov 8(%ebp),%eax\n\t"
2596 "mov %eax,(%esp)");
2597 i386_emit_call (get_raw_reg_func_addr ());
2598 EMIT_ASM32 (i386_reg_c,
2599 "xor %ebx,%ebx\n\t"
2600 "lea 0x8(%esp),%esp");
2601 }
2602
2603 static void
2604 i386_emit_pop (void)
2605 {
2606 EMIT_ASM32 (i386_pop,
2607 "pop %eax\n\t"
2608 "pop %ebx");
2609 }
2610
2611 static void
2612 i386_emit_stack_flush (void)
2613 {
2614 EMIT_ASM32 (i386_stack_flush,
2615 "push %ebx\n\t"
2616 "push %eax");
2617 }
2618
2619 static void
2620 i386_emit_zero_ext (int arg)
2621 {
2622 switch (arg)
2623 {
2624 case 8:
2625 EMIT_ASM32 (i386_zero_ext_8,
2626 "and $0xff,%eax\n\t"
2627 "xor %ebx,%ebx");
2628 break;
2629 case 16:
2630 EMIT_ASM32 (i386_zero_ext_16,
2631 "and $0xffff,%eax\n\t"
2632 "xor %ebx,%ebx");
2633 break;
2634 case 32:
2635 EMIT_ASM32 (i386_zero_ext_32,
2636 "xor %ebx,%ebx");
2637 break;
2638 default:
2639 emit_error = 1;
2640 }
2641 }
2642
2643 static void
2644 i386_emit_swap (void)
2645 {
2646 EMIT_ASM32 (i386_swap,
2647 "mov %eax,%ecx\n\t"
2648 "mov %ebx,%edx\n\t"
2649 "pop %eax\n\t"
2650 "pop %ebx\n\t"
2651 "push %edx\n\t"
2652 "push %ecx");
2653 }
2654
2655 static void
2656 i386_emit_stack_adjust (int n)
2657 {
2658 unsigned char buf[16];
2659 int i;
2660 CORE_ADDR buildaddr = current_insn_ptr;
2661
2662 i = 0;
2663 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2664 buf[i++] = 0x64;
2665 buf[i++] = 0x24;
2666 buf[i++] = n * 8;
2667 append_insns (&buildaddr, i, buf);
2668 current_insn_ptr = buildaddr;
2669 }
2670
2671 /* FN's prototype is `LONGEST(*fn)(int)'. */
2672
2673 static void
2674 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2675 {
2676 unsigned char buf[16];
2677 int i;
2678 CORE_ADDR buildaddr;
2679
2680 EMIT_ASM32 (i386_int_call_1_a,
2681 /* Reserve a bit of stack space. */
2682 "sub $0x8,%esp");
2683 /* Put the one argument on the stack. */
2684 buildaddr = current_insn_ptr;
2685 i = 0;
2686 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2687 buf[i++] = 0x04;
2688 buf[i++] = 0x24;
2689 memcpy (&buf[i], &arg1, sizeof (arg1));
2690 i += 4;
2691 append_insns (&buildaddr, i, buf);
2692 current_insn_ptr = buildaddr;
2693 i386_emit_call (fn);
2694 EMIT_ASM32 (i386_int_call_1_c,
2695 "mov %edx,%ebx\n\t"
2696 "lea 0x8(%esp),%esp");
2697 }
2698
2699 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2700
2701 static void
2702 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2703 {
2704 unsigned char buf[16];
2705 int i;
2706 CORE_ADDR buildaddr;
2707
2708 EMIT_ASM32 (i386_void_call_2_a,
2709 /* Preserve %eax only; we don't have to worry about %ebx. */
2710 "push %eax\n\t"
2711 /* Reserve a bit of stack space for arguments. */
2712 "sub $0x10,%esp\n\t"
2713 /* Copy "top" to the second argument position. (Note that
2714 we can't assume the function won't scribble on its
2715 arguments, so don't try to restore from this.) */
2716 "mov %eax,4(%esp)\n\t"
2717 "mov %ebx,8(%esp)");
2718 /* Put the first argument on the stack. */
2719 buildaddr = current_insn_ptr;
2720 i = 0;
2721 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2722 buf[i++] = 0x04;
2723 buf[i++] = 0x24;
2724 memcpy (&buf[i], &arg1, sizeof (arg1));
2725 i += 4;
2726 append_insns (&buildaddr, i, buf);
2727 current_insn_ptr = buildaddr;
2728 i386_emit_call (fn);
2729 EMIT_ASM32 (i386_void_call_2_b,
2730 "lea 0x10(%esp),%esp\n\t"
2731 /* Restore original stack top. */
2732 "pop %eax");
2733 }
2734
2735
2736 static void
2737 i386_emit_eq_goto (int *offset_p, int *size_p)
2738 {
2739 EMIT_ASM32 (eq,
2740 /* Check the low half first; it is more likely to be the decider.  */
2741 "cmpl %eax,(%esp)\n\t"
2742 "jne .Leq_fallthru\n\t"
2743 "cmpl %ebx,4(%esp)\n\t"
2744 "jne .Leq_fallthru\n\t"
2745 "lea 0x8(%esp),%esp\n\t"
2746 "pop %eax\n\t"
2747 "pop %ebx\n\t"
2748 /* jmp, but don't trust the assembler to choose the right jump */
2749 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2750 ".Leq_fallthru:\n\t"
2751 "lea 0x8(%esp),%esp\n\t"
2752 "pop %eax\n\t"
2753 "pop %ebx");
2754
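/* Byte accounting (assuming the usual GAS encodings): cmpl (3) +
   jne (2) + cmpl (4) + jne (2) + lea (4) + pop (1) + pop (1) + the
   0xe9 opcode (1) = 18 bytes before the displacement.  The lt/le/gt/ge
   variants below contain one extra short conditional jump, which is
   why they use 20 instead.  */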
2755 if (offset_p)
2756 *offset_p = 18;
2757 if (size_p)
2758 *size_p = 4;
2759 }
2760
2761 static void
2762 i386_emit_ne_goto (int *offset_p, int *size_p)
2763 {
2764 EMIT_ASM32 (ne,
2765 /* Check the low half first; it is more likely to be the decider.  */
2766 "cmpl %eax,(%esp)\n\t"
2767 "jne .Lne_jump\n\t"
2768 "cmpl %ebx,4(%esp)\n\t"
2769 "je .Lne_fallthru\n\t"
2770 ".Lne_jump:\n\t"
2771 "lea 0x8(%esp),%esp\n\t"
2772 "pop %eax\n\t"
2773 "pop %ebx\n\t"
2774 /* jmp, but don't trust the assembler to choose the right jump */
2775 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2776 ".Lne_fallthru:\n\t"
2777 "lea 0x8(%esp),%esp\n\t"
2778 "pop %eax\n\t"
2779 "pop %ebx");
2780
2781 if (offset_p)
2782 *offset_p = 18;
2783 if (size_p)
2784 *size_p = 4;
2785 }
2786
2787 static void
2788 i386_emit_lt_goto (int *offset_p, int *size_p)
2789 {
2790 EMIT_ASM32 (lt,
2791 "cmpl %ebx,4(%esp)\n\t"
2792 "jl .Llt_jump\n\t"
2793 "jne .Llt_fallthru\n\t"
2794 "cmpl %eax,(%esp)\n\t"
2795 "jnl .Llt_fallthru\n\t"
2796 ".Llt_jump:\n\t"
2797 "lea 0x8(%esp),%esp\n\t"
2798 "pop %eax\n\t"
2799 "pop %ebx\n\t"
2800 /* jmp, but don't trust the assembler to choose the right jump */
2801 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2802 ".Llt_fallthru:\n\t"
2803 "lea 0x8(%esp),%esp\n\t"
2804 "pop %eax\n\t"
2805 "pop %ebx");
2806
2807 if (offset_p)
2808 *offset_p = 20;
2809 if (size_p)
2810 *size_p = 4;
2811 }
2812
2813 static void
2814 i386_emit_le_goto (int *offset_p, int *size_p)
2815 {
2816 EMIT_ASM32 (le,
2817 "cmpl %ebx,4(%esp)\n\t"
2818 "jle .Lle_jump\n\t"
2819 "jne .Lle_fallthru\n\t"
2820 "cmpl %eax,(%esp)\n\t"
2821 "jnle .Lle_fallthru\n\t"
2822 ".Lle_jump:\n\t"
2823 "lea 0x8(%esp),%esp\n\t"
2824 "pop %eax\n\t"
2825 "pop %ebx\n\t"
2826 /* jmp, but don't trust the assembler to choose the right jump */
2827 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2828 ".Lle_fallthru:\n\t"
2829 "lea 0x8(%esp),%esp\n\t"
2830 "pop %eax\n\t"
2831 "pop %ebx");
2832
2833 if (offset_p)
2834 *offset_p = 20;
2835 if (size_p)
2836 *size_p = 4;
2837 }
2838
2839 static void
2840 i386_emit_gt_goto (int *offset_p, int *size_p)
2841 {
2842 EMIT_ASM32 (gt,
2843 "cmpl %ebx,4(%esp)\n\t"
2844 "jg .Lgt_jump\n\t"
2845 "jne .Lgt_fallthru\n\t"
2846 "cmpl %eax,(%esp)\n\t"
2847 "jng .Lgt_fallthru\n\t"
2848 ".Lgt_jump:\n\t"
2849 "lea 0x8(%esp),%esp\n\t"
2850 "pop %eax\n\t"
2851 "pop %ebx\n\t"
2852 /* jmp, but don't trust the assembler to choose the right jump */
2853 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2854 ".Lgt_fallthru:\n\t"
2855 "lea 0x8(%esp),%esp\n\t"
2856 "pop %eax\n\t"
2857 "pop %ebx");
2858
2859 if (offset_p)
2860 *offset_p = 20;
2861 if (size_p)
2862 *size_p = 4;
2863 }
2864
2865 static void
2866 i386_emit_ge_goto (int *offset_p, int *size_p)
2867 {
2868 EMIT_ASM32 (ge,
2869 "cmpl %ebx,4(%esp)\n\t"
2870 "jge .Lge_jump\n\t"
2871 "jne .Lge_fallthru\n\t"
2872 "cmpl %eax,(%esp)\n\t"
2873 "jnge .Lge_fallthru\n\t"
2874 ".Lge_jump:\n\t"
2875 "lea 0x8(%esp),%esp\n\t"
2876 "pop %eax\n\t"
2877 "pop %ebx\n\t"
2878 /* jmp, but don't trust the assembler to choose the right jump */
2879 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2880 ".Lge_fallthru:\n\t"
2881 "lea 0x8(%esp),%esp\n\t"
2882 "pop %eax\n\t"
2883 "pop %ebx");
2884
2885 if (offset_p)
2886 *offset_p = 20;
2887 if (size_p)
2888 *size_p = 4;
2889 }
2890
2891 static emit_ops i386_emit_ops =
2892 {
2893 i386_emit_prologue,
2894 i386_emit_epilogue,
2895 i386_emit_add,
2896 i386_emit_sub,
2897 i386_emit_mul,
2898 i386_emit_lsh,
2899 i386_emit_rsh_signed,
2900 i386_emit_rsh_unsigned,
2901 i386_emit_ext,
2902 i386_emit_log_not,
2903 i386_emit_bit_and,
2904 i386_emit_bit_or,
2905 i386_emit_bit_xor,
2906 i386_emit_bit_not,
2907 i386_emit_equal,
2908 i386_emit_less_signed,
2909 i386_emit_less_unsigned,
2910 i386_emit_ref,
2911 i386_emit_if_goto,
2912 i386_emit_goto,
2913 i386_write_goto_address,
2914 i386_emit_const,
2915 i386_emit_call,
2916 i386_emit_reg,
2917 i386_emit_pop,
2918 i386_emit_stack_flush,
2919 i386_emit_zero_ext,
2920 i386_emit_swap,
2921 i386_emit_stack_adjust,
2922 i386_emit_int_call_1,
2923 i386_emit_void_call_2,
2924 i386_emit_eq_goto,
2925 i386_emit_ne_goto,
2926 i386_emit_lt_goto,
2927 i386_emit_le_goto,
2928 i386_emit_gt_goto,
2929 i386_emit_ge_goto
2930 };
2931
2932
2933 emit_ops *
2934 x86_target::emit_ops ()
2935 {
2936 #ifdef __x86_64__
2937 if (is_64bit_tdesc ())
2938 return &amd64_emit_ops;
2939 else
2940 #endif
2941 return &i386_emit_ops;
2942 }
2943
2944 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2945
2946 const gdb_byte *
2947 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2948 {
2949 *size = x86_breakpoint_len;
2950 return x86_breakpoint;
2951 }
2952
2953 bool
2954 x86_target::low_supports_range_stepping ()
2955 {
2956 return true;
2957 }
2958
2959 int
2960 x86_target::get_ipa_tdesc_idx ()
2961 {
2962 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2963 const struct target_desc *tdesc = regcache->tdesc;
2964
2965 #ifdef __x86_64__
2966 return amd64_get_ipa_tdesc_idx (tdesc);
2967 #endif
2968
2969 if (tdesc == tdesc_i386_linux_no_xml.get ())
2970 return X86_TDESC_SSE;
2971
2972 return i386_get_ipa_tdesc_idx (tdesc);
2973 }
2974
2975 /* The linux target ops object. */
2976
2977 linux_process_target *the_linux_target = &the_x86_target;
2978
2979 void
2980 initialize_low_arch (void)
2981 {
2982 /* Initialize the Linux target descriptions. */
2983 #ifdef __x86_64__
2984 tdesc_amd64_linux_no_xml = allocate_target_description ();
2985 copy_target_description (tdesc_amd64_linux_no_xml.get (),
2986 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2987 false));
2988 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2989 #endif
2990
2991 tdesc_i386_linux_no_xml = allocate_target_description ();
2992 copy_target_description (tdesc_i386_linux_no_xml.get (),
2993 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2994 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2995
2996 initialize_regsets_info (&x86_regsets_info);
2997 }