Implement stopped_by_sw_breakpoint for Windows gdbserver
gdbserver/linux-x86-low.cc (binutils-gdb.git)
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };   /* jmp rel32 */
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };   /* jmp rel16 (operand-size prefixed) */
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (char **features, int count) override;
110
111 bool supports_tracepoints () override;
112
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
125 struct emit_ops *emit_ops () override;
126
127 int get_ipa_tdesc_idx () override;
128
129 protected:
130
131 void low_arch_setup () override;
132
133 bool low_cannot_fetch_register (int regno) override;
134
135 bool low_cannot_store_register (int regno) override;
136
137 bool low_supports_breakpoints () override;
138
139 CORE_ADDR low_get_pc (regcache *regcache) override;
140
141 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
142
143 int low_decr_pc_after_break () override;
144
145 bool low_breakpoint_at (CORE_ADDR pc) override;
146
147 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
148 int size, raw_breakpoint *bp) override;
149
150 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
151 int size, raw_breakpoint *bp) override;
152
153 bool low_stopped_by_watchpoint () override;
154
155 CORE_ADDR low_stopped_data_address () override;
156
157 /* collect_ptrace_register/supply_ptrace_register are not needed in the
158 native i386 case (no registers smaller than an xfer unit), and are not
159 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
160
161 /* Need to fix up i386 siginfo if host is amd64. */
162 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
163 int direction) override;
164
165 arch_process_info *low_new_process () override;
166
167 void low_delete_process (arch_process_info *info) override;
168
169 void low_new_thread (lwp_info *) override;
170
171 void low_delete_thread (arch_lwp_info *) override;
172
173 void low_new_fork (process_info *parent, process_info *child) override;
174
175 void low_prepare_to_resume (lwp_info *lwp) override;
176
177 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
178
179 bool low_supports_range_stepping () override;
180
181 bool low_supports_catch_syscall () override;
182
183 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
184
185 private:
186
187 /* Update the target descriptions of all processes; a new GDB has
188 connected, and it may or may not support xml target descriptions. */
189 void update_xmltarget ();
190 };
191
192 /* The singleton target ops object. */
193
194 static x86_target the_x86_target;
195
196 /* Per-process arch-specific data we want to keep. */
197
198 struct arch_process_info
199 {
200 struct x86_debug_reg_state debug_reg_state;
201 };
202
203 #ifdef __x86_64__
204
205 /* Mapping between the general-purpose registers in `struct user'
206 format and GDB's register array layout.
207 Note that the transfer layout uses 64-bit regs. */
208 static /*const*/ int i386_regmap[] =
209 {
210 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
211 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
212 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
213 DS * 8, ES * 8, FS * 8, GS * 8
214 };
215
216 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
217
218 /* So the code below doesn't have to care whether it's i386 or amd64. */
219 #define ORIG_EAX ORIG_RAX
220 #define REGSIZE 8
221
222 static const int x86_64_regmap[] =
223 {
224 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
225 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
226 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
227 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
228 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
229 DS * 8, ES * 8, FS * 8, GS * 8,
230 -1, -1, -1, -1, -1, -1, -1, -1,
231 -1, -1, -1, -1, -1, -1, -1, -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
233 -1,
234 -1, -1, -1, -1, -1, -1, -1, -1,
235 ORIG_RAX * 8,
236 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
237 21 * 8, 22 * 8,
238 #else
239 -1, -1,
240 #endif
241 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
242 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
243 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
244 -1, -1, -1, -1, -1, -1, -1, -1,
245 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
246 -1, -1, -1, -1, -1, -1, -1, -1,
247 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
248 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
249 -1, -1, -1, -1, -1, -1, -1, -1,
250 -1, -1, -1, -1, -1, -1, -1, -1,
251 -1, -1, -1, -1, -1, -1, -1, -1,
252 -1 /* pkru */
253 };
254
255 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
256 #define X86_64_USER_REGS (GS + 1)
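
/* Note: a -1 entry in x86_64_regmap means the register has no slot in
   the ptrace GETREGS buffer -- it is transferred through the FP/XSTATE
   regsets instead -- and x86_fill_gregset / x86_store_gregset below
   simply skip such registers.  */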
257
258 #else /* ! __x86_64__ */
259
260 /* Mapping between the general-purpose registers in `struct user'
261 format and GDB's register array layout. */
262 static /*const*/ int i386_regmap[] =
263 {
264 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
265 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
266 EIP * 4, EFL * 4, CS * 4, SS * 4,
267 DS * 4, ES * 4, FS * 4, GS * 4
268 };
269
270 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
271
272 #define REGSIZE 4
273
274 #endif
275
276 #ifdef __x86_64__
277
278 /* Returns true if the current inferior belongs to an x86-64 process,
279 per the tdesc. */
280
281 static int
282 is_64bit_tdesc (void)
283 {
284 struct regcache *regcache = get_thread_regcache (current_thread, 0);
285
286 return register_size (regcache->tdesc, 0) == 8;
287 }
288
289 #endif
290
291 \f
292 /* Called by libthread_db. */
293
294 ps_err_e
295 ps_get_thread_area (struct ps_prochandle *ph,
296 lwpid_t lwpid, int idx, void **base)
297 {
298 #ifdef __x86_64__
299 int use_64bit = is_64bit_tdesc ();
300
301 if (use_64bit)
302 {
303 switch (idx)
304 {
305 case FS:
306 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
307 return PS_OK;
308 break;
309 case GS:
310 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
311 return PS_OK;
312 break;
313 default:
314 return PS_BADADDR;
315 }
316 return PS_ERR;
317 }
318 #endif
319
320 {
321 unsigned int desc[4];
322
323 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
324 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
325 return PS_ERR;
326
327 /* Ensure we properly extend the value to 64-bits for x86_64. */
328 *base = (void *) (uintptr_t) desc[1];
329 return PS_OK;
330 }
331 }
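
/* Note on the PTRACE_ARCH_PRCTL calls above: the ptrace request takes
   its arguments in the opposite order from the arch_prctl syscall; the
   ptrace ADDR argument is the location to read or write (BASE above)
   and the ptrace DATA argument is the ARCH_GET_FS/ARCH_GET_GS code,
   which is why the calls may look "backwards" at first glance.  */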
332
333 /* Get the thread area address. This is used to recognize which
334 thread is which when tracing with the in-process agent library. We
335 don't read anything from the address, and treat it as opaque; it's
336 the address itself that we assume is unique per-thread. */
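
/* Worked example: an x86 segment selector is laid out as
   index[15:3] | TI[2] | RPL[1:0], which is why the function below
   shifts %gs right by reg_thread_area (3) to obtain the descriptor
   index passed to PTRACE_GET_THREAD_AREA.  A typical i386 Linux TLS
   selector of 0x33 decodes as

     idx = 0x33 >> 3 = 6 (GDT entry),  TI = 0,  RPL = 3

   and desc[1] of the returned descriptor holds the thread-area base.  */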
337
338 int
339 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
340 {
341 #ifdef __x86_64__
342 int use_64bit = is_64bit_tdesc ();
343
344 if (use_64bit)
345 {
346 void *base;
347 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
348 {
349 *addr = (CORE_ADDR) (uintptr_t) base;
350 return 0;
351 }
352
353 return -1;
354 }
355 #endif
356
357 {
358 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
359 struct thread_info *thr = get_lwp_thread (lwp);
360 struct regcache *regcache = get_thread_regcache (thr, 1);
361 unsigned int desc[4];
362 ULONGEST gs = 0;
363 const int reg_thread_area = 3; /* bits to scale down register value. */
364 int idx;
365
366 collect_register_by_name (regcache, "gs", &gs);
367
368 idx = gs >> reg_thread_area;
369
370 if (ptrace (PTRACE_GET_THREAD_AREA,
371 lwpid_of (thr),
372 (void *) (long) idx, (unsigned long) &desc) < 0)
373 return -1;
374
375 *addr = desc[1];
376 return 0;
377 }
378 }
379
380
381 \f
382 bool
383 x86_target::low_cannot_store_register (int regno)
384 {
385 #ifdef __x86_64__
386 if (is_64bit_tdesc ())
387 return false;
388 #endif
389
390 return regno >= I386_NUM_REGS;
391 }
392
393 bool
394 x86_target::low_cannot_fetch_register (int regno)
395 {
396 #ifdef __x86_64__
397 if (is_64bit_tdesc ())
398 return false;
399 #endif
400
401 return regno >= I386_NUM_REGS;
402 }
403
404 static void
405 x86_fill_gregset (struct regcache *regcache, void *buf)
406 {
407 int i;
408
409 #ifdef __x86_64__
410 if (register_size (regcache->tdesc, 0) == 8)
411 {
412 for (i = 0; i < X86_64_NUM_REGS; i++)
413 if (x86_64_regmap[i] != -1)
414 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
415
416 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
417 {
418 unsigned long base;
419 int lwpid = lwpid_of (current_thread);
420
421 collect_register_by_name (regcache, "fs_base", &base);
422 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
423
424 collect_register_by_name (regcache, "gs_base", &base);
425 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
426 }
427 #endif
428
429 return;
430 }
431
432 /* 32-bit inferior registers need to be zero-extended.
433 Callers would read uninitialized memory otherwise. */
434 memset (buf, 0x00, X86_64_USER_REGS * 8);
435 #endif
436
437 for (i = 0; i < I386_NUM_REGS; i++)
438 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
439
440 collect_register_by_name (regcache, "orig_eax",
441 ((char *) buf) + ORIG_EAX * REGSIZE);
442
443 #ifdef __x86_64__
444 /* Sign extend EAX value to avoid potential syscall restart
445 problems.
446
447 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
448 for a detailed explanation. */
449 if (register_size (regcache->tdesc, 0) == 4)
450 {
451 void *ptr = ((gdb_byte *) buf
452 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
453
454 *(int64_t *) ptr = *(int32_t *) ptr;
455 }
456 #endif
457 }
458
459 static void
460 x86_store_gregset (struct regcache *regcache, const void *buf)
461 {
462 int i;
463
464 #ifdef __x86_64__
465 if (register_size (regcache->tdesc, 0) == 8)
466 {
467 for (i = 0; i < X86_64_NUM_REGS; i++)
468 if (x86_64_regmap[i] != -1)
469 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
470
471 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
472 {
473 unsigned long base;
474 int lwpid = lwpid_of (current_thread);
475
476 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
477 supply_register_by_name (regcache, "fs_base", &base);
478
479 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
480 supply_register_by_name (regcache, "gs_base", &base);
481 }
482 #endif
483 return;
484 }
485 #endif
486
487 for (i = 0; i < I386_NUM_REGS; i++)
488 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
489
490 supply_register_by_name (regcache, "orig_eax",
491 ((char *) buf) + ORIG_EAX * REGSIZE);
492 }
493
494 static void
495 x86_fill_fpregset (struct regcache *regcache, void *buf)
496 {
497 #ifdef __x86_64__
498 i387_cache_to_fxsave (regcache, buf);
499 #else
500 i387_cache_to_fsave (regcache, buf);
501 #endif
502 }
503
504 static void
505 x86_store_fpregset (struct regcache *regcache, const void *buf)
506 {
507 #ifdef __x86_64__
508 i387_fxsave_to_cache (regcache, buf);
509 #else
510 i387_fsave_to_cache (regcache, buf);
511 #endif
512 }
513
514 #ifndef __x86_64__
515
516 static void
517 x86_fill_fpxregset (struct regcache *regcache, void *buf)
518 {
519 i387_cache_to_fxsave (regcache, buf);
520 }
521
522 static void
523 x86_store_fpxregset (struct regcache *regcache, const void *buf)
524 {
525 i387_fxsave_to_cache (regcache, buf);
526 }
527
528 #endif
529
530 static void
531 x86_fill_xstateregset (struct regcache *regcache, void *buf)
532 {
533 i387_cache_to_xsave (regcache, buf);
534 }
535
536 static void
537 x86_store_xstateregset (struct regcache *regcache, const void *buf)
538 {
539 i387_xsave_to_cache (regcache, buf);
540 }
541
542 /* ??? The non-biarch i386 case stores all the i387 regs twice.
543 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
544 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
545 doesn't work. It would be nice to avoid the duplication when it does
546 work. Maybe the arch_setup routine could check whether it works
547 and update the supported regsets accordingly. */
548
549 static struct regset_info x86_regsets[] =
550 {
551 #ifdef HAVE_PTRACE_GETREGS
552 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
553 GENERAL_REGS,
554 x86_fill_gregset, x86_store_gregset },
555 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
556 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
557 # ifndef __x86_64__
558 # ifdef HAVE_PTRACE_GETFPXREGS
559 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
560 EXTENDED_REGS,
561 x86_fill_fpxregset, x86_store_fpxregset },
562 # endif
563 # endif
564 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
565 FP_REGS,
566 x86_fill_fpregset, x86_store_fpregset },
567 #endif /* HAVE_PTRACE_GETREGS */
568 NULL_REGSET
569 };
570
571 bool
572 x86_target::low_supports_breakpoints ()
573 {
574 return true;
575 }
576
577 CORE_ADDR
578 x86_target::low_get_pc (regcache *regcache)
579 {
580 int use_64bit = register_size (regcache->tdesc, 0) == 8;
581
582 if (use_64bit)
583 {
584 uint64_t pc;
585
586 collect_register_by_name (regcache, "rip", &pc);
587 return (CORE_ADDR) pc;
588 }
589 else
590 {
591 uint32_t pc;
592
593 collect_register_by_name (regcache, "eip", &pc);
594 return (CORE_ADDR) pc;
595 }
596 }
597
598 void
599 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
600 {
601 int use_64bit = register_size (regcache->tdesc, 0) == 8;
602
603 if (use_64bit)
604 {
605 uint64_t newpc = pc;
606
607 supply_register_by_name (regcache, "rip", &newpc);
608 }
609 else
610 {
611 uint32_t newpc = pc;
612
613 supply_register_by_name (regcache, "eip", &newpc);
614 }
615 }
616
617 int
618 x86_target::low_decr_pc_after_break ()
619 {
620 return 1;
621 }
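
/* Worked example: after the int3 breakpoint instruction (0xCC, see
   x86_breakpoint below) traps, the kernel reports a PC just past it.
   For a breakpoint planted at 0x401000:

     reported PC = 0x401001,  decr_pc_after_break = 1,
     breakpoint address = 0x401001 - 1 = 0x401000

   which is why low_decr_pc_after_break () returns 1 here.  */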
622
623 \f
624 static const gdb_byte x86_breakpoint[] = { 0xCC };
625 #define x86_breakpoint_len 1
626
627 bool
628 x86_target::low_breakpoint_at (CORE_ADDR pc)
629 {
630 unsigned char c;
631
632 read_memory (pc, &c, 1);
633 if (c == 0xCC)
634 return true;
635
636 return false;
637 }
638 \f
639 /* Low-level function vector. */
640 struct x86_dr_low_type x86_dr_low =
641 {
642 x86_linux_dr_set_control,
643 x86_linux_dr_set_addr,
644 x86_linux_dr_get_addr,
645 x86_linux_dr_get_status,
646 x86_linux_dr_get_control,
647 sizeof (void *),
648 };
649 \f
650 /* Breakpoint/Watchpoint support. */
651
652 bool
653 x86_target::supports_z_point_type (char z_type)
654 {
655 switch (z_type)
656 {
657 case Z_PACKET_SW_BP:
658 case Z_PACKET_HW_BP:
659 case Z_PACKET_WRITE_WP:
660 case Z_PACKET_ACCESS_WP:
661 return true;
662 default:
663 return false;
664 }
665 }
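
/* Note: Z_TYPE comes straight from the remote protocol's Z/z packets,
   e.g. "Z0,addr,kind" inserts a software breakpoint.  The cases
   accepted above correspond to Z0 (software breakpoint), Z1 (hardware
   breakpoint), Z2 (write watchpoint) and Z4 (access watchpoint).
   Read-only watchpoints (Z3) are not offered, since x86 debug
   registers cannot trap reads without also trapping writes.  */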
666
667 int
668 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
669 int size, raw_breakpoint *bp)
670 {
671 struct process_info *proc = current_process ();
672
673 switch (type)
674 {
675 case raw_bkpt_type_hw:
676 case raw_bkpt_type_write_wp:
677 case raw_bkpt_type_access_wp:
678 {
679 enum target_hw_bp_type hw_type
680 = raw_bkpt_type_to_target_hw_bp_type (type);
681 struct x86_debug_reg_state *state
682 = &proc->priv->arch_private->debug_reg_state;
683
684 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
685 }
686
687 default:
688 /* Unsupported. */
689 return 1;
690 }
691 }
692
693 int
694 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
695 int size, raw_breakpoint *bp)
696 {
697 struct process_info *proc = current_process ();
698
699 switch (type)
700 {
701 case raw_bkpt_type_hw:
702 case raw_bkpt_type_write_wp:
703 case raw_bkpt_type_access_wp:
704 {
705 enum target_hw_bp_type hw_type
706 = raw_bkpt_type_to_target_hw_bp_type (type);
707 struct x86_debug_reg_state *state
708 = &proc->priv->arch_private->debug_reg_state;
709
710 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
711 }
712 default:
713 /* Unsupported. */
714 return 1;
715 }
716 }
717
718 bool
719 x86_target::low_stopped_by_watchpoint ()
720 {
721 struct process_info *proc = current_process ();
722 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
723 }
724
725 CORE_ADDR
726 x86_target::low_stopped_data_address ()
727 {
728 struct process_info *proc = current_process ();
729 CORE_ADDR addr;
730 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
731 &addr))
732 return addr;
733 return 0;
734 }
735 \f
736 /* Called when a new process is created. */
737
738 arch_process_info *
739 x86_target::low_new_process ()
740 {
741 struct arch_process_info *info = XCNEW (struct arch_process_info);
742
743 x86_low_init_dregs (&info->debug_reg_state);
744
745 return info;
746 }
747
748 /* Called when a process is being deleted. */
749
750 void
751 x86_target::low_delete_process (arch_process_info *info)
752 {
753 xfree (info);
754 }
755
756 void
757 x86_target::low_new_thread (lwp_info *lwp)
758 {
759 /* This comes from nat/. */
760 x86_linux_new_thread (lwp);
761 }
762
763 void
764 x86_target::low_delete_thread (arch_lwp_info *alwp)
765 {
766 /* This comes from nat/. */
767 x86_linux_delete_thread (alwp);
768 }
769
770 /* Target routine for new_fork. */
771
772 void
773 x86_target::low_new_fork (process_info *parent, process_info *child)
774 {
775 /* These are allocated by linux_add_process. */
776 gdb_assert (parent->priv != NULL
777 && parent->priv->arch_private != NULL);
778 gdb_assert (child->priv != NULL
779 && child->priv->arch_private != NULL);
780
781 /* Linux kernel before 2.6.33 commit
782 72f674d203cd230426437cdcf7dd6f681dad8b0d
783 will inherit hardware debug registers from parent
784 on fork/vfork/clone. Newer Linux kernels create such tasks with
785 zeroed debug registers.
786
787 GDB core assumes the child inherits the watchpoints/hw
788 breakpoints of the parent, and will remove them all from the
789 forked-off process. Copy the debug register mirrors into the
790 new process so that all breakpoints and watchpoints can be
791 removed together. The debug register mirrors will be zeroed
792 in the end, before detaching the forked-off process, thus making
793 this compatible with older Linux kernels too. */
794
795 *child->priv->arch_private = *parent->priv->arch_private;
796 }
797
798 void
799 x86_target::low_prepare_to_resume (lwp_info *lwp)
800 {
801 /* This comes from nat/. */
802 x86_linux_prepare_to_resume (lwp);
803 }
804
805 /* See nat/x86-dregs.h. */
806
807 struct x86_debug_reg_state *
808 x86_debug_reg_state (pid_t pid)
809 {
810 struct process_info *proc = find_process_pid (pid);
811
812 return &proc->priv->arch_private->debug_reg_state;
813 }
814 \f
815 /* When GDBSERVER is built as a 64-bit application on linux, the
816 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
817 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
818 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
819 conversion in-place ourselves. */
820
821 /* Convert a ptrace/host siginfo object into/from the siginfo in the
822 layout of the inferior's architecture. Returns true if any
823 conversion was done; false otherwise. If DIRECTION is 1, then copy
824 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
825 INF. */
826
827 bool
828 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
829 {
830 #ifdef __x86_64__
831 unsigned int machine;
832 int tid = lwpid_of (current_thread);
833 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
834
835 /* Is the inferior 32-bit? If so, then fix up the siginfo object. */
836 if (!is_64bit_tdesc ())
837 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
838 FIXUP_32);
839 /* No fixup for native x32 GDB. */
840 else if (!is_elf64 && sizeof (void *) == 8)
841 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
842 FIXUP_X32);
843 #endif
844
845 return false;
846 }
847 \f
848 static int use_xml;
849
850 /* Format of XSAVE extended state is:
851 struct
852 {
853 fxsave_bytes[0..463]
854 sw_usable_bytes[464..511]
855 xstate_hdr_bytes[512..575]
856 avx_bytes[576..831]
857 future_state etc
858 };
859
860 Same memory layout will be used for the coredump NT_X86_XSTATE
861 representing the XSAVE extended state registers.
862
863 The first 8 bytes of sw_usable_bytes (bytes 464..471) are the OS-enabled
864 extended state mask, which is the same as the extended control register
865 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
866 together with the mask saved in the xstate_hdr_bytes to determine what
867 states the processor/OS supports and what state, used or initialized,
868 the process/thread is in. */
869 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
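
/* Illustrative sketch (hypothetical helper, for exposition only):
   given a raw XSAVE buffer laid out as described above, XCR0 can be
   pulled out of the sw_usable_bytes area like so, which is effectively
   what x86_linux_read_description () does further below:

     static uint64_t
     example_xcr0_from_xsave (const unsigned char *xsave)
     {
       uint64_t xcr0;

       memcpy (&xcr0, xsave + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);
       return xcr0;
     }
*/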
870
871 /* Does the current host support the GETFPXREGS request? The header
872 file may or may not define it, and even if it is defined, the
873 kernel will return EIO if it's running on a pre-SSE processor. */
874 int have_ptrace_getfpxregs =
875 #ifdef HAVE_PTRACE_GETFPXREGS
876 -1
877 #else
878 0
879 #endif
880 ;
881
882 /* Get Linux/x86 target description from running target. */
883
884 static const struct target_desc *
885 x86_linux_read_description (void)
886 {
887 unsigned int machine;
888 int is_elf64;
889 int xcr0_features;
890 int tid;
891 static uint64_t xcr0;
892 struct regset_info *regset;
893
894 tid = lwpid_of (current_thread);
895
896 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
897
898 if (sizeof (void *) == 4)
899 {
900 if (is_elf64 > 0)
901 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
902 #ifndef __x86_64__
903 else if (machine == EM_X86_64)
904 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
905 #endif
906 }
907
908 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
909 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
910 {
911 elf_fpxregset_t fpxregs;
912
913 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
914 {
915 have_ptrace_getfpxregs = 0;
916 have_ptrace_getregset = 0;
917 return i386_linux_read_description (X86_XSTATE_X87);
918 }
919 else
920 have_ptrace_getfpxregs = 1;
921 }
922 #endif
923
924 if (!use_xml)
925 {
926 x86_xcr0 = X86_XSTATE_SSE_MASK;
927
928 /* Don't use XML. */
929 #ifdef __x86_64__
930 if (machine == EM_X86_64)
931 return tdesc_amd64_linux_no_xml;
932 else
933 #endif
934 return tdesc_i386_linux_no_xml;
935 }
936
937 if (have_ptrace_getregset == -1)
938 {
939 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
940 struct iovec iov;
941
942 iov.iov_base = xstateregs;
943 iov.iov_len = sizeof (xstateregs);
944
945 /* Check if PTRACE_GETREGSET works. */
946 if (ptrace (PTRACE_GETREGSET, tid,
947 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
948 have_ptrace_getregset = 0;
949 else
950 {
951 have_ptrace_getregset = 1;
952
953 /* Get XCR0 from XSAVE extended state. */
954 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
955 / sizeof (uint64_t))];
956
957 /* Use PTRACE_GETREGSET if it is available. */
958 for (regset = x86_regsets;
959 regset->fill_function != NULL; regset++)
960 if (regset->get_request == PTRACE_GETREGSET)
961 regset->size = X86_XSTATE_SIZE (xcr0);
962 else if (regset->type != GENERAL_REGS)
963 regset->size = 0;
964 }
965 }
966
967 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
968 xcr0_features = (have_ptrace_getregset
969 && (xcr0 & X86_XSTATE_ALL_MASK));
970
971 if (xcr0_features)
972 x86_xcr0 = xcr0;
973
974 if (machine == EM_X86_64)
975 {
976 #ifdef __x86_64__
977 const target_desc *tdesc = NULL;
978
979 if (xcr0_features)
980 {
981 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
982 !is_elf64);
983 }
984
985 if (tdesc == NULL)
986 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
987 return tdesc;
988 #endif
989 }
990 else
991 {
992 const target_desc *tdesc = NULL;
993
994 if (xcr0_features)
995 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
996
997 if (tdesc == NULL)
998 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
999
1000 return tdesc;
1001 }
1002
1003 gdb_assert_not_reached ("failed to return tdesc");
1004 }
1005
1006 /* Update the target descriptions of all processes; a new GDB has
1007 connected, and it may or may not support xml target descriptions. */
1008
1009 void
1010 x86_target::update_xmltarget ()
1011 {
1012 struct thread_info *saved_thread = current_thread;
1013
1014 /* Before changing the register cache's internal layout, flush the
1015 contents of the current valid caches back to the threads, and
1016 release the current regcache objects. */
1017 regcache_release ();
1018
1019 for_each_process ([this] (process_info *proc) {
1020 int pid = proc->pid;
1021
1022 /* Look up any thread of this process. */
1023 current_thread = find_any_thread_of_pid (pid);
1024
1025 low_arch_setup ();
1026 });
1027
1028 current_thread = saved_thread;
1029 }
1030
1031 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1032 PTRACE_GETREGSET. */
1033
1034 void
1035 x86_target::process_qsupported (char **features, int count)
1036 {
1037 int i;
1038
1039 /* Assume gdb doesn't support XML unless it tells us otherwise. If
1040 gdb sends "xmlRegisters=" with "i386" in the qSupported query, it
1041 supports x86 XML target descriptions. */
1042 use_xml = 0;
1043 for (i = 0; i < count; i++)
1044 {
1045 const char *feature = features[i];
1046
1047 if (startswith (feature, "xmlRegisters="))
1048 {
1049 char *copy = xstrdup (feature + 13);
1050
1051 char *saveptr;
1052 for (char *p = strtok_r (copy, ",", &saveptr);
1053 p != NULL;
1054 p = strtok_r (NULL, ",", &saveptr))
1055 {
1056 if (strcmp (p, "i386") == 0)
1057 {
1058 use_xml = 1;
1059 break;
1060 }
1061 }
1062
1063 free (copy);
1064 }
1065 }
1066 update_xmltarget ();
1067 }
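
/* Example: a GDB that understands x86 XML target descriptions
   advertises that in its qSupported packet, along the lines of

     qSupported:multiprocess+;xmlRegisters=i386;qRelocInsn+

   The loop above splits the "xmlRegisters=" value on ',' and turns
   use_xml on when "i386" is among the listed architectures.  */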
1068
1069 /* Common for x86/x86-64. */
1070
1071 static struct regsets_info x86_regsets_info =
1072 {
1073 x86_regsets, /* regsets */
1074 0, /* num_regsets */
1075 NULL, /* disabled_regsets */
1076 };
1077
1078 #ifdef __x86_64__
1079 static struct regs_info amd64_linux_regs_info =
1080 {
1081 NULL, /* regset_bitmap */
1082 NULL, /* usrregs_info */
1083 &x86_regsets_info
1084 };
1085 #endif
1086 static struct usrregs_info i386_linux_usrregs_info =
1087 {
1088 I386_NUM_REGS,
1089 i386_regmap,
1090 };
1091
1092 static struct regs_info i386_linux_regs_info =
1093 {
1094 NULL, /* regset_bitmap */
1095 &i386_linux_usrregs_info,
1096 &x86_regsets_info
1097 };
1098
1099 const regs_info *
1100 x86_target::get_regs_info ()
1101 {
1102 #ifdef __x86_64__
1103 if (is_64bit_tdesc ())
1104 return &amd64_linux_regs_info;
1105 else
1106 #endif
1107 return &i386_linux_regs_info;
1108 }
1109
1110 /* Initialize the target description for the architecture of the
1111 inferior. */
1112
1113 void
1114 x86_target::low_arch_setup ()
1115 {
1116 current_process ()->tdesc = x86_linux_read_description ();
1117 }
1118
1119 bool
1120 x86_target::low_supports_catch_syscall ()
1121 {
1122 return true;
1123 }
1124
1125 /* Fill *SYSNO with the number of the syscall that was trapped. This
1126 should only be called if LWP got a SYSCALL_SIGTRAP. */
1127
1128 void
1129 x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
1130 {
1131 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1132
1133 if (use_64bit)
1134 {
1135 long l_sysno;
1136
1137 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1138 *sysno = (int) l_sysno;
1139 }
1140 else
1141 collect_register_by_name (regcache, "orig_eax", sysno);
1142 }
1143
1144 bool
1145 x86_target::supports_tracepoints ()
1146 {
1147 return true;
1148 }
1149
1150 static void
1151 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1152 {
1153 target_write_memory (*to, buf, len);
1154 *to += len;
1155 }
1156
1157 static int
1158 push_opcode (unsigned char *buf, const char *op)
1159 {
1160 unsigned char *buf_org = buf;
1161
1162 while (1)
1163 {
1164 char *endptr;
1165 unsigned long ul = strtoul (op, &endptr, 16);
1166
1167 if (endptr == op)
1168 break;
1169
1170 *buf++ = ul;
1171 op = endptr;
1172 }
1173
1174 return buf - buf_org;
1175 }
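
/* Example: push_opcode turns a string of hex byte pairs into raw
   opcode bytes and returns how many were written, e.g.

     len = push_opcode (buf, "48 83 ec 18");   => buf holds 48 83 ec 18, len == 4

   which is how the jump-pad builders below assemble their code.  */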
1176
1177 #ifdef __x86_64__
1178
1179 /* Build a jump pad that saves registers and calls a collection
1180 function. Writes the jump instruction that targets the jump pad
1181 into JJUMPAD_INSN. The caller is responsible for writing it in at
1182 the tracepoint address. */
1183
1184 static int
1185 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1186 CORE_ADDR collector,
1187 CORE_ADDR lockaddr,
1188 ULONGEST orig_size,
1189 CORE_ADDR *jump_entry,
1190 CORE_ADDR *trampoline,
1191 ULONGEST *trampoline_size,
1192 unsigned char *jjump_pad_insn,
1193 ULONGEST *jjump_pad_insn_size,
1194 CORE_ADDR *adjusted_insn_addr,
1195 CORE_ADDR *adjusted_insn_addr_end,
1196 char *err)
1197 {
1198 unsigned char buf[40];
1199 int i, offset;
1200 int64_t loffset;
1201
1202 CORE_ADDR buildaddr = *jump_entry;
1203
1204 /* Build the jump pad. */
1205
1206 /* First, do tracepoint data collection. Save registers. */
1207 i = 0;
1208 /* Need to ensure stack pointer saved first. */
1209 buf[i++] = 0x54; /* push %rsp */
1210 buf[i++] = 0x55; /* push %rbp */
1211 buf[i++] = 0x57; /* push %rdi */
1212 buf[i++] = 0x56; /* push %rsi */
1213 buf[i++] = 0x52; /* push %rdx */
1214 buf[i++] = 0x51; /* push %rcx */
1215 buf[i++] = 0x53; /* push %rbx */
1216 buf[i++] = 0x50; /* push %rax */
1217 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1218 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1219 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1220 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1221 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1222 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1223 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1224 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1225 buf[i++] = 0x9c; /* pushfq */
1226 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1227 buf[i++] = 0xbf;
1228 memcpy (buf + i, &tpaddr, 8);
1229 i += 8;
1230 buf[i++] = 0x57; /* push %rdi */
1231 append_insns (&buildaddr, i, buf);
1232
1233 /* Stack space for the collecting_t object. */
1234 i = 0;
1235 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1236 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1237 memcpy (buf + i, &tpoint, 8);
1238 i += 8;
1239 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1240 i += push_opcode (&buf[i],
1241 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1242 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1243 append_insns (&buildaddr, i, buf);
1244
1245 /* spin-lock. */
1246 i = 0;
1247 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1248 memcpy (&buf[i], (void *) &lockaddr, 8);
1249 i += 8;
1250 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1251 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1252 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1253 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1254 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1255 append_insns (&buildaddr, i, buf);
1256
1257 /* Set up the gdb_collect call. */
1258 /* At this point, (stack pointer + 0x18) is the base of our saved
1259 register block. */
1260
1261 i = 0;
1262 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1263 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1264
1265 /* tpoint address may be 64-bit wide. */
1266 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1267 memcpy (buf + i, &tpoint, 8);
1268 i += 8;
1269 append_insns (&buildaddr, i, buf);
1270
1271 /* The collector function, being in the shared library, may be
1272 more than 31 bits away from the jump pad. */
1273 i = 0;
1274 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1275 memcpy (buf + i, &collector, 8);
1276 i += 8;
1277 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1278 append_insns (&buildaddr, i, buf);
1279
1280 /* Clear the spin-lock. */
1281 i = 0;
1282 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1283 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1284 memcpy (buf + i, &lockaddr, 8);
1285 i += 8;
1286 append_insns (&buildaddr, i, buf);
1287
1288 /* Remove stack that had been used for the collect_t object. */
1289 i = 0;
1290 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1291 append_insns (&buildaddr, i, buf);
1292
1293 /* Restore register state. */
1294 i = 0;
1295 buf[i++] = 0x48; /* add $0x8,%rsp */
1296 buf[i++] = 0x83;
1297 buf[i++] = 0xc4;
1298 buf[i++] = 0x08;
1299 buf[i++] = 0x9d; /* popfq */
1300 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1301 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1302 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1303 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1304 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1305 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1306 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1307 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1308 buf[i++] = 0x58; /* pop %rax */
1309 buf[i++] = 0x5b; /* pop %rbx */
1310 buf[i++] = 0x59; /* pop %rcx */
1311 buf[i++] = 0x5a; /* pop %rdx */
1312 buf[i++] = 0x5e; /* pop %rsi */
1313 buf[i++] = 0x5f; /* pop %rdi */
1314 buf[i++] = 0x5d; /* pop %rbp */
1315 buf[i++] = 0x5c; /* pop %rsp */
1316 append_insns (&buildaddr, i, buf);
1317
1318 /* Now, adjust the original instruction to execute in the jump
1319 pad. */
1320 *adjusted_insn_addr = buildaddr;
1321 relocate_instruction (&buildaddr, tpaddr);
1322 *adjusted_insn_addr_end = buildaddr;
1323
1324 /* Finally, write a jump back to the program. */
1325
1326 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1327 if (loffset > INT_MAX || loffset < INT_MIN)
1328 {
1329 sprintf (err,
1330 "E.Jump back from jump pad too far from tracepoint "
1331 "(offset 0x%" PRIx64 " > int32).", loffset);
1332 return 1;
1333 }
1334
1335 offset = (int) loffset;
1336 memcpy (buf, jump_insn, sizeof (jump_insn));
1337 memcpy (buf + 1, &offset, 4);
1338 append_insns (&buildaddr, sizeof (jump_insn), buf);
1339
1340 /* The jump pad is now built. Wire in a jump to our jump pad. This
1341 is always done last (by our caller actually), so that we can
1342 install fast tracepoints with threads running. This relies on
1343 the agent's atomic write support. */
1344 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1345 if (loffset > INT_MAX || loffset < INT_MIN)
1346 {
1347 sprintf (err,
1348 "E.Jump pad too far from tracepoint "
1349 "(offset 0x%" PRIx64 " > int32).", loffset);
1350 return 1;
1351 }
1352
1353 offset = (int) loffset;
1354
1355 memcpy (buf, jump_insn, sizeof (jump_insn));
1356 memcpy (buf + 1, &offset, 4);
1357 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1358 *jjump_pad_insn_size = sizeof (jump_insn);
1359
1360 /* Return the end address of our pad. */
1361 *jump_entry = buildaddr;
1362
1363 return 0;
1364 }
1365
1366 #endif /* __x86_64__ */
1367
1368 /* Build a jump pad that saves registers and calls a collection
1369 function. Writes the jump instruction that targets the jump pad
1370 into JJUMPAD_INSN. The caller is responsible for writing it in at
1371 the tracepoint address. */
1372
1373 static int
1374 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1375 CORE_ADDR collector,
1376 CORE_ADDR lockaddr,
1377 ULONGEST orig_size,
1378 CORE_ADDR *jump_entry,
1379 CORE_ADDR *trampoline,
1380 ULONGEST *trampoline_size,
1381 unsigned char *jjump_pad_insn,
1382 ULONGEST *jjump_pad_insn_size,
1383 CORE_ADDR *adjusted_insn_addr,
1384 CORE_ADDR *adjusted_insn_addr_end,
1385 char *err)
1386 {
1387 unsigned char buf[0x100];
1388 int i, offset;
1389 CORE_ADDR buildaddr = *jump_entry;
1390
1391 /* Build the jump pad. */
1392
1393 /* First, do tracepoint data collection. Save registers. */
1394 i = 0;
1395 buf[i++] = 0x60; /* pushad */
1396 buf[i++] = 0x68; /* push tpaddr aka $pc */
1397 *((int *)(buf + i)) = (int) tpaddr;
1398 i += 4;
1399 buf[i++] = 0x9c; /* pushf */
1400 buf[i++] = 0x1e; /* push %ds */
1401 buf[i++] = 0x06; /* push %es */
1402 buf[i++] = 0x0f; /* push %fs */
1403 buf[i++] = 0xa0;
1404 buf[i++] = 0x0f; /* push %gs */
1405 buf[i++] = 0xa8;
1406 buf[i++] = 0x16; /* push %ss */
1407 buf[i++] = 0x0e; /* push %cs */
1408 append_insns (&buildaddr, i, buf);
1409
1410 /* Stack space for the collecting_t object. */
1411 i = 0;
1412 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1413
1414 /* Build the object. */
1415 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1416 memcpy (buf + i, &tpoint, 4);
1417 i += 4;
1418 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1419
1420 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1421 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1422 append_insns (&buildaddr, i, buf);
1423
1424 /* spin-lock. Note this uses cmpxchg, which is not available on the
1425 original i386. If we cared about that, we could use xchg instead. */
1426
1427 i = 0;
1428 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1429 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1430 %esp,<lockaddr> */
1431 memcpy (&buf[i], (void *) &lockaddr, 4);
1432 i += 4;
1433 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1434 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1435 append_insns (&buildaddr, i, buf);
1436
1437
1438 /* Set up arguments to the gdb_collect call. */
1439 i = 0;
1440 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1441 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1442 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1443 append_insns (&buildaddr, i, buf);
1444
1445 i = 0;
1446 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1447 append_insns (&buildaddr, i, buf);
1448
1449 i = 0;
1450 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1451 memcpy (&buf[i], (void *) &tpoint, 4);
1452 i += 4;
1453 append_insns (&buildaddr, i, buf);
1454
1455 buf[0] = 0xe8; /* call <reladdr> */
1456 offset = collector - (buildaddr + sizeof (jump_insn));
1457 memcpy (buf + 1, &offset, 4);
1458 append_insns (&buildaddr, 5, buf);
1459 /* Clean up after the call. */
1460 buf[0] = 0x83; /* add $0x8,%esp */
1461 buf[1] = 0xc4;
1462 buf[2] = 0x08;
1463 append_insns (&buildaddr, 3, buf);
1464
1465
1466 /* Clear the spin-lock. This would need the LOCK prefix on older
1467 broken archs. */
1468 i = 0;
1469 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1470 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1471 memcpy (buf + i, &lockaddr, 4);
1472 i += 4;
1473 append_insns (&buildaddr, i, buf);
1474
1475
1476 /* Remove stack that had been used for the collect_t object. */
1477 i = 0;
1478 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1479 append_insns (&buildaddr, i, buf);
1480
1481 i = 0;
1482 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1483 buf[i++] = 0xc4;
1484 buf[i++] = 0x04;
1485 buf[i++] = 0x17; /* pop %ss */
1486 buf[i++] = 0x0f; /* pop %gs */
1487 buf[i++] = 0xa9;
1488 buf[i++] = 0x0f; /* pop %fs */
1489 buf[i++] = 0xa1;
1490 buf[i++] = 0x07; /* pop %es */
1491 buf[i++] = 0x1f; /* pop %ds */
1492 buf[i++] = 0x9d; /* popf */
1493 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1494 buf[i++] = 0xc4;
1495 buf[i++] = 0x04;
1496 buf[i++] = 0x61; /* popad */
1497 append_insns (&buildaddr, i, buf);
1498
1499 /* Now, adjust the original instruction to execute in the jump
1500 pad. */
1501 *adjusted_insn_addr = buildaddr;
1502 relocate_instruction (&buildaddr, tpaddr);
1503 *adjusted_insn_addr_end = buildaddr;
1504
1505 /* Write the jump back to the program. */
1506 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1507 memcpy (buf, jump_insn, sizeof (jump_insn));
1508 memcpy (buf + 1, &offset, 4);
1509 append_insns (&buildaddr, sizeof (jump_insn), buf);
1510
1511 /* The jump pad is now built. Wire in a jump to our jump pad. This
1512 is always done last (by our caller actually), so that we can
1513 install fast tracepoints with threads running. This relies on
1514 the agent's atomic write support. */
1515 if (orig_size == 4)
1516 {
1517 /* Create a trampoline. */
1518 *trampoline_size = sizeof (jump_insn);
1519 if (!claim_trampoline_space (*trampoline_size, trampoline))
1520 {
1521 /* No trampoline space available. */
1522 strcpy (err,
1523 "E.Cannot allocate trampoline space needed for fast "
1524 "tracepoints on 4-byte instructions.");
1525 return 1;
1526 }
1527
1528 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1529 memcpy (buf, jump_insn, sizeof (jump_insn));
1530 memcpy (buf + 1, &offset, 4);
1531 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1532
1533 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1534 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1535 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1536 memcpy (buf + 2, &offset, 2);
1537 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1538 *jjump_pad_insn_size = sizeof (small_jump_insn);
1539 }
1540 else
1541 {
1542 /* Else use a 32-bit relative jump instruction. */
1543 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1544 memcpy (buf, jump_insn, sizeof (jump_insn));
1545 memcpy (buf + 1, &offset, 4);
1546 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1547 *jjump_pad_insn_size = sizeof (jump_insn);
1548 }
1549
1550 /* Return the end address of our pad. */
1551 *jump_entry = buildaddr;
1552
1553 return 0;
1554 }
1555
1556 bool
1557 x86_target::supports_fast_tracepoints ()
1558 {
1559 return true;
1560 }
1561
1562 int
1563 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1564 CORE_ADDR tpaddr,
1565 CORE_ADDR collector,
1566 CORE_ADDR lockaddr,
1567 ULONGEST orig_size,
1568 CORE_ADDR *jump_entry,
1569 CORE_ADDR *trampoline,
1570 ULONGEST *trampoline_size,
1571 unsigned char *jjump_pad_insn,
1572 ULONGEST *jjump_pad_insn_size,
1573 CORE_ADDR *adjusted_insn_addr,
1574 CORE_ADDR *adjusted_insn_addr_end,
1575 char *err)
1576 {
1577 #ifdef __x86_64__
1578 if (is_64bit_tdesc ())
1579 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1580 collector, lockaddr,
1581 orig_size, jump_entry,
1582 trampoline, trampoline_size,
1583 jjump_pad_insn,
1584 jjump_pad_insn_size,
1585 adjusted_insn_addr,
1586 adjusted_insn_addr_end,
1587 err);
1588 #endif
1589
1590 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1591 collector, lockaddr,
1592 orig_size, jump_entry,
1593 trampoline, trampoline_size,
1594 jjump_pad_insn,
1595 jjump_pad_insn_size,
1596 adjusted_insn_addr,
1597 adjusted_insn_addr_end,
1598 err);
1599 }
1600
1601 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1602 architectures. */
1603
1604 int
1605 x86_target::get_min_fast_tracepoint_insn_len ()
1606 {
1607 static int warned_about_fast_tracepoints = 0;
1608
1609 #ifdef __x86_64__
1610 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1611 used for fast tracepoints. */
1612 if (is_64bit_tdesc ())
1613 return 5;
1614 #endif
1615
1616 if (agent_loaded_p ())
1617 {
1618 char errbuf[IPA_BUFSIZ];
1619
1620 errbuf[0] = '\0';
1621
1622 /* On x86, if trampolines are available, then 4-byte jump instructions
1623 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1624 with a 4-byte offset are used instead. */
1625 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1626 return 4;
1627 else
1628 {
1629 /* GDB has no channel to explain to the user why a shorter fast
1630 tracepoint is not possible, but at least make GDBserver
1631 mention that something has gone awry. */
1632 if (!warned_about_fast_tracepoints)
1633 {
1634 warning ("4-byte fast tracepoints not available; %s", errbuf);
1635 warned_about_fast_tracepoints = 1;
1636 }
1637 return 5;
1638 }
1639 }
1640 else
1641 {
1642 /* Indicate that the minimum length is currently unknown since the IPA
1643 has not loaded yet. */
1644 return 0;
1645 }
1646 }
1647
1648 static void
1649 add_insns (unsigned char *start, int len)
1650 {
1651 CORE_ADDR buildaddr = current_insn_ptr;
1652
1653 if (debug_threads)
1654 debug_printf ("Adding %d bytes of insn at %s\n",
1655 len, paddress (buildaddr));
1656
1657 append_insns (&buildaddr, len, start);
1658 current_insn_ptr = buildaddr;
1659 }
1660
1661 /* Our general strategy for emitting code is to avoid specifying raw
1662 bytes whenever possible, and instead copy a block of inline asm
1663 that is embedded in the function. This is a little messy, because
1664 we need to keep the compiler from discarding what looks like dead
1665 code, plus suppress various warnings. */
1666
1667 #define EMIT_ASM(NAME, INSNS) \
1668 do \
1669 { \
1670 extern unsigned char start_ ## NAME, end_ ## NAME; \
1671 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1672 __asm__ ("jmp end_" #NAME "\n" \
1673 "\t" "start_" #NAME ":" \
1674 "\t" INSNS "\n" \
1675 "\t" "end_" #NAME ":"); \
1676 } while (0)
1677
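/* Example: for a tiny user such as EMIT_ASM (example, "nop"), the
   macro above expands (roughly) to

     extern unsigned char start_example, end_example;
     add_insns (&start_example, &end_example - &start_example);
     __asm__ ("jmp end_example\n\t" "start_example:\t" "nop\n\t" "end_example:");

   i.e. the asm block plants the bytes of INSNS in the gdbserver binary
   between two labels, the leading jmp keeps them from being executed
   inline, and add_insns copies those bytes to the inferior at
   current_insn_ptr.  */
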
1678 #ifdef __x86_64__
1679
1680 #define EMIT_ASM32(NAME,INSNS) \
1681 do \
1682 { \
1683 extern unsigned char start_ ## NAME, end_ ## NAME; \
1684 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1685 __asm__ (".code32\n" \
1686 "\t" "jmp end_" #NAME "\n" \
1687 "\t" "start_" #NAME ":\n" \
1688 "\t" INSNS "\n" \
1689 "\t" "end_" #NAME ":\n" \
1690 ".code64\n"); \
1691 } while (0)
1692
1693 #else
1694
1695 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1696
1697 #endif
1698
1699 #ifdef __x86_64__
1700
1701 static void
1702 amd64_emit_prologue (void)
1703 {
1704 EMIT_ASM (amd64_prologue,
1705 "pushq %rbp\n\t"
1706 "movq %rsp,%rbp\n\t"
1707 "sub $0x20,%rsp\n\t"
1708 "movq %rdi,-8(%rbp)\n\t"
1709 "movq %rsi,-16(%rbp)");
1710 }
1711
1712
1713 static void
1714 amd64_emit_epilogue (void)
1715 {
1716 EMIT_ASM (amd64_epilogue,
1717 "movq -16(%rbp),%rdi\n\t"
1718 "movq %rax,(%rdi)\n\t"
1719 "xor %rax,%rax\n\t"
1720 "leave\n\t"
1721 "ret");
1722 }
1723
1724 static void
1725 amd64_emit_add (void)
1726 {
1727 EMIT_ASM (amd64_add,
1728 "add (%rsp),%rax\n\t"
1729 "lea 0x8(%rsp),%rsp");
1730 }
1731
1732 static void
1733 amd64_emit_sub (void)
1734 {
1735 EMIT_ASM (amd64_sub,
1736 "sub %rax,(%rsp)\n\t"
1737 "pop %rax");
1738 }
1739
1740 static void
1741 amd64_emit_mul (void)
1742 {
1743 emit_error = 1;
1744 }
1745
1746 static void
1747 amd64_emit_lsh (void)
1748 {
1749 emit_error = 1;
1750 }
1751
1752 static void
1753 amd64_emit_rsh_signed (void)
1754 {
1755 emit_error = 1;
1756 }
1757
1758 static void
1759 amd64_emit_rsh_unsigned (void)
1760 {
1761 emit_error = 1;
1762 }
1763
1764 static void
1765 amd64_emit_ext (int arg)
1766 {
1767 switch (arg)
1768 {
1769 case 8:
1770 EMIT_ASM (amd64_ext_8,
1771 "cbtw\n\t"
1772 "cwtl\n\t"
1773 "cltq");
1774 break;
1775 case 16:
1776 EMIT_ASM (amd64_ext_16,
1777 "cwtl\n\t"
1778 "cltq");
1779 break;
1780 case 32:
1781 EMIT_ASM (amd64_ext_32,
1782 "cltq");
1783 break;
1784 default:
1785 emit_error = 1;
1786 }
1787 }
1788
1789 static void
1790 amd64_emit_log_not (void)
1791 {
1792 EMIT_ASM (amd64_log_not,
1793 "test %rax,%rax\n\t"
1794 "sete %cl\n\t"
1795 "movzbq %cl,%rax");
1796 }
1797
1798 static void
1799 amd64_emit_bit_and (void)
1800 {
1801 EMIT_ASM (amd64_and,
1802 "and (%rsp),%rax\n\t"
1803 "lea 0x8(%rsp),%rsp");
1804 }
1805
1806 static void
1807 amd64_emit_bit_or (void)
1808 {
1809 EMIT_ASM (amd64_or,
1810 "or (%rsp),%rax\n\t"
1811 "lea 0x8(%rsp),%rsp");
1812 }
1813
1814 static void
1815 amd64_emit_bit_xor (void)
1816 {
1817 EMIT_ASM (amd64_xor,
1818 "xor (%rsp),%rax\n\t"
1819 "lea 0x8(%rsp),%rsp");
1820 }
1821
1822 static void
1823 amd64_emit_bit_not (void)
1824 {
1825 EMIT_ASM (amd64_bit_not,
1826 "xorq $0xffffffffffffffff,%rax");
1827 }
1828
1829 static void
1830 amd64_emit_equal (void)
1831 {
1832 EMIT_ASM (amd64_equal,
1833 "cmp %rax,(%rsp)\n\t"
1834 "je .Lamd64_equal_true\n\t"
1835 "xor %rax,%rax\n\t"
1836 "jmp .Lamd64_equal_end\n\t"
1837 ".Lamd64_equal_true:\n\t"
1838 "mov $0x1,%rax\n\t"
1839 ".Lamd64_equal_end:\n\t"
1840 "lea 0x8(%rsp),%rsp");
1841 }
1842
1843 static void
1844 amd64_emit_less_signed (void)
1845 {
1846 EMIT_ASM (amd64_less_signed,
1847 "cmp %rax,(%rsp)\n\t"
1848 "jl .Lamd64_less_signed_true\n\t"
1849 "xor %rax,%rax\n\t"
1850 "jmp .Lamd64_less_signed_end\n\t"
1851 ".Lamd64_less_signed_true:\n\t"
1852 "mov $1,%rax\n\t"
1853 ".Lamd64_less_signed_end:\n\t"
1854 "lea 0x8(%rsp),%rsp");
1855 }
1856
1857 static void
1858 amd64_emit_less_unsigned (void)
1859 {
1860 EMIT_ASM (amd64_less_unsigned,
1861 "cmp %rax,(%rsp)\n\t"
1862 "jb .Lamd64_less_unsigned_true\n\t"
1863 "xor %rax,%rax\n\t"
1864 "jmp .Lamd64_less_unsigned_end\n\t"
1865 ".Lamd64_less_unsigned_true:\n\t"
1866 "mov $1,%rax\n\t"
1867 ".Lamd64_less_unsigned_end:\n\t"
1868 "lea 0x8(%rsp),%rsp");
1869 }
1870
1871 static void
1872 amd64_emit_ref (int size)
1873 {
1874 switch (size)
1875 {
1876 case 1:
1877 EMIT_ASM (amd64_ref1,
1878 "movb (%rax),%al");
1879 break;
1880 case 2:
1881 EMIT_ASM (amd64_ref2,
1882 "movw (%rax),%ax");
1883 break;
1884 case 4:
1885 EMIT_ASM (amd64_ref4,
1886 "movl (%rax),%eax");
1887 break;
1888 case 8:
1889 EMIT_ASM (amd64_ref8,
1890 "movq (%rax),%rax");
1891 break;
1892 }
1893 }
1894
1895 static void
1896 amd64_emit_if_goto (int *offset_p, int *size_p)
1897 {
1898 EMIT_ASM (amd64_if_goto,
1899 "mov %rax,%rcx\n\t"
1900 "pop %rax\n\t"
1901 "cmp $0,%rcx\n\t"
1902 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1903 if (offset_p)
1904 *offset_p = 10;
1905 if (size_p)
1906 *size_p = 4;
1907 }
1908
1909 static void
1910 amd64_emit_goto (int *offset_p, int *size_p)
1911 {
1912 EMIT_ASM (amd64_goto,
1913 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1914 if (offset_p)
1915 *offset_p = 1;
1916 if (size_p)
1917 *size_p = 4;
1918 }
1919
1920 static void
1921 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1922 {
1923 int diff = (to - (from + size));
1924 unsigned char buf[sizeof (int)];
1925
1926 if (size != 4)
1927 {
1928 emit_error = 1;
1929 return;
1930 }
1931
1932 memcpy (buf, &diff, sizeof (int));
1933 target_write_memory (from, buf, sizeof (int));
1934 }
1935
1936 static void
1937 amd64_emit_const (LONGEST num)
1938 {
1939 unsigned char buf[16];
1940 int i;
1941 CORE_ADDR buildaddr = current_insn_ptr;
1942
1943 i = 0;
1944 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1945 memcpy (&buf[i], &num, sizeof (num));
1946 i += 8;
1947 append_insns (&buildaddr, i, buf);
1948 current_insn_ptr = buildaddr;
1949 }
1950
1951 static void
1952 amd64_emit_call (CORE_ADDR fn)
1953 {
1954 unsigned char buf[16];
1955 int i;
1956 CORE_ADDR buildaddr;
1957 LONGEST offset64;
1958
1959 /* The destination function, being in the shared library, may be
1960 more than 31 bits away from the compiled code pad. */
1961
1962 buildaddr = current_insn_ptr;
1963
1964 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1965
1966 i = 0;
1967
1968 if (offset64 > INT_MAX || offset64 < INT_MIN)
1969 {
1970 /* Offset is too large for a call. Use callq, but that requires
1971 a register, so avoid it if possible. Use r10; since it is
1972 call-clobbered, we don't have to push/pop it. */
1973 buf[i++] = 0x48; /* mov $fn,%r10 */
1974 buf[i++] = 0xba;
1975 memcpy (buf + i, &fn, 8);
1976 i += 8;
1977 buf[i++] = 0xff; /* callq *%r10 */
1978 buf[i++] = 0xd2;
1979 }
1980 else
1981 {
1982 int offset32 = offset64; /* we know we can't overflow here. */
1983
1984 buf[i++] = 0xe8; /* call <reladdr> */
1985 memcpy (buf + i, &offset32, 4);
1986 i += 4;
1987 }
1988
1989 append_insns (&buildaddr, i, buf);
1990 current_insn_ptr = buildaddr;
1991 }
1992
1993 static void
1994 amd64_emit_reg (int reg)
1995 {
1996 unsigned char buf[16];
1997 int i;
1998 CORE_ADDR buildaddr;
1999
2000 /* Assume raw_regs is still in %rdi. */
2001 buildaddr = current_insn_ptr;
2002 i = 0;
2003 buf[i++] = 0xbe; /* mov $<n>,%esi */
2004 memcpy (&buf[i], &reg, sizeof (reg));
2005 i += 4;
2006 append_insns (&buildaddr, i, buf);
2007 current_insn_ptr = buildaddr;
2008 amd64_emit_call (get_raw_reg_func_addr ());
2009 }
2010
2011 static void
2012 amd64_emit_pop (void)
2013 {
2014 EMIT_ASM (amd64_pop,
2015 "pop %rax");
2016 }
2017
2018 static void
2019 amd64_emit_stack_flush (void)
2020 {
2021 EMIT_ASM (amd64_stack_flush,
2022 "push %rax");
2023 }
2024
2025 static void
2026 amd64_emit_zero_ext (int arg)
2027 {
2028 switch (arg)
2029 {
2030 case 8:
2031 EMIT_ASM (amd64_zero_ext_8,
2032 "and $0xff,%rax");
2033 break;
2034 case 16:
2035 EMIT_ASM (amd64_zero_ext_16,
2036 "and $0xffff,%rax");
2037 break;
2038 case 32:
2039 EMIT_ASM (amd64_zero_ext_32,
2040 "mov $0xffffffff,%rcx\n\t"
2041 "and %rcx,%rax");
2042 break;
2043 default:
2044 emit_error = 1;
2045 }
2046 }
2047
2048 static void
2049 amd64_emit_swap (void)
2050 {
2051 EMIT_ASM (amd64_swap,
2052 "mov %rax,%rcx\n\t"
2053 "pop %rax\n\t"
2054 "push %rcx");
2055 }
2056
2057 static void
2058 amd64_emit_stack_adjust (int n)
2059 {
2060 unsigned char buf[16];
2061 int i;
2062 CORE_ADDR buildaddr = current_insn_ptr;
2063
2064 i = 0;
2065 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2066 buf[i++] = 0x8d;
2067 buf[i++] = 0x64;
2068 buf[i++] = 0x24;
2069 /* This only handles adjustments up to 16, but we don't expect any more. */
2070 buf[i++] = n * 8;
2071 append_insns (&buildaddr, i, buf);
2072 current_insn_ptr = buildaddr;
2073 }
2074
2075 /* FN's prototype is `LONGEST(*fn)(int)'. */
2076
2077 static void
2078 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2079 {
2080 unsigned char buf[16];
2081 int i;
2082 CORE_ADDR buildaddr;
2083
2084 buildaddr = current_insn_ptr;
2085 i = 0;
2086 buf[i++] = 0xbf; /* movl $<n>,%edi */
2087 memcpy (&buf[i], &arg1, sizeof (arg1));
2088 i += 4;
2089 append_insns (&buildaddr, i, buf);
2090 current_insn_ptr = buildaddr;
2091 amd64_emit_call (fn);
2092 }
2093
2094 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2095
2096 static void
2097 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2098 {
2099 unsigned char buf[16];
2100 int i;
2101 CORE_ADDR buildaddr;
2102
2103 buildaddr = current_insn_ptr;
2104 i = 0;
2105 buf[i++] = 0xbf; /* movl $<n>,%edi */
2106 memcpy (&buf[i], &arg1, sizeof (arg1));
2107 i += 4;
2108 append_insns (&buildaddr, i, buf);
2109 current_insn_ptr = buildaddr;
2110 EMIT_ASM (amd64_void_call_2_a,
2111 /* Save away a copy of the stack top. */
2112 "push %rax\n\t"
2113 /* Also pass top as the second argument. */
2114 "mov %rax,%rsi");
2115 amd64_emit_call (fn);
2116 EMIT_ASM (amd64_void_call_2_b,
2117 /* Restore the stack top, %rax may have been trashed. */
2118 "pop %rax");
2119 }
2120
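/* The six conditional-goto emitters below compare the two values on
   top of the stack, pop them, and either fall through or take a jump
   whose 32-bit displacement is patched later by
   amd64_write_goto_address; *OFFSET_P and *SIZE_P describe where that
   displacement lives in the emitted sequence.  */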
2121 static void
2122 amd64_emit_eq_goto (int *offset_p, int *size_p)
2123 {
2124 EMIT_ASM (amd64_eq,
2125 "cmp %rax,(%rsp)\n\t"
2126 "jne .Lamd64_eq_fallthru\n\t"
2127 "lea 0x8(%rsp),%rsp\n\t"
2128 "pop %rax\n\t"
2129 /* jmp, but don't trust the assembler to choose the right jump */
2130 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2131 ".Lamd64_eq_fallthru:\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2133 "pop %rax");
2134
2135 if (offset_p)
2136 *offset_p = 13;
2137 if (size_p)
2138 *size_p = 4;
2139 }
2140
2141 static void
2142 amd64_emit_ne_goto (int *offset_p, int *size_p)
2143 {
2144 EMIT_ASM (amd64_ne,
2145 "cmp %rax,(%rsp)\n\t"
2146 "je .Lamd64_ne_fallthru\n\t"
2147 "lea 0x8(%rsp),%rsp\n\t"
2148 "pop %rax\n\t"
2149 /* jmp, but don't trust the assembler to choose the right jump */
2150 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2151 ".Lamd64_ne_fallthru:\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2153 "pop %rax");
2154
2155 if (offset_p)
2156 *offset_p = 13;
2157 if (size_p)
2158 *size_p = 4;
2159 }
2160
2161 static void
2162 amd64_emit_lt_goto (int *offset_p, int *size_p)
2163 {
2164 EMIT_ASM (amd64_lt,
2165 "cmp %rax,(%rsp)\n\t"
2166 "jnl .Lamd64_lt_fallthru\n\t"
2167 "lea 0x8(%rsp),%rsp\n\t"
2168 "pop %rax\n\t"
2169 /* jmp, but don't trust the assembler to choose the right jump */
2170 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2171 ".Lamd64_lt_fallthru:\n\t"
2172 "lea 0x8(%rsp),%rsp\n\t"
2173 "pop %rax");
2174
2175 if (offset_p)
2176 *offset_p = 13;
2177 if (size_p)
2178 *size_p = 4;
2179 }
2180
2181 static void
2182 amd64_emit_le_goto (int *offset_p, int *size_p)
2183 {
2184 EMIT_ASM (amd64_le,
2185 "cmp %rax,(%rsp)\n\t"
2186 "jnle .Lamd64_le_fallthru\n\t"
2187 "lea 0x8(%rsp),%rsp\n\t"
2188 "pop %rax\n\t"
2189 /* jmp, but don't trust the assembler to choose the right jump */
2190 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2191 ".Lamd64_le_fallthru:\n\t"
2192 "lea 0x8(%rsp),%rsp\n\t"
2193 "pop %rax");
2194
2195 if (offset_p)
2196 *offset_p = 13;
2197 if (size_p)
2198 *size_p = 4;
2199 }
2200
2201 static void
2202 amd64_emit_gt_goto (int *offset_p, int *size_p)
2203 {
2204 EMIT_ASM (amd64_gt,
2205 "cmp %rax,(%rsp)\n\t"
2206 "jng .Lamd64_gt_fallthru\n\t"
2207 "lea 0x8(%rsp),%rsp\n\t"
2208 "pop %rax\n\t"
2209 /* jmp, but don't trust the assembler to choose the right jump */
2210 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2211 ".Lamd64_gt_fallthru:\n\t"
2212 "lea 0x8(%rsp),%rsp\n\t"
2213 "pop %rax");
2214
2215 if (offset_p)
2216 *offset_p = 13;
2217 if (size_p)
2218 *size_p = 4;
2219 }
2220
2221 static void
2222 amd64_emit_ge_goto (int *offset_p, int *size_p)
2223 {
2224 EMIT_ASM (amd64_ge,
2225 "cmp %rax,(%rsp)\n\t"
2226 "jnge .Lamd64_ge_fallthru\n\t"
2227 ".Lamd64_ge_jump:\n\t"
2228 "lea 0x8(%rsp),%rsp\n\t"
2229 "pop %rax\n\t"
2230 /* jmp, but don't trust the assembler to choose the right jump */
2231 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2232 ".Lamd64_ge_fallthru:\n\t"
2233 "lea 0x8(%rsp),%rsp\n\t"
2234 "pop %rax");
2235
2236 if (offset_p)
2237 *offset_p = 13;
2238 if (size_p)
2239 *size_p = 4;
2240 }
2241
2242 struct emit_ops amd64_emit_ops =
2243 {
2244 amd64_emit_prologue,
2245 amd64_emit_epilogue,
2246 amd64_emit_add,
2247 amd64_emit_sub,
2248 amd64_emit_mul,
2249 amd64_emit_lsh,
2250 amd64_emit_rsh_signed,
2251 amd64_emit_rsh_unsigned,
2252 amd64_emit_ext,
2253 amd64_emit_log_not,
2254 amd64_emit_bit_and,
2255 amd64_emit_bit_or,
2256 amd64_emit_bit_xor,
2257 amd64_emit_bit_not,
2258 amd64_emit_equal,
2259 amd64_emit_less_signed,
2260 amd64_emit_less_unsigned,
2261 amd64_emit_ref,
2262 amd64_emit_if_goto,
2263 amd64_emit_goto,
2264 amd64_write_goto_address,
2265 amd64_emit_const,
2266 amd64_emit_call,
2267 amd64_emit_reg,
2268 amd64_emit_pop,
2269 amd64_emit_stack_flush,
2270 amd64_emit_zero_ext,
2271 amd64_emit_swap,
2272 amd64_emit_stack_adjust,
2273 amd64_emit_int_call_1,
2274 amd64_emit_void_call_2,
2275 amd64_emit_eq_goto,
2276 amd64_emit_ne_goto,
2277 amd64_emit_lt_goto,
2278 amd64_emit_le_goto,
2279 amd64_emit_gt_goto,
2280 amd64_emit_ge_goto
2281 };
2282
2283 #endif /* __x86_64__ */
2284
2285 static void
2286 i386_emit_prologue (void)
2287 {
2288 EMIT_ASM32 (i386_prologue,
2289 "push %ebp\n\t"
2290 "mov %esp,%ebp\n\t"
2291 "push %ebx");
2292 /* At this point, the raw regs base address is at 8(%ebp), and the
2293 value pointer is at 12(%ebp). */
2294 }
2295
2296 static void
2297 i386_emit_epilogue (void)
2298 {
2299 EMIT_ASM32 (i386_epilogue,
2300 "mov 12(%ebp),%ecx\n\t"
2301 "mov %eax,(%ecx)\n\t"
2302 "mov %ebx,0x4(%ecx)\n\t"
2303 "xor %eax,%eax\n\t"
2304 "pop %ebx\n\t"
2305 "pop %ebp\n\t"
2306 "ret");
2307 }
2308
2309 static void
2310 i386_emit_add (void)
2311 {
2312 EMIT_ASM32 (i386_add,
2313 "add (%esp),%eax\n\t"
2314 "adc 0x4(%esp),%ebx\n\t"
2315 "lea 0x8(%esp),%esp");
2316 }
2317
2318 static void
2319 i386_emit_sub (void)
2320 {
2321 EMIT_ASM32 (i386_sub,
2322 "subl %eax,(%esp)\n\t"
2323 "sbbl %ebx,4(%esp)\n\t"
2324 "pop %eax\n\t"
2325 "pop %ebx\n\t");
2326 }
2327
2328 static void
2329 i386_emit_mul (void)
2330 {
2331 emit_error = 1;
2332 }
2333
2334 static void
2335 i386_emit_lsh (void)
2336 {
2337 emit_error = 1;
2338 }
2339
2340 static void
2341 i386_emit_rsh_signed (void)
2342 {
2343 emit_error = 1;
2344 }
2345
2346 static void
2347 i386_emit_rsh_unsigned (void)
2348 {
2349 emit_error = 1;
2350 }
2351
2352 static void
2353 i386_emit_ext (int arg)
2354 {
2355 switch (arg)
2356 {
2357 case 8:
2358 EMIT_ASM32 (i386_ext_8,
2359 "cbtw\n\t"
2360 "cwtl\n\t"
2361 "movl %eax,%ebx\n\t"
2362 "sarl $31,%ebx");
2363 break;
2364 case 16:
2365 EMIT_ASM32 (i386_ext_16,
2366 "cwtl\n\t"
2367 "movl %eax,%ebx\n\t"
2368 "sarl $31,%ebx");
2369 break;
2370 case 32:
2371 EMIT_ASM32 (i386_ext_32,
2372 "movl %eax,%ebx\n\t"
2373 "sarl $31,%ebx");
2374 break;
2375 default:
2376 emit_error = 1;
2377 }
2378 }
2379
2380 static void
2381 i386_emit_log_not (void)
2382 {
2383 EMIT_ASM32 (i386_log_not,
2384 "or %ebx,%eax\n\t"
2385 "test %eax,%eax\n\t"
2386 "sete %cl\n\t"
2387 "xor %ebx,%ebx\n\t"
2388 "movzbl %cl,%eax");
2389 }
2390
2391 static void
2392 i386_emit_bit_and (void)
2393 {
2394 EMIT_ASM32 (i386_and,
2395 "and (%esp),%eax\n\t"
2396 "and 0x4(%esp),%ebx\n\t"
2397 "lea 0x8(%esp),%esp");
2398 }
2399
2400 static void
2401 i386_emit_bit_or (void)
2402 {
2403 EMIT_ASM32 (i386_or,
2404 "or (%esp),%eax\n\t"
2405 "or 0x4(%esp),%ebx\n\t"
2406 "lea 0x8(%esp),%esp");
2407 }
2408
2409 static void
2410 i386_emit_bit_xor (void)
2411 {
2412 EMIT_ASM32 (i386_xor,
2413 "xor (%esp),%eax\n\t"
2414 "xor 0x4(%esp),%ebx\n\t"
2415 "lea 0x8(%esp),%esp");
2416 }
2417
2418 static void
2419 i386_emit_bit_not (void)
2420 {
2421 EMIT_ASM32 (i386_bit_not,
2422 "xor $0xffffffff,%eax\n\t"
2423 "xor $0xffffffff,%ebx\n\t");
2424 }
2425
2426 static void
2427 i386_emit_equal (void)
2428 {
2429 EMIT_ASM32 (i386_equal,
2430 "cmpl %ebx,4(%esp)\n\t"
2431 "jne .Li386_equal_false\n\t"
2432 "cmpl %eax,(%esp)\n\t"
2433 "je .Li386_equal_true\n\t"
2434 ".Li386_equal_false:\n\t"
2435 "xor %eax,%eax\n\t"
2436 "jmp .Li386_equal_end\n\t"
2437 ".Li386_equal_true:\n\t"
2438 "mov $1,%eax\n\t"
2439 ".Li386_equal_end:\n\t"
2440 "xor %ebx,%ebx\n\t"
2441 "lea 0x8(%esp),%esp");
2442 }
2443
2444 static void
2445 i386_emit_less_signed (void)
2446 {
2447 EMIT_ASM32 (i386_less_signed,
2448 "cmpl %ebx,4(%esp)\n\t"
2449 "jl .Li386_less_signed_true\n\t"
2450 "jne .Li386_less_signed_false\n\t"
2451 "cmpl %eax,(%esp)\n\t"
2452 "jl .Li386_less_signed_true\n\t"
2453 ".Li386_less_signed_false:\n\t"
2454 "xor %eax,%eax\n\t"
2455 "jmp .Li386_less_signed_end\n\t"
2456 ".Li386_less_signed_true:\n\t"
2457 "mov $1,%eax\n\t"
2458 ".Li386_less_signed_end:\n\t"
2459 "xor %ebx,%ebx\n\t"
2460 "lea 0x8(%esp),%esp");
2461 }
2462
2463 static void
2464 i386_emit_less_unsigned (void)
2465 {
2466 EMIT_ASM32 (i386_less_unsigned,
2467 "cmpl %ebx,4(%esp)\n\t"
2468 "jb .Li386_less_unsigned_true\n\t"
2469 "jne .Li386_less_unsigned_false\n\t"
2470 "cmpl %eax,(%esp)\n\t"
2471 "jb .Li386_less_unsigned_true\n\t"
2472 ".Li386_less_unsigned_false:\n\t"
2473 "xor %eax,%eax\n\t"
2474 "jmp .Li386_less_unsigned_end\n\t"
2475 ".Li386_less_unsigned_true:\n\t"
2476 "mov $1,%eax\n\t"
2477 ".Li386_less_unsigned_end:\n\t"
2478 "xor %ebx,%ebx\n\t"
2479 "lea 0x8(%esp),%esp");
2480 }
2481
2482 static void
2483 i386_emit_ref (int size)
2484 {
2485 switch (size)
2486 {
2487 case 1:
2488 EMIT_ASM32 (i386_ref1,
2489 "movb (%eax),%al");
2490 break;
2491 case 2:
2492 EMIT_ASM32 (i386_ref2,
2493 "movw (%eax),%ax");
2494 break;
2495 case 4:
2496 EMIT_ASM32 (i386_ref4,
2497 "movl (%eax),%eax");
2498 break;
2499 case 8:
2500 EMIT_ASM32 (i386_ref8,
2501 "movl 4(%eax),%ebx\n\t"
2502 "movl (%eax),%eax");
2503 break;
2504 }
2505 }
2506
2507 static void
2508 i386_emit_if_goto (int *offset_p, int *size_p)
2509 {
2510 EMIT_ASM32 (i386_if_goto,
2511 "mov %eax,%ecx\n\t"
2512 "or %ebx,%ecx\n\t"
2513 "pop %eax\n\t"
2514 "pop %ebx\n\t"
2515 "cmpl $0,%ecx\n\t"
2516 /* Don't trust the assembler to choose the right jump */
2517 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2518
2519 if (offset_p)
2520 *offset_p = 11; /* be sure that this matches the sequence above */
2521 if (size_p)
2522 *size_p = 4;
2523 }
2524
2525 static void
2526 i386_emit_goto (int *offset_p, int *size_p)
2527 {
2528 EMIT_ASM32 (i386_goto,
2529 /* Don't trust the assembler to choose the right jump */
2530 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2531 if (offset_p)
2532 *offset_p = 1;
2533 if (size_p)
2534 *size_p = 4;
2535 }
2536
2537 static void
2538 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2539 {
2540 int diff = (to - (from + size));
2541 unsigned char buf[sizeof (int)];
2542
2543 /* We're only doing 4-byte sizes at the moment. */
2544 if (size != 4)
2545 {
2546 emit_error = 1;
2547 return;
2548 }
2549
2550 memcpy (buf, &diff, sizeof (int));
2551 target_write_memory (from, buf, sizeof (int));
2552 }
2553
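/* Emit code to load the 64-bit constant NUM into the %eax (low half)
   and %ebx (high half) register pair.  */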
2554 static void
2555 i386_emit_const (LONGEST num)
2556 {
2557 unsigned char buf[16];
2558 int i, hi, lo;
2559 CORE_ADDR buildaddr = current_insn_ptr;
2560
2561 i = 0;
2562 buf[i++] = 0xb8; /* mov $<n>,%eax */
2563 lo = num & 0xffffffff;
2564 memcpy (&buf[i], &lo, sizeof (lo));
2565 i += 4;
2566 hi = ((num >> 32) & 0xffffffff);
2567 if (hi)
2568 {
2569 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2570 memcpy (&buf[i], &hi, sizeof (hi));
2571 i += 4;
2572 }
2573 else
2574 {
2575 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2576 }
2577 append_insns (&buildaddr, i, buf);
2578 current_insn_ptr = buildaddr;
2579 }
2580
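/* Emit a 32-bit relative call to the function at address FN.  */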
2581 static void
2582 i386_emit_call (CORE_ADDR fn)
2583 {
2584 unsigned char buf[16];
2585 int i, offset;
2586 CORE_ADDR buildaddr;
2587
2588 buildaddr = current_insn_ptr;
2589 i = 0;
2590 buf[i++] = 0xe8; /* call <reladdr> */
2591 offset = ((int) fn) - (buildaddr + 5);
2592 memcpy (buf + 1, &offset, 4);
2593 append_insns (&buildaddr, 5, buf);
2594 current_insn_ptr = buildaddr;
2595 }
2596
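/* Emit code to fetch raw register REG by calling the raw-register
   helper with the regs pointer (from 8(%ebp)) and the register number
   as arguments.  */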
2597 static void
2598 i386_emit_reg (int reg)
2599 {
2600 unsigned char buf[16];
2601 int i;
2602 CORE_ADDR buildaddr;
2603
2604 EMIT_ASM32 (i386_reg_a,
2605 "sub $0x8,%esp");
2606 buildaddr = current_insn_ptr;
2607 i = 0;
2608 buf[i++] = 0xb8; /* mov $<n>,%eax */
2609 memcpy (&buf[i], &reg, sizeof (reg));
2610 i += 4;
2611 append_insns (&buildaddr, i, buf);
2612 current_insn_ptr = buildaddr;
2613 EMIT_ASM32 (i386_reg_b,
2614 "mov %eax,4(%esp)\n\t"
2615 "mov 8(%ebp),%eax\n\t"
2616 "mov %eax,(%esp)");
2617 i386_emit_call (get_raw_reg_func_addr ());
2618 EMIT_ASM32 (i386_reg_c,
2619 "xor %ebx,%ebx\n\t"
2620 "lea 0x8(%esp),%esp");
2621 }
2622
2623 static void
2624 i386_emit_pop (void)
2625 {
2626 EMIT_ASM32 (i386_pop,
2627 "pop %eax\n\t"
2628 "pop %ebx");
2629 }
2630
2631 static void
2632 i386_emit_stack_flush (void)
2633 {
2634 EMIT_ASM32 (i386_stack_flush,
2635 "push %ebx\n\t"
2636 "push %eax");
2637 }
2638
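/* Emit code to zero-extend the low ARG bits of the top-of-stack
   value held in the %eax/%ebx pair.  */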
2639 static void
2640 i386_emit_zero_ext (int arg)
2641 {
2642 switch (arg)
2643 {
2644 case 8:
2645 EMIT_ASM32 (i386_zero_ext_8,
2646 "and $0xff,%eax\n\t"
2647 "xor %ebx,%ebx");
2648 break;
2649 case 16:
2650 EMIT_ASM32 (i386_zero_ext_16,
2651 "and $0xffff,%eax\n\t"
2652 "xor %ebx,%ebx");
2653 break;
2654 case 32:
2655 EMIT_ASM32 (i386_zero_ext_32,
2656 "xor %ebx,%ebx");
2657 break;
2658 default:
2659 emit_error = 1;
2660 }
2661 }
2662
2663 static void
2664 i386_emit_swap (void)
2665 {
2666 EMIT_ASM32 (i386_swap,
2667 "mov %eax,%ecx\n\t"
2668 "mov %ebx,%edx\n\t"
2669 "pop %eax\n\t"
2670 "pop %ebx\n\t"
2671 "push %edx\n\t"
2672 "push %ecx");
2673 }
2674
2675 static void
2676 i386_emit_stack_adjust (int n)
2677 {
2678 unsigned char buf[16];
2679 int i;
2680 CORE_ADDR buildaddr = current_insn_ptr;
2681
2682 i = 0;
2683 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2684 buf[i++] = 0x64;
2685 buf[i++] = 0x24;
2686 buf[i++] = n * 8;
2687 append_insns (&buildaddr, i, buf);
2688 current_insn_ptr = buildaddr;
2689 }
2690
2691 /* FN's prototype is `LONGEST(*fn)(int)'. */
2692
2693 static void
2694 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2695 {
2696 unsigned char buf[16];
2697 int i;
2698 CORE_ADDR buildaddr;
2699
2700 EMIT_ASM32 (i386_int_call_1_a,
2701 /* Reserve a bit of stack space. */
2702 "sub $0x8,%esp");
2703 /* Put the one argument on the stack. */
2704 buildaddr = current_insn_ptr;
2705 i = 0;
2706 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2707 buf[i++] = 0x04;
2708 buf[i++] = 0x24;
2709 memcpy (&buf[i], &arg1, sizeof (arg1));
2710 i += 4;
2711 append_insns (&buildaddr, i, buf);
2712 current_insn_ptr = buildaddr;
2713 i386_emit_call (fn);
2714 EMIT_ASM32 (i386_int_call_1_c,
2715 "mov %edx,%ebx\n\t"
2716 "lea 0x8(%esp),%esp");
2717 }
2718
2719 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2720
2721 static void
2722 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2723 {
2724 unsigned char buf[16];
2725 int i;
2726 CORE_ADDR buildaddr;
2727
2728 EMIT_ASM32 (i386_void_call_2_a,
2729 /* Preserve %eax only; we don't have to worry about %ebx. */
2730 "push %eax\n\t"
2731 /* Reserve a bit of stack space for arguments. */
2732 "sub $0x10,%esp\n\t"
2733 /* Copy "top" to the second argument position. (Note that
2734 we can't assume the function won't scribble on its
2735 arguments, so don't try to restore from this.) */
2736 "mov %eax,4(%esp)\n\t"
2737 "mov %ebx,8(%esp)");
2738 /* Put the first argument on the stack. */
2739 buildaddr = current_insn_ptr;
2740 i = 0;
2741 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2742 buf[i++] = 0x04;
2743 buf[i++] = 0x24;
2744 memcpy (&buf[i], &arg1, sizeof (arg1));
2745 i += 4;
2746 append_insns (&buildaddr, i, buf);
2747 current_insn_ptr = buildaddr;
2748 i386_emit_call (fn);
2749 EMIT_ASM32 (i386_void_call_2_b,
2750 "lea 0x10(%esp),%esp\n\t"
2751 /* Restore original stack top. */
2752 "pop %eax");
2753 }
2754
2755
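/* As with the amd64 emitters above, the conditional-goto emitters
   below compare the 64-bit values held in %eax/%ebx and on the stack,
   and leave a jump displacement to be patched later by
   i386_write_goto_address.  */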
2756 static void
2757 i386_emit_eq_goto (int *offset_p, int *size_p)
2758 {
2759 EMIT_ASM32 (eq,
2760 /* Check low half first, more likely to be decider */
2761 "cmpl %eax,(%esp)\n\t"
2762 "jne .Leq_fallthru\n\t"
2763 "cmpl %ebx,4(%esp)\n\t"
2764 "jne .Leq_fallthru\n\t"
2765 "lea 0x8(%esp),%esp\n\t"
2766 "pop %eax\n\t"
2767 "pop %ebx\n\t"
2768 /* jmp, but don't trust the assembler to choose the right jump */
2769 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2770 ".Leq_fallthru:\n\t"
2771 "lea 0x8(%esp),%esp\n\t"
2772 "pop %eax\n\t"
2773 "pop %ebx");
2774
2775 if (offset_p)
2776 *offset_p = 18;
2777 if (size_p)
2778 *size_p = 4;
2779 }
2780
2781 static void
2782 i386_emit_ne_goto (int *offset_p, int *size_p)
2783 {
2784 EMIT_ASM32 (ne,
2785 /* Check low half first, more likely to be decider */
2786 "cmpl %eax,(%esp)\n\t"
2787 "jne .Lne_jump\n\t"
2788 "cmpl %ebx,4(%esp)\n\t"
2789 "je .Lne_fallthru\n\t"
2790 ".Lne_jump:\n\t"
2791 "lea 0x8(%esp),%esp\n\t"
2792 "pop %eax\n\t"
2793 "pop %ebx\n\t"
2794 /* jmp, but don't trust the assembler to choose the right jump */
2795 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2796 ".Lne_fallthru:\n\t"
2797 "lea 0x8(%esp),%esp\n\t"
2798 "pop %eax\n\t"
2799 "pop %ebx");
2800
2801 if (offset_p)
2802 *offset_p = 18;
2803 if (size_p)
2804 *size_p = 4;
2805 }
2806
2807 static void
2808 i386_emit_lt_goto (int *offset_p, int *size_p)
2809 {
2810 EMIT_ASM32 (lt,
2811 "cmpl %ebx,4(%esp)\n\t"
2812 "jl .Llt_jump\n\t"
2813 "jne .Llt_fallthru\n\t"
2814 "cmpl %eax,(%esp)\n\t"
2815 "jnl .Llt_fallthru\n\t"
2816 ".Llt_jump:\n\t"
2817 "lea 0x8(%esp),%esp\n\t"
2818 "pop %eax\n\t"
2819 "pop %ebx\n\t"
2820 /* jmp, but don't trust the assembler to choose the right jump */
2821 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2822 ".Llt_fallthru:\n\t"
2823 "lea 0x8(%esp),%esp\n\t"
2824 "pop %eax\n\t"
2825 "pop %ebx");
2826
2827 if (offset_p)
2828 *offset_p = 20;
2829 if (size_p)
2830 *size_p = 4;
2831 }
2832
2833 static void
2834 i386_emit_le_goto (int *offset_p, int *size_p)
2835 {
2836 EMIT_ASM32 (le,
2837 "cmpl %ebx,4(%esp)\n\t"
2838 "jle .Lle_jump\n\t"
2839 "jne .Lle_fallthru\n\t"
2840 "cmpl %eax,(%esp)\n\t"
2841 "jnle .Lle_fallthru\n\t"
2842 ".Lle_jump:\n\t"
2843 "lea 0x8(%esp),%esp\n\t"
2844 "pop %eax\n\t"
2845 "pop %ebx\n\t"
2846 /* jmp, but don't trust the assembler to choose the right jump */
2847 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2848 ".Lle_fallthru:\n\t"
2849 "lea 0x8(%esp),%esp\n\t"
2850 "pop %eax\n\t"
2851 "pop %ebx");
2852
2853 if (offset_p)
2854 *offset_p = 20;
2855 if (size_p)
2856 *size_p = 4;
2857 }
2858
2859 static void
2860 i386_emit_gt_goto (int *offset_p, int *size_p)
2861 {
2862 EMIT_ASM32 (gt,
2863 "cmpl %ebx,4(%esp)\n\t"
2864 "jg .Lgt_jump\n\t"
2865 "jne .Lgt_fallthru\n\t"
2866 "cmpl %eax,(%esp)\n\t"
2867 "jng .Lgt_fallthru\n\t"
2868 ".Lgt_jump:\n\t"
2869 "lea 0x8(%esp),%esp\n\t"
2870 "pop %eax\n\t"
2871 "pop %ebx\n\t"
2872 /* jmp, but don't trust the assembler to choose the right jump */
2873 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2874 ".Lgt_fallthru:\n\t"
2875 "lea 0x8(%esp),%esp\n\t"
2876 "pop %eax\n\t"
2877 "pop %ebx");
2878
2879 if (offset_p)
2880 *offset_p = 20;
2881 if (size_p)
2882 *size_p = 4;
2883 }
2884
2885 static void
2886 i386_emit_ge_goto (int *offset_p, int *size_p)
2887 {
2888 EMIT_ASM32 (ge,
2889 "cmpl %ebx,4(%esp)\n\t"
2890 "jge .Lge_jump\n\t"
2891 "jne .Lge_fallthru\n\t"
2892 "cmpl %eax,(%esp)\n\t"
2893 "jnge .Lge_fallthru\n\t"
2894 ".Lge_jump:\n\t"
2895 "lea 0x8(%esp),%esp\n\t"
2896 "pop %eax\n\t"
2897 "pop %ebx\n\t"
2898 /* jmp, but don't trust the assembler to choose the right jump */
2899 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2900 ".Lge_fallthru:\n\t"
2901 "lea 0x8(%esp),%esp\n\t"
2902 "pop %eax\n\t"
2903 "pop %ebx");
2904
2905 if (offset_p)
2906 *offset_p = 20;
2907 if (size_p)
2908 *size_p = 4;
2909 }
2910
2911 struct emit_ops i386_emit_ops =
2912 {
2913 i386_emit_prologue,
2914 i386_emit_epilogue,
2915 i386_emit_add,
2916 i386_emit_sub,
2917 i386_emit_mul,
2918 i386_emit_lsh,
2919 i386_emit_rsh_signed,
2920 i386_emit_rsh_unsigned,
2921 i386_emit_ext,
2922 i386_emit_log_not,
2923 i386_emit_bit_and,
2924 i386_emit_bit_or,
2925 i386_emit_bit_xor,
2926 i386_emit_bit_not,
2927 i386_emit_equal,
2928 i386_emit_less_signed,
2929 i386_emit_less_unsigned,
2930 i386_emit_ref,
2931 i386_emit_if_goto,
2932 i386_emit_goto,
2933 i386_write_goto_address,
2934 i386_emit_const,
2935 i386_emit_call,
2936 i386_emit_reg,
2937 i386_emit_pop,
2938 i386_emit_stack_flush,
2939 i386_emit_zero_ext,
2940 i386_emit_swap,
2941 i386_emit_stack_adjust,
2942 i386_emit_int_call_1,
2943 i386_emit_void_call_2,
2944 i386_emit_eq_goto,
2945 i386_emit_ne_goto,
2946 i386_emit_lt_goto,
2947 i386_emit_le_goto,
2948 i386_emit_gt_goto,
2949 i386_emit_ge_goto
2950 };
2951
2952
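/* Implementation of target ops method "emit_ops".  Pick the emitter
   table that matches the inferior's word size.  */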
2953 emit_ops *
2954 x86_target::emit_ops ()
2955 {
2956 #ifdef __x86_64__
2957 if (is_64bit_tdesc ())
2958 return &amd64_emit_ops;
2959 else
2960 #endif
2961 return &i386_emit_ops;
2962 }
2963
2964 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2965
2966 const gdb_byte *
2967 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2968 {
2969 *size = x86_breakpoint_len;
2970 return x86_breakpoint;
2971 }
2972
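/* Implementation of linux target ops method
   "low_supports_range_stepping".  */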
2973 bool
2974 x86_target::low_supports_range_stepping ()
2975 {
2976 return true;
2977 }
2978
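/* Implementation of target ops method "get_ipa_tdesc_idx".  */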
2979 int
2980 x86_target::get_ipa_tdesc_idx ()
2981 {
2982 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2983 const struct target_desc *tdesc = regcache->tdesc;
2984
2985 #ifdef __x86_64__
2986 return amd64_get_ipa_tdesc_idx (tdesc);
2987 #endif
2988
2989 if (tdesc == tdesc_i386_linux_no_xml)
2990 return X86_TDESC_SSE;
2991
2992 return i386_get_ipa_tdesc_idx (tdesc);
2993 }
2994
2995 /* The linux target ops object. */
2996
2997 linux_process_target *the_linux_target = &the_x86_target;
2998
2999 void
3000 initialize_low_arch (void)
3001 {
3002 /* Initialize the Linux target descriptions. */
3003 #ifdef __x86_64__
3004 tdesc_amd64_linux_no_xml = allocate_target_description ();
3005 copy_target_description (tdesc_amd64_linux_no_xml,
3006 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
3007 false));
3008 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3009 #endif
3010
3011 tdesc_i386_linux_no_xml = allocate_target_description ();
3012 copy_target_description (tdesc_i386_linux_no_xml,
3013 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3014 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3015
3016 initialize_regsets_info (&x86_regsets_info);
3017 }