1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
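/* jump_insn is a 5-byte "jmp rel32" template and small_jump_insn a
   4-byte "jmp rel16" template (an 0x66 operand-size prefix plus 0xe9);
   the relative offsets are patched in when the jump pads are wired up
   below.  */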
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
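/* Illustrative only: these requests are passed as the fourth ptrace
   argument with PTRACE_ARCH_PRCTL, e.g.

     unsigned long base;
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);

   reads a thread's FS segment base, as done below in x86_get_thread_area
   and x86_store_gregset.  */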
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 /* Update the target description of all processes; a new GDB has
104 connected, and it may or may not support xml target descriptions. */
105 void update_xmltarget ();
106
107 const regs_info *get_regs_info () override;
108
109 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
110
111 bool supports_z_point_type (char z_type) override;
112
113 protected:
114
115 void low_arch_setup () override;
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
126
127 int low_decr_pc_after_break () override;
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
130
131 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
132 int size, raw_breakpoint *bp) override;
133
134 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
135 int size, raw_breakpoint *bp) override;
136
137 bool low_stopped_by_watchpoint () override;
138
139 CORE_ADDR low_stopped_data_address () override;
140
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
144
145 /* Need to fix up i386 siginfo if host is amd64. */
146 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
147 int direction) override;
148
149 arch_process_info *low_new_process () override;
150
151 void low_delete_process (arch_process_info *info) override;
152
153 void low_new_thread (lwp_info *) override;
154
155 void low_delete_thread (arch_lwp_info *) override;
156
157 void low_new_fork (process_info *parent, process_info *child) override;
158
159 void low_prepare_to_resume (lwp_info *lwp) override;
160 };
161
162 /* The singleton target ops object. */
163
164 static x86_target the_x86_target;
165
166 /* Per-process arch-specific data we want to keep. */
167
168 struct arch_process_info
169 {
170 struct x86_debug_reg_state debug_reg_state;
171 };
172
173 #ifdef __x86_64__
174
175 /* Mapping between the general-purpose registers in `struct user'
176 format and GDB's register array layout.
177 Note that the transfer layout uses 64-bit regs. */
178 static /*const*/ int i386_regmap[] =
179 {
180 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
181 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
182 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
183 DS * 8, ES * 8, FS * 8, GS * 8
184 };
185
186 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
187
188 /* So the code below doesn't have to care whether it's i386 or amd64. */
189 #define ORIG_EAX ORIG_RAX
190 #define REGSIZE 8
191
192 static const int x86_64_regmap[] =
193 {
194 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
195 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
196 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
197 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
198 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
199 DS * 8, ES * 8, FS * 8, GS * 8,
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1,
202 -1, -1, -1, -1, -1, -1, -1, -1,
203 -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 ORIG_RAX * 8,
206 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
207 21 * 8, 22 * 8,
208 #else
209 -1, -1,
210 #endif
211 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
212 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
213 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
214 -1, -1, -1, -1, -1, -1, -1, -1,
215 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
216 -1, -1, -1, -1, -1, -1, -1, -1,
217 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
218 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
219 -1, -1, -1, -1, -1, -1, -1, -1,
220 -1, -1, -1, -1, -1, -1, -1, -1,
221 -1, -1, -1, -1, -1, -1, -1, -1,
222 -1 /* pkru */
223 };
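/* A -1 entry above means the register is not transferred through the
   PTRACE_GETREGS/SETREGS buffer at all; x86_fill_gregset and
   x86_store_gregset skip such entries, and those registers (SSE/AVX/AVX512
   state, PKRU, ...) are handled through other regsets such as
   PTRACE_GETREGSET with NT_X86_XSTATE instead.  */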
224
225 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
226 #define X86_64_USER_REGS (GS + 1)
227
228 #else /* ! __x86_64__ */
229
230 /* Mapping between the general-purpose registers in `struct user'
231 format and GDB's register array layout. */
232 static /*const*/ int i386_regmap[] =
233 {
234 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
235 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
236 EIP * 4, EFL * 4, CS * 4, SS * 4,
237 DS * 4, ES * 4, FS * 4, GS * 4
238 };
239
240 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
241
242 #define REGSIZE 4
243
244 #endif
245
246 #ifdef __x86_64__
247
248 /* Returns true if the current inferior belongs to an x86-64 process,
249 per the tdesc. */
250
251 static int
252 is_64bit_tdesc (void)
253 {
254 struct regcache *regcache = get_thread_regcache (current_thread, 0);
255
256 return register_size (regcache->tdesc, 0) == 8;
257 }
258
259 #endif
260
261 \f
262 /* Called by libthread_db. */
263
264 ps_err_e
265 ps_get_thread_area (struct ps_prochandle *ph,
266 lwpid_t lwpid, int idx, void **base)
267 {
268 #ifdef __x86_64__
269 int use_64bit = is_64bit_tdesc ();
270
271 if (use_64bit)
272 {
273 switch (idx)
274 {
275 case FS:
276 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
277 return PS_OK;
278 break;
279 case GS:
280 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
281 return PS_OK;
282 break;
283 default:
284 return PS_BADADDR;
285 }
286 return PS_ERR;
287 }
288 #endif
289
290 {
291 unsigned int desc[4];
292
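    /* DESC receives a GDT entry in the kernel's struct user_desc layout;
       its second word (desc[1]) is the segment base address, which is all
       we use below.  */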
293 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
294 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
295 return PS_ERR;
296
297 /* Ensure we properly extend the value to 64-bits for x86_64. */
298 *base = (void *) (uintptr_t) desc[1];
299 return PS_OK;
300 }
301 }
302
303 /* Get the thread area address. This is used to recognize which
304 thread is which when tracing with the in-process agent library. We
305 don't read anything from the address, and treat it as opaque; it's
306 the address itself that we assume is unique per-thread. */
307
308 static int
309 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
310 {
311 #ifdef __x86_64__
312 int use_64bit = is_64bit_tdesc ();
313
314 if (use_64bit)
315 {
316 void *base;
317 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
318 {
319 *addr = (CORE_ADDR) (uintptr_t) base;
320 return 0;
321 }
322
323 return -1;
324 }
325 #endif
326
327 {
328 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
329 struct thread_info *thr = get_lwp_thread (lwp);
330 struct regcache *regcache = get_thread_regcache (thr, 1);
331 unsigned int desc[4];
332 ULONGEST gs = 0;
333 const int reg_thread_area = 3; /* Shift out the low 3 selector bits (RPL/TI) to get the GDT index. */
334 int idx;
335
336 collect_register_by_name (regcache, "gs", &gs);
337
338 idx = gs >> reg_thread_area;
339
340 if (ptrace (PTRACE_GET_THREAD_AREA,
341 lwpid_of (thr),
342 (void *) (long) idx, (unsigned long) &desc) < 0)
343 return -1;
344
345 *addr = desc[1];
346 return 0;
347 }
348 }
349
350
351 \f
352 bool
353 x86_target::low_cannot_store_register (int regno)
354 {
355 #ifdef __x86_64__
356 if (is_64bit_tdesc ())
357 return false;
358 #endif
359
360 return regno >= I386_NUM_REGS;
361 }
362
363 bool
364 x86_target::low_cannot_fetch_register (int regno)
365 {
366 #ifdef __x86_64__
367 if (is_64bit_tdesc ())
368 return false;
369 #endif
370
371 return regno >= I386_NUM_REGS;
372 }
373
374 static void
375 x86_fill_gregset (struct regcache *regcache, void *buf)
376 {
377 int i;
378
379 #ifdef __x86_64__
380 if (register_size (regcache->tdesc, 0) == 8)
381 {
382 for (i = 0; i < X86_64_NUM_REGS; i++)
383 if (x86_64_regmap[i] != -1)
384 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
385
386 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
387 {
388 unsigned long base;
389 int lwpid = lwpid_of (current_thread);
390
391 collect_register_by_name (regcache, "fs_base", &base);
392 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
393
394 collect_register_by_name (regcache, "gs_base", &base);
395 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
396 }
397 #endif
398
399 return;
400 }
401
402 /* 32-bit inferior registers need to be zero-extended.
403 Callers would read uninitialized memory otherwise. */
404 memset (buf, 0x00, X86_64_USER_REGS * 8);
405 #endif
406
407 for (i = 0; i < I386_NUM_REGS; i++)
408 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
409
410 collect_register_by_name (regcache, "orig_eax",
411 ((char *) buf) + ORIG_EAX * REGSIZE);
412
413 #ifdef __x86_64__
414 /* Sign extend EAX value to avoid potential syscall restart
415 problems.
416
417 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
418 for a detailed explanation. */
419 if (register_size (regcache->tdesc, 0) == 4)
420 {
421 void *ptr = ((gdb_byte *) buf
422 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
423
424 *(int64_t *) ptr = *(int32_t *) ptr;
425 }
426 #endif
427 }
428
429 static void
430 x86_store_gregset (struct regcache *regcache, const void *buf)
431 {
432 int i;
433
434 #ifdef __x86_64__
435 if (register_size (regcache->tdesc, 0) == 8)
436 {
437 for (i = 0; i < X86_64_NUM_REGS; i++)
438 if (x86_64_regmap[i] != -1)
439 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
440
441 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
442 {
443 unsigned long base;
444 int lwpid = lwpid_of (current_thread);
445
446 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
447 supply_register_by_name (regcache, "fs_base", &base);
448
449 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
450 supply_register_by_name (regcache, "gs_base", &base);
451 }
452 #endif
453 return;
454 }
455 #endif
456
457 for (i = 0; i < I386_NUM_REGS; i++)
458 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
459
460 supply_register_by_name (regcache, "orig_eax",
461 ((char *) buf) + ORIG_EAX * REGSIZE);
462 }
463
464 static void
465 x86_fill_fpregset (struct regcache *regcache, void *buf)
466 {
467 #ifdef __x86_64__
468 i387_cache_to_fxsave (regcache, buf);
469 #else
470 i387_cache_to_fsave (regcache, buf);
471 #endif
472 }
473
474 static void
475 x86_store_fpregset (struct regcache *regcache, const void *buf)
476 {
477 #ifdef __x86_64__
478 i387_fxsave_to_cache (regcache, buf);
479 #else
480 i387_fsave_to_cache (regcache, buf);
481 #endif
482 }
483
484 #ifndef __x86_64__
485
486 static void
487 x86_fill_fpxregset (struct regcache *regcache, void *buf)
488 {
489 i387_cache_to_fxsave (regcache, buf);
490 }
491
492 static void
493 x86_store_fpxregset (struct regcache *regcache, const void *buf)
494 {
495 i387_fxsave_to_cache (regcache, buf);
496 }
497
498 #endif
499
500 static void
501 x86_fill_xstateregset (struct regcache *regcache, void *buf)
502 {
503 i387_cache_to_xsave (regcache, buf);
504 }
505
506 static void
507 x86_store_xstateregset (struct regcache *regcache, const void *buf)
508 {
509 i387_xsave_to_cache (regcache, buf);
510 }
511
512 /* ??? The non-biarch i386 case stores all the i387 regs twice.
513 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
514 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
515 doesn't work. It would be nice to avoid the duplication in the case
516 where it does work. Maybe the arch_setup routine could check whether
517 it works and update the supported regsets accordingly. */
518
519 static struct regset_info x86_regsets[] =
520 {
521 #ifdef HAVE_PTRACE_GETREGS
522 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
523 GENERAL_REGS,
524 x86_fill_gregset, x86_store_gregset },
525 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
526 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
527 # ifndef __x86_64__
528 # ifdef HAVE_PTRACE_GETFPXREGS
529 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
530 EXTENDED_REGS,
531 x86_fill_fpxregset, x86_store_fpxregset },
532 # endif
533 # endif
534 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
535 FP_REGS,
536 x86_fill_fpregset, x86_store_fpregset },
537 #endif /* HAVE_PTRACE_GETREGS */
538 NULL_REGSET
539 };
540
541 bool
542 x86_target::low_supports_breakpoints ()
543 {
544 return true;
545 }
546
547 CORE_ADDR
548 x86_target::low_get_pc (regcache *regcache)
549 {
550 int use_64bit = register_size (regcache->tdesc, 0) == 8;
551
552 if (use_64bit)
553 {
554 uint64_t pc;
555
556 collect_register_by_name (regcache, "rip", &pc);
557 return (CORE_ADDR) pc;
558 }
559 else
560 {
561 uint32_t pc;
562
563 collect_register_by_name (regcache, "eip", &pc);
564 return (CORE_ADDR) pc;
565 }
566 }
567
568 void
569 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
570 {
571 int use_64bit = register_size (regcache->tdesc, 0) == 8;
572
573 if (use_64bit)
574 {
575 uint64_t newpc = pc;
576
577 supply_register_by_name (regcache, "rip", &newpc);
578 }
579 else
580 {
581 uint32_t newpc = pc;
582
583 supply_register_by_name (regcache, "eip", &newpc);
584 }
585 }
586
587 int
588 x86_target::low_decr_pc_after_break ()
589 {
590 return 1;
591 }
592
593 \f
594 static const gdb_byte x86_breakpoint[] = { 0xCC };
595 #define x86_breakpoint_len 1
596
597 bool
598 x86_target::low_breakpoint_at (CORE_ADDR pc)
599 {
600 unsigned char c;
601
602 read_memory (pc, &c, 1);
603 if (c == 0xCC)
604 return true;
605
606 return false;
607 }
608 \f
609 /* Low-level function vector. */
610 struct x86_dr_low_type x86_dr_low =
611 {
612 x86_linux_dr_set_control,
613 x86_linux_dr_set_addr,
614 x86_linux_dr_get_addr,
615 x86_linux_dr_get_status,
616 x86_linux_dr_get_control,
617 sizeof (void *),
618 };
619 \f
620 /* Breakpoint/Watchpoint support. */
621
622 bool
623 x86_target::supports_z_point_type (char z_type)
624 {
625 switch (z_type)
626 {
627 case Z_PACKET_SW_BP:
628 case Z_PACKET_HW_BP:
629 case Z_PACKET_WRITE_WP:
630 case Z_PACKET_ACCESS_WP:
631 return true;
632 default:
633 return false;
634 }
635 }
636
637 int
638 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
639 int size, raw_breakpoint *bp)
640 {
641 struct process_info *proc = current_process ();
642
643 switch (type)
644 {
645 case raw_bkpt_type_hw:
646 case raw_bkpt_type_write_wp:
647 case raw_bkpt_type_access_wp:
648 {
649 enum target_hw_bp_type hw_type
650 = raw_bkpt_type_to_target_hw_bp_type (type);
651 struct x86_debug_reg_state *state
652 = &proc->priv->arch_private->debug_reg_state;
653
654 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
655 }
656
657 default:
658 /* Unsupported. */
659 return 1;
660 }
661 }
662
663 int
664 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
665 int size, raw_breakpoint *bp)
666 {
667 struct process_info *proc = current_process ();
668
669 switch (type)
670 {
671 case raw_bkpt_type_hw:
672 case raw_bkpt_type_write_wp:
673 case raw_bkpt_type_access_wp:
674 {
675 enum target_hw_bp_type hw_type
676 = raw_bkpt_type_to_target_hw_bp_type (type);
677 struct x86_debug_reg_state *state
678 = &proc->priv->arch_private->debug_reg_state;
679
680 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
681 }
682 default:
683 /* Unsupported. */
684 return 1;
685 }
686 }
687
688 bool
689 x86_target::low_stopped_by_watchpoint ()
690 {
691 struct process_info *proc = current_process ();
692 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
693 }
694
695 CORE_ADDR
696 x86_target::low_stopped_data_address ()
697 {
698 struct process_info *proc = current_process ();
699 CORE_ADDR addr;
700 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
701 &addr))
702 return addr;
703 return 0;
704 }
705 \f
706 /* Called when a new process is created. */
707
708 arch_process_info *
709 x86_target::low_new_process ()
710 {
711 struct arch_process_info *info = XCNEW (struct arch_process_info);
712
713 x86_low_init_dregs (&info->debug_reg_state);
714
715 return info;
716 }
717
718 /* Called when a process is being deleted. */
719
720 void
721 x86_target::low_delete_process (arch_process_info *info)
722 {
723 xfree (info);
724 }
725
726 void
727 x86_target::low_new_thread (lwp_info *lwp)
728 {
729 /* This comes from nat/. */
730 x86_linux_new_thread (lwp);
731 }
732
733 void
734 x86_target::low_delete_thread (arch_lwp_info *alwp)
735 {
736 /* This comes from nat/. */
737 x86_linux_delete_thread (alwp);
738 }
739
740 /* Target routine for new_fork. */
741
742 void
743 x86_target::low_new_fork (process_info *parent, process_info *child)
744 {
745 /* These are allocated by linux_add_process. */
746 gdb_assert (parent->priv != NULL
747 && parent->priv->arch_private != NULL);
748 gdb_assert (child->priv != NULL
749 && child->priv->arch_private != NULL);
750
751 /* Linux kernel before 2.6.33 commit
752 72f674d203cd230426437cdcf7dd6f681dad8b0d
753 will inherit hardware debug registers from parent
754 on fork/vfork/clone. Newer Linux kernels create such tasks with
755 zeroed debug registers.
756
757 GDB core assumes the child inherits the watchpoints/hw
758 breakpoints of the parent, and will remove them all from the
759 forked off process. Copy the debug registers mirrors into the
760 new process so that all breakpoints and watchpoints can be
761 removed together. The debug registers mirror will become zeroed
762 in the end before detaching the forked off process, thus making
763 this compatible with older Linux kernels too. */
764
765 *child->priv->arch_private = *parent->priv->arch_private;
766 }
767
768 void
769 x86_target::low_prepare_to_resume (lwp_info *lwp)
770 {
771 /* This comes from nat/. */
772 x86_linux_prepare_to_resume (lwp);
773 }
774
775 /* See nat/x86-dregs.h. */
776
777 struct x86_debug_reg_state *
778 x86_debug_reg_state (pid_t pid)
779 {
780 struct process_info *proc = find_process_pid (pid);
781
782 return &proc->priv->arch_private->debug_reg_state;
783 }
784 \f
785 /* When GDBSERVER is built as a 64-bit application on linux, the
786 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
787 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
788 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
789 conversion in-place ourselves. */
790
791 /* Convert a ptrace/host siginfo object into/from the siginfo layout
792 of the inferior's architecture. Returns true if any
793 conversion was done; false otherwise. If DIRECTION is 1, then copy
794 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
795 INF. */
796
797 bool
798 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
799 {
800 #ifdef __x86_64__
801 unsigned int machine;
802 int tid = lwpid_of (current_thread);
803 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
804
805 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
806 if (!is_64bit_tdesc ())
807 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
808 FIXUP_32);
809 /* No fixup for native x32 GDB. */
810 else if (!is_elf64 && sizeof (void *) == 8)
811 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
812 FIXUP_X32);
813 #endif
814
815 return false;
816 }
817 \f
818 static int use_xml;
819
820 /* Format of XSAVE extended state is:
821 struct
822 {
823 fxsave_bytes[0..463]
824 sw_usable_bytes[464..511]
825 xstate_hdr_bytes[512..575]
826 avx_bytes[576..831]
827 future_state etc
828 };
829
830 Same memory layout will be used for the coredump NT_X86_XSTATE
831 representing the XSAVE extended state registers.
832
833 The first 8 bytes of the sw_usable_bytes area [464..471] are the OS enabled
834 extended state mask, which is the same as the extended control register
835 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
836 together with the mask saved in the xstate_hdr_bytes to determine what
837 states the processor/OS supports and what state, used or initialized,
838 the process/thread is in. */
839 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
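/* For example (illustrative only), given a raw XSAVE buffer BUF, the
   OS-enabled feature mask can be read as

     uint64_t xcr0;
     memcpy (&xcr0, buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof (xcr0));

   which is what x86_linux_read_description below does through its
   xstateregs array.  */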
840
841 /* Does the current host support the GETFPXREGS request? The header
842 file may or may not define it, and even if it is defined, the
843 kernel will return EIO if it's running on a pre-SSE processor. */
844 int have_ptrace_getfpxregs =
845 #ifdef HAVE_PTRACE_GETFPXREGS
846 -1
847 #else
848 0
849 #endif
850 ;
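/* The initial value of -1 above means "not yet probed";
   x86_linux_read_description below tries PTRACE_GETFPXREGS once and sets
   this to 0 or 1 accordingly.  */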
851
852 /* Get Linux/x86 target description from running target. */
853
854 static const struct target_desc *
855 x86_linux_read_description (void)
856 {
857 unsigned int machine;
858 int is_elf64;
859 int xcr0_features;
860 int tid;
861 static uint64_t xcr0;
862 struct regset_info *regset;
863
864 tid = lwpid_of (current_thread);
865
866 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
867
868 if (sizeof (void *) == 4)
869 {
870 if (is_elf64 > 0)
871 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
872 #ifndef __x86_64__
873 else if (machine == EM_X86_64)
874 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
875 #endif
876 }
877
878 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
879 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
880 {
881 elf_fpxregset_t fpxregs;
882
883 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
884 {
885 have_ptrace_getfpxregs = 0;
886 have_ptrace_getregset = 0;
887 return i386_linux_read_description (X86_XSTATE_X87);
888 }
889 else
890 have_ptrace_getfpxregs = 1;
891 }
892 #endif
893
894 if (!use_xml)
895 {
896 x86_xcr0 = X86_XSTATE_SSE_MASK;
897
898 /* Don't use XML. */
899 #ifdef __x86_64__
900 if (machine == EM_X86_64)
901 return tdesc_amd64_linux_no_xml;
902 else
903 #endif
904 return tdesc_i386_linux_no_xml;
905 }
906
907 if (have_ptrace_getregset == -1)
908 {
909 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
910 struct iovec iov;
911
912 iov.iov_base = xstateregs;
913 iov.iov_len = sizeof (xstateregs);
914
915 /* Check if PTRACE_GETREGSET works. */
916 if (ptrace (PTRACE_GETREGSET, tid,
917 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
918 have_ptrace_getregset = 0;
919 else
920 {
921 have_ptrace_getregset = 1;
922
923 /* Get XCR0 from XSAVE extended state. */
924 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
925 / sizeof (uint64_t))];
926
927 /* Use PTRACE_GETREGSET if it is available. */
928 for (regset = x86_regsets;
929 regset->fill_function != NULL; regset++)
930 if (regset->get_request == PTRACE_GETREGSET)
931 regset->size = X86_XSTATE_SIZE (xcr0);
932 else if (regset->type != GENERAL_REGS)
933 regset->size = 0;
934 }
935 }
936
937 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
938 xcr0_features = (have_ptrace_getregset
939 && (xcr0 & X86_XSTATE_ALL_MASK));
940
941 if (xcr0_features)
942 x86_xcr0 = xcr0;
943
944 if (machine == EM_X86_64)
945 {
946 #ifdef __x86_64__
947 const target_desc *tdesc = NULL;
948
949 if (xcr0_features)
950 {
951 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
952 !is_elf64);
953 }
954
955 if (tdesc == NULL)
956 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
957 return tdesc;
958 #endif
959 }
960 else
961 {
962 const target_desc *tdesc = NULL;
963
964 if (xcr0_features)
965 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
966
967 if (tdesc == NULL)
968 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
969
970 return tdesc;
971 }
972
973 gdb_assert_not_reached ("failed to return tdesc");
974 }
975
976 /* Update the target description of all processes; a new GDB has
977 connected, and it may or may not support xml target descriptions. */
978
979 void
980 x86_target::update_xmltarget ()
981 {
982 struct thread_info *saved_thread = current_thread;
983
984 /* Before changing the register cache's internal layout, flush the
985 contents of the current valid caches back to the threads, and
986 release the current regcache objects. */
987 regcache_release ();
988
989 for_each_process ([this] (process_info *proc) {
990 int pid = proc->pid;
991
992 /* Look up any thread of this process. */
993 current_thread = find_any_thread_of_pid (pid);
994
995 low_arch_setup ();
996 });
997
998 current_thread = saved_thread;
999 }
1000
1001 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1002 PTRACE_GETREGSET. */
1003
1004 static void
1005 x86_linux_process_qsupported (char **features, int count)
1006 {
1007 int i;
1008
1009 /* Assume gdb doesn't support XML unless it tells us otherwise. If gdb
1010 sends "xmlRegisters=" with "i386" in the qSupported query, it supports
1011 x86 XML target descriptions. */
1012 use_xml = 0;
1013 for (i = 0; i < count; i++)
1014 {
1015 const char *feature = features[i];
1016
1017 if (startswith (feature, "xmlRegisters="))
1018 {
1019 char *copy = xstrdup (feature + 13);
1020
1021 char *saveptr;
1022 for (char *p = strtok_r (copy, ",", &saveptr);
1023 p != NULL;
1024 p = strtok_r (NULL, ",", &saveptr))
1025 {
1026 if (strcmp (p, "i386") == 0)
1027 {
1028 use_xml = 1;
1029 break;
1030 }
1031 }
1032
1033 free (copy);
1034 }
1035 }
1036 the_x86_target.update_xmltarget ();
1037 }
1038
1039 /* Common for x86/x86-64. */
1040
1041 static struct regsets_info x86_regsets_info =
1042 {
1043 x86_regsets, /* regsets */
1044 0, /* num_regsets */
1045 NULL, /* disabled_regsets */
1046 };
1047
1048 #ifdef __x86_64__
1049 static struct regs_info amd64_linux_regs_info =
1050 {
1051 NULL, /* regset_bitmap */
1052 NULL, /* usrregs_info */
1053 &x86_regsets_info
1054 };
1055 #endif
1056 static struct usrregs_info i386_linux_usrregs_info =
1057 {
1058 I386_NUM_REGS,
1059 i386_regmap,
1060 };
1061
1062 static struct regs_info i386_linux_regs_info =
1063 {
1064 NULL, /* regset_bitmap */
1065 &i386_linux_usrregs_info,
1066 &x86_regsets_info
1067 };
1068
1069 const regs_info *
1070 x86_target::get_regs_info ()
1071 {
1072 #ifdef __x86_64__
1073 if (is_64bit_tdesc ())
1074 return &amd64_linux_regs_info;
1075 else
1076 #endif
1077 return &i386_linux_regs_info;
1078 }
1079
1080 /* Initialize the target description for the architecture of the
1081 inferior. */
1082
1083 void
1084 x86_target::low_arch_setup ()
1085 {
1086 current_process ()->tdesc = x86_linux_read_description ();
1087 }
1088
1089 /* Fill *SYSNO with the number of the syscall that was trapped. This
1090 should only be called if LWP got a SYSCALL_SIGTRAP. */
1091
1092 static void
1093 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1094 {
1095 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1096
1097 if (use_64bit)
1098 {
1099 long l_sysno;
1100
1101 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1102 *sysno = (int) l_sysno;
1103 }
1104 else
1105 collect_register_by_name (regcache, "orig_eax", sysno);
1106 }
1107
1108 static int
1109 x86_supports_tracepoints (void)
1110 {
1111 return 1;
1112 }
1113
1114 static void
1115 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1116 {
1117 target_write_memory (*to, buf, len);
1118 *to += len;
1119 }
1120
1121 static int
1122 push_opcode (unsigned char *buf, const char *op)
1123 {
1124 unsigned char *buf_org = buf;
1125
1126 while (1)
1127 {
1128 char *endptr;
1129 unsigned long ul = strtoul (op, &endptr, 16);
1130
1131 if (endptr == op)
1132 break;
1133
1134 *buf++ = ul;
1135 op = endptr;
1136 }
1137
1138 return buf - buf_org;
1139 }
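/* For example (illustrative only), push_opcode (buf, "48 89 e6") writes
   the three bytes 0x48 0x89 0xe6 ("mov %rsp,%rsi") into BUF and
   returns 3.  */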
1140
1141 #ifdef __x86_64__
1142
1143 /* Build a jump pad that saves registers and calls a collection
1144 function. Writes the instruction that jumps to the jump pad into
1145 JJUMPAD_INSN. The caller is responsible for writing it in at the
1146 tracepoint address. */
1147
1148 static int
1149 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1150 CORE_ADDR collector,
1151 CORE_ADDR lockaddr,
1152 ULONGEST orig_size,
1153 CORE_ADDR *jump_entry,
1154 CORE_ADDR *trampoline,
1155 ULONGEST *trampoline_size,
1156 unsigned char *jjump_pad_insn,
1157 ULONGEST *jjump_pad_insn_size,
1158 CORE_ADDR *adjusted_insn_addr,
1159 CORE_ADDR *adjusted_insn_addr_end,
1160 char *err)
1161 {
1162 unsigned char buf[40];
1163 int i, offset;
1164 int64_t loffset;
1165
1166 CORE_ADDR buildaddr = *jump_entry;
1167
1168 /* Build the jump pad. */
1169
1170 /* First, do tracepoint data collection. Save registers. */
1171 i = 0;
1172 /* Need to ensure stack pointer saved first. */
1173 buf[i++] = 0x54; /* push %rsp */
1174 buf[i++] = 0x55; /* push %rbp */
1175 buf[i++] = 0x57; /* push %rdi */
1176 buf[i++] = 0x56; /* push %rsi */
1177 buf[i++] = 0x52; /* push %rdx */
1178 buf[i++] = 0x51; /* push %rcx */
1179 buf[i++] = 0x53; /* push %rbx */
1180 buf[i++] = 0x50; /* push %rax */
1181 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1182 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1183 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1184 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1185 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1186 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1187 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1188 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1189 buf[i++] = 0x9c; /* pushfq */
1190 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1191 buf[i++] = 0xbf;
1192 memcpy (buf + i, &tpaddr, 8);
1193 i += 8;
1194 buf[i++] = 0x57; /* push %rdi */
1195 append_insns (&buildaddr, i, buf);
1196
1197 /* Stack space for the collecting_t object. */
1198 i = 0;
1199 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1200 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1201 memcpy (buf + i, &tpoint, 8);
1202 i += 8;
1203 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1204 i += push_opcode (&buf[i],
1205 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1206 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1207 append_insns (&buildaddr, i, buf);
1208
1209 /* spin-lock. */
1210 i = 0;
1211 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1212 memcpy (&buf[i], (void *) &lockaddr, 8);
1213 i += 8;
1214 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1215 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1216 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1217 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1218 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1219 append_insns (&buildaddr, i, buf);
1220
1221 /* Set up the gdb_collect call. */
1222 /* At this point, (stack pointer + 0x18) is the base of our saved
1223 register block. */
1224
1225 i = 0;
1226 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1227 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1228
1229 /* tpoint address may be 64-bit wide. */
1230 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1231 memcpy (buf + i, &tpoint, 8);
1232 i += 8;
1233 append_insns (&buildaddr, i, buf);
1234
1235 /* The collector function, being in the shared library, may be more
1236 than 31 bits away from the jump pad. */
1237 i = 0;
1238 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1239 memcpy (buf + i, &collector, 8);
1240 i += 8;
1241 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1242 append_insns (&buildaddr, i, buf);
1243
1244 /* Clear the spin-lock. */
1245 i = 0;
1246 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1247 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1248 memcpy (buf + i, &lockaddr, 8);
1249 i += 8;
1250 append_insns (&buildaddr, i, buf);
1251
1252 /* Remove the stack space that had been used for the collecting_t object. */
1253 i = 0;
1254 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1255 append_insns (&buildaddr, i, buf);
1256
1257 /* Restore register state. */
1258 i = 0;
1259 buf[i++] = 0x48; /* add $0x8,%rsp */
1260 buf[i++] = 0x83;
1261 buf[i++] = 0xc4;
1262 buf[i++] = 0x08;
1263 buf[i++] = 0x9d; /* popfq */
1264 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1265 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1266 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1267 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1268 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1269 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1270 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1271 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1272 buf[i++] = 0x58; /* pop %rax */
1273 buf[i++] = 0x5b; /* pop %rbx */
1274 buf[i++] = 0x59; /* pop %rcx */
1275 buf[i++] = 0x5a; /* pop %rdx */
1276 buf[i++] = 0x5e; /* pop %rsi */
1277 buf[i++] = 0x5f; /* pop %rdi */
1278 buf[i++] = 0x5d; /* pop %rbp */
1279 buf[i++] = 0x5c; /* pop %rsp */
1280 append_insns (&buildaddr, i, buf);
1281
1282 /* Now, adjust the original instruction to execute in the jump
1283 pad. */
1284 *adjusted_insn_addr = buildaddr;
1285 relocate_instruction (&buildaddr, tpaddr);
1286 *adjusted_insn_addr_end = buildaddr;
1287
1288 /* Finally, write a jump back to the program. */
1289
1290 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1291 if (loffset > INT_MAX || loffset < INT_MIN)
1292 {
1293 sprintf (err,
1294 "E.Jump back from jump pad too far from tracepoint "
1295 "(offset 0x%" PRIx64 " > int32).", loffset);
1296 return 1;
1297 }
1298
1299 offset = (int) loffset;
1300 memcpy (buf, jump_insn, sizeof (jump_insn));
1301 memcpy (buf + 1, &offset, 4);
1302 append_insns (&buildaddr, sizeof (jump_insn), buf);
1303
1304 /* The jump pad is now built. Wire in a jump to our jump pad. This
1305 is always done last (by our caller actually), so that we can
1306 install fast tracepoints with threads running. This relies on
1307 the agent's atomic write support. */
1308 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1309 if (loffset > INT_MAX || loffset < INT_MIN)
1310 {
1311 sprintf (err,
1312 "E.Jump pad too far from tracepoint "
1313 "(offset 0x%" PRIx64 " > int32).", loffset);
1314 return 1;
1315 }
1316
1317 offset = (int) loffset;
1318
1319 memcpy (buf, jump_insn, sizeof (jump_insn));
1320 memcpy (buf + 1, &offset, 4);
1321 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1322 *jjump_pad_insn_size = sizeof (jump_insn);
1323
1324 /* Return the end address of our pad. */
1325 *jump_entry = buildaddr;
1326
1327 return 0;
1328 }
1329
1330 #endif /* __x86_64__ */
1331
1332 /* Build a jump pad that saves registers and calls a collection
1333 function. Writes the instruction that jumps to the jump pad into
1334 JJUMPAD_INSN. The caller is responsible for writing it in at the
1335 tracepoint address. */
1336
1337 static int
1338 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1339 CORE_ADDR collector,
1340 CORE_ADDR lockaddr,
1341 ULONGEST orig_size,
1342 CORE_ADDR *jump_entry,
1343 CORE_ADDR *trampoline,
1344 ULONGEST *trampoline_size,
1345 unsigned char *jjump_pad_insn,
1346 ULONGEST *jjump_pad_insn_size,
1347 CORE_ADDR *adjusted_insn_addr,
1348 CORE_ADDR *adjusted_insn_addr_end,
1349 char *err)
1350 {
1351 unsigned char buf[0x100];
1352 int i, offset;
1353 CORE_ADDR buildaddr = *jump_entry;
1354
1355 /* Build the jump pad. */
1356
1357 /* First, do tracepoint data collection. Save registers. */
1358 i = 0;
1359 buf[i++] = 0x60; /* pushad */
1360 buf[i++] = 0x68; /* push tpaddr aka $pc */
1361 *((int *)(buf + i)) = (int) tpaddr;
1362 i += 4;
1363 buf[i++] = 0x9c; /* pushf */
1364 buf[i++] = 0x1e; /* push %ds */
1365 buf[i++] = 0x06; /* push %es */
1366 buf[i++] = 0x0f; /* push %fs */
1367 buf[i++] = 0xa0;
1368 buf[i++] = 0x0f; /* push %gs */
1369 buf[i++] = 0xa8;
1370 buf[i++] = 0x16; /* push %ss */
1371 buf[i++] = 0x0e; /* push %cs */
1372 append_insns (&buildaddr, i, buf);
1373
1374 /* Stack space for the collecting_t object. */
1375 i = 0;
1376 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1377
1378 /* Build the object. */
1379 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1380 memcpy (buf + i, &tpoint, 4);
1381 i += 4;
1382 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1383
1384 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1385 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1386 append_insns (&buildaddr, i, buf);
1387
1388 /* spin-lock. Note this uses cmpxchg, which is not available on the
1389 original i386. If we cared about that, this could use xchg instead. */
1390
1391 i = 0;
1392 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1393 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1394 %esp,<lockaddr> */
1395 memcpy (&buf[i], (void *) &lockaddr, 4);
1396 i += 4;
1397 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1398 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1399 append_insns (&buildaddr, i, buf);
1400
1401
1402 /* Set up arguments to the gdb_collect call. */
1403 i = 0;
1404 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1405 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1406 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1407 append_insns (&buildaddr, i, buf);
1408
1409 i = 0;
1410 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1411 append_insns (&buildaddr, i, buf);
1412
1413 i = 0;
1414 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1415 memcpy (&buf[i], (void *) &tpoint, 4);
1416 i += 4;
1417 append_insns (&buildaddr, i, buf);
1418
1419 buf[0] = 0xe8; /* call <reladdr> */
1420 offset = collector - (buildaddr + sizeof (jump_insn));
1421 memcpy (buf + 1, &offset, 4);
1422 append_insns (&buildaddr, 5, buf);
1423 /* Clean up after the call. */
1424 buf[0] = 0x83; /* add $0x8,%esp */
1425 buf[1] = 0xc4;
1426 buf[2] = 0x08;
1427 append_insns (&buildaddr, 3, buf);
1428
1429
1430 /* Clear the spin-lock. This would need the LOCK prefix on older
1431 broken archs. */
1432 i = 0;
1433 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1434 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1435 memcpy (buf + i, &lockaddr, 4);
1436 i += 4;
1437 append_insns (&buildaddr, i, buf);
1438
1439
1440 /* Remove the stack space that had been used for the collecting_t object. */
1441 i = 0;
1442 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1443 append_insns (&buildaddr, i, buf);
1444
1445 i = 0;
1446 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1447 buf[i++] = 0xc4;
1448 buf[i++] = 0x04;
1449 buf[i++] = 0x17; /* pop %ss */
1450 buf[i++] = 0x0f; /* pop %gs */
1451 buf[i++] = 0xa9;
1452 buf[i++] = 0x0f; /* pop %fs */
1453 buf[i++] = 0xa1;
1454 buf[i++] = 0x07; /* pop %es */
1455 buf[i++] = 0x1f; /* pop %ds */
1456 buf[i++] = 0x9d; /* popf */
1457 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1458 buf[i++] = 0xc4;
1459 buf[i++] = 0x04;
1460 buf[i++] = 0x61; /* popad */
1461 append_insns (&buildaddr, i, buf);
1462
1463 /* Now, adjust the original instruction to execute in the jump
1464 pad. */
1465 *adjusted_insn_addr = buildaddr;
1466 relocate_instruction (&buildaddr, tpaddr);
1467 *adjusted_insn_addr_end = buildaddr;
1468
1469 /* Write the jump back to the program. */
1470 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1471 memcpy (buf, jump_insn, sizeof (jump_insn));
1472 memcpy (buf + 1, &offset, 4);
1473 append_insns (&buildaddr, sizeof (jump_insn), buf);
1474
1475 /* The jump pad is now built. Wire in a jump to our jump pad. This
1476 is always done last (by our caller actually), so that we can
1477 install fast tracepoints with threads running. This relies on
1478 the agent's atomic write support. */
1479 if (orig_size == 4)
1480 {
1481 /* Create a trampoline. */
1482 *trampoline_size = sizeof (jump_insn);
1483 if (!claim_trampoline_space (*trampoline_size, trampoline))
1484 {
1485 /* No trampoline space available. */
1486 strcpy (err,
1487 "E.Cannot allocate trampoline space needed for fast "
1488 "tracepoints on 4-byte instructions.");
1489 return 1;
1490 }
1491
1492 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1493 memcpy (buf, jump_insn, sizeof (jump_insn));
1494 memcpy (buf + 1, &offset, 4);
1495 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1496
1497 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1498 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1499 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1500 memcpy (buf + 2, &offset, 2);
1501 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1502 *jjump_pad_insn_size = sizeof (small_jump_insn);
1503 }
1504 else
1505 {
1506 /* Else use a 32-bit relative jump instruction. */
1507 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1508 memcpy (buf, jump_insn, sizeof (jump_insn));
1509 memcpy (buf + 1, &offset, 4);
1510 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1511 *jjump_pad_insn_size = sizeof (jump_insn);
1512 }
1513
1514 /* Return the end address of our pad. */
1515 *jump_entry = buildaddr;
1516
1517 return 0;
1518 }
1519
1520 static int
1521 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1522 CORE_ADDR collector,
1523 CORE_ADDR lockaddr,
1524 ULONGEST orig_size,
1525 CORE_ADDR *jump_entry,
1526 CORE_ADDR *trampoline,
1527 ULONGEST *trampoline_size,
1528 unsigned char *jjump_pad_insn,
1529 ULONGEST *jjump_pad_insn_size,
1530 CORE_ADDR *adjusted_insn_addr,
1531 CORE_ADDR *adjusted_insn_addr_end,
1532 char *err)
1533 {
1534 #ifdef __x86_64__
1535 if (is_64bit_tdesc ())
1536 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1537 collector, lockaddr,
1538 orig_size, jump_entry,
1539 trampoline, trampoline_size,
1540 jjump_pad_insn,
1541 jjump_pad_insn_size,
1542 adjusted_insn_addr,
1543 adjusted_insn_addr_end,
1544 err);
1545 #endif
1546
1547 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1548 collector, lockaddr,
1549 orig_size, jump_entry,
1550 trampoline, trampoline_size,
1551 jjump_pad_insn,
1552 jjump_pad_insn_size,
1553 adjusted_insn_addr,
1554 adjusted_insn_addr_end,
1555 err);
1556 }
1557
1558 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1559 architectures. */
1560
1561 static int
1562 x86_get_min_fast_tracepoint_insn_len (void)
1563 {
1564 static int warned_about_fast_tracepoints = 0;
1565
1566 #ifdef __x86_64__
1567 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1568 used for fast tracepoints. */
1569 if (is_64bit_tdesc ())
1570 return 5;
1571 #endif
1572
1573 if (agent_loaded_p ())
1574 {
1575 char errbuf[IPA_BUFSIZ];
1576
1577 errbuf[0] = '\0';
1578
1579 /* On x86, if trampolines are available, then 4-byte jump instructions
1580 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1581 with a 4-byte offset are used instead. */
1582 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1583 return 4;
1584 else
1585 {
1586 /* GDB has no channel to explain to the user why a shorter fast
1587 tracepoint is not possible, but at least make GDBserver
1588 mention that something has gone awry. */
1589 if (!warned_about_fast_tracepoints)
1590 {
1591 warning ("4-byte fast tracepoints not available; %s", errbuf);
1592 warned_about_fast_tracepoints = 1;
1593 }
1594 return 5;
1595 }
1596 }
1597 else
1598 {
1599 /* Indicate that the minimum length is currently unknown since the IPA
1600 has not loaded yet. */
1601 return 0;
1602 }
1603 }
1604
1605 static void
1606 add_insns (unsigned char *start, int len)
1607 {
1608 CORE_ADDR buildaddr = current_insn_ptr;
1609
1610 if (debug_threads)
1611 debug_printf ("Adding %d bytes of insn at %s\n",
1612 len, paddress (buildaddr));
1613
1614 append_insns (&buildaddr, len, start);
1615 current_insn_ptr = buildaddr;
1616 }
1617
1618 /* Our general strategy for emitting code is to avoid specifying raw
1619 bytes whenever possible, and instead copy a block of inline asm
1620 that is embedded in the function. This is a little messy, because
1621 we need to keep the compiler from discarding what looks like dead
1622 code, plus suppress various warnings. */
1623
1624 #define EMIT_ASM(NAME, INSNS) \
1625 do \
1626 { \
1627 extern unsigned char start_ ## NAME, end_ ## NAME; \
1628 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1629 __asm__ ("jmp end_" #NAME "\n" \
1630 "\t" "start_" #NAME ":" \
1631 "\t" INSNS "\n" \
1632 "\t" "end_" #NAME ":"); \
1633 } while (0)
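/* For example (illustrative only), EMIT_ASM (my_nop, "nop") assembles a
   single NOP between the start_my_nop and end_my_nop labels and writes
   that one byte into the jump pad at current_insn_ptr via add_insns.  */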
1634
1635 #ifdef __x86_64__
1636
1637 #define EMIT_ASM32(NAME,INSNS) \
1638 do \
1639 { \
1640 extern unsigned char start_ ## NAME, end_ ## NAME; \
1641 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1642 __asm__ (".code32\n" \
1643 "\t" "jmp end_" #NAME "\n" \
1644 "\t" "start_" #NAME ":\n" \
1645 "\t" INSNS "\n" \
1646 "\t" "end_" #NAME ":\n" \
1647 ".code64\n"); \
1648 } while (0)
1649
1650 #else
1651
1652 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1653
1654 #endif
1655
1656 #ifdef __x86_64__
1657
1658 static void
1659 amd64_emit_prologue (void)
1660 {
1661 EMIT_ASM (amd64_prologue,
1662 "pushq %rbp\n\t"
1663 "movq %rsp,%rbp\n\t"
1664 "sub $0x20,%rsp\n\t"
1665 "movq %rdi,-8(%rbp)\n\t"
1666 "movq %rsi,-16(%rbp)");
1667 }
1668
1669
1670 static void
1671 amd64_emit_epilogue (void)
1672 {
1673 EMIT_ASM (amd64_epilogue,
1674 "movq -16(%rbp),%rdi\n\t"
1675 "movq %rax,(%rdi)\n\t"
1676 "xor %rax,%rax\n\t"
1677 "leave\n\t"
1678 "ret");
1679 }
1680
1681 static void
1682 amd64_emit_add (void)
1683 {
1684 EMIT_ASM (amd64_add,
1685 "add (%rsp),%rax\n\t"
1686 "lea 0x8(%rsp),%rsp");
1687 }
1688
1689 static void
1690 amd64_emit_sub (void)
1691 {
1692 EMIT_ASM (amd64_sub,
1693 "sub %rax,(%rsp)\n\t"
1694 "pop %rax");
1695 }
1696
1697 static void
1698 amd64_emit_mul (void)
1699 {
1700 emit_error = 1;
1701 }
1702
1703 static void
1704 amd64_emit_lsh (void)
1705 {
1706 emit_error = 1;
1707 }
1708
1709 static void
1710 amd64_emit_rsh_signed (void)
1711 {
1712 emit_error = 1;
1713 }
1714
1715 static void
1716 amd64_emit_rsh_unsigned (void)
1717 {
1718 emit_error = 1;
1719 }
1720
1721 static void
1722 amd64_emit_ext (int arg)
1723 {
1724 switch (arg)
1725 {
1726 case 8:
1727 EMIT_ASM (amd64_ext_8,
1728 "cbtw\n\t"
1729 "cwtl\n\t"
1730 "cltq");
1731 break;
1732 case 16:
1733 EMIT_ASM (amd64_ext_16,
1734 "cwtl\n\t"
1735 "cltq");
1736 break;
1737 case 32:
1738 EMIT_ASM (amd64_ext_32,
1739 "cltq");
1740 break;
1741 default:
1742 emit_error = 1;
1743 }
1744 }
1745
1746 static void
1747 amd64_emit_log_not (void)
1748 {
1749 EMIT_ASM (amd64_log_not,
1750 "test %rax,%rax\n\t"
1751 "sete %cl\n\t"
1752 "movzbq %cl,%rax");
1753 }
1754
1755 static void
1756 amd64_emit_bit_and (void)
1757 {
1758 EMIT_ASM (amd64_and,
1759 "and (%rsp),%rax\n\t"
1760 "lea 0x8(%rsp),%rsp");
1761 }
1762
1763 static void
1764 amd64_emit_bit_or (void)
1765 {
1766 EMIT_ASM (amd64_or,
1767 "or (%rsp),%rax\n\t"
1768 "lea 0x8(%rsp),%rsp");
1769 }
1770
1771 static void
1772 amd64_emit_bit_xor (void)
1773 {
1774 EMIT_ASM (amd64_xor,
1775 "xor (%rsp),%rax\n\t"
1776 "lea 0x8(%rsp),%rsp");
1777 }
1778
1779 static void
1780 amd64_emit_bit_not (void)
1781 {
1782 EMIT_ASM (amd64_bit_not,
1783 "xorq $0xffffffffffffffff,%rax");
1784 }
1785
1786 static void
1787 amd64_emit_equal (void)
1788 {
1789 EMIT_ASM (amd64_equal,
1790 "cmp %rax,(%rsp)\n\t"
1791 "je .Lamd64_equal_true\n\t"
1792 "xor %rax,%rax\n\t"
1793 "jmp .Lamd64_equal_end\n\t"
1794 ".Lamd64_equal_true:\n\t"
1795 "mov $0x1,%rax\n\t"
1796 ".Lamd64_equal_end:\n\t"
1797 "lea 0x8(%rsp),%rsp");
1798 }
1799
1800 static void
1801 amd64_emit_less_signed (void)
1802 {
1803 EMIT_ASM (amd64_less_signed,
1804 "cmp %rax,(%rsp)\n\t"
1805 "jl .Lamd64_less_signed_true\n\t"
1806 "xor %rax,%rax\n\t"
1807 "jmp .Lamd64_less_signed_end\n\t"
1808 ".Lamd64_less_signed_true:\n\t"
1809 "mov $1,%rax\n\t"
1810 ".Lamd64_less_signed_end:\n\t"
1811 "lea 0x8(%rsp),%rsp");
1812 }
1813
1814 static void
1815 amd64_emit_less_unsigned (void)
1816 {
1817 EMIT_ASM (amd64_less_unsigned,
1818 "cmp %rax,(%rsp)\n\t"
1819 "jb .Lamd64_less_unsigned_true\n\t"
1820 "xor %rax,%rax\n\t"
1821 "jmp .Lamd64_less_unsigned_end\n\t"
1822 ".Lamd64_less_unsigned_true:\n\t"
1823 "mov $1,%rax\n\t"
1824 ".Lamd64_less_unsigned_end:\n\t"
1825 "lea 0x8(%rsp),%rsp");
1826 }
1827
1828 static void
1829 amd64_emit_ref (int size)
1830 {
1831 switch (size)
1832 {
1833 case 1:
1834 EMIT_ASM (amd64_ref1,
1835 "movb (%rax),%al");
1836 break;
1837 case 2:
1838 EMIT_ASM (amd64_ref2,
1839 "movw (%rax),%ax");
1840 break;
1841 case 4:
1842 EMIT_ASM (amd64_ref4,
1843 "movl (%rax),%eax");
1844 break;
1845 case 8:
1846 EMIT_ASM (amd64_ref8,
1847 "movq (%rax),%rax");
1848 break;
1849 }
1850 }
1851
1852 static void
1853 amd64_emit_if_goto (int *offset_p, int *size_p)
1854 {
1855 EMIT_ASM (amd64_if_goto,
1856 "mov %rax,%rcx\n\t"
1857 "pop %rax\n\t"
1858 "cmp $0,%rcx\n\t"
1859 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1860 if (offset_p)
1861 *offset_p = 10;
1862 if (size_p)
1863 *size_p = 4;
1864 }
1865
1866 static void
1867 amd64_emit_goto (int *offset_p, int *size_p)
1868 {
1869 EMIT_ASM (amd64_goto,
1870 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1871 if (offset_p)
1872 *offset_p = 1;
1873 if (size_p)
1874 *size_p = 4;
1875 }
1876
1877 static void
1878 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1879 {
1880 int diff = (to - (from + size));
1881 unsigned char buf[sizeof (int)];
1882
1883 if (size != 4)
1884 {
1885 emit_error = 1;
1886 return;
1887 }
1888
1889 memcpy (buf, &diff, sizeof (int));
1890 target_write_memory (from, buf, sizeof (int));
1891 }
1892
1893 static void
1894 amd64_emit_const (LONGEST num)
1895 {
1896 unsigned char buf[16];
1897 int i;
1898 CORE_ADDR buildaddr = current_insn_ptr;
1899
1900 i = 0;
1901 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1902 memcpy (&buf[i], &num, sizeof (num));
1903 i += 8;
1904 append_insns (&buildaddr, i, buf);
1905 current_insn_ptr = buildaddr;
1906 }
1907
1908 static void
1909 amd64_emit_call (CORE_ADDR fn)
1910 {
1911 unsigned char buf[16];
1912 int i;
1913 CORE_ADDR buildaddr;
1914 LONGEST offset64;
1915
1916 /* The destination function, being in the shared library, may be more
1917 than 31 bits away from the compiled code pad. */
1918
1919 buildaddr = current_insn_ptr;
1920
1921 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1922
1923 i = 0;
1924
1925 if (offset64 > INT_MAX || offset64 < INT_MIN)
1926 {
1927 /* Offset is too large for a direct call. Use an indirect callq
1928 through a register instead. Use %r10, since it is call-clobbered,
1929 so we don't have to push/pop it. */
1930 buf[i++] = 0x48; /* mov $fn,%r10 */
1931 buf[i++] = 0xba;
1932 memcpy (buf + i, &fn, 8);
1933 i += 8;
1934 buf[i++] = 0xff; /* callq *%r10 */
1935 buf[i++] = 0xd2;
1936 }
1937 else
1938 {
1939 int offset32 = offset64; /* we know we can't overflow here. */
1940
1941 buf[i++] = 0xe8; /* call <reladdr> */
1942 memcpy (buf + i, &offset32, 4);
1943 i += 4;
1944 }
1945
1946 append_insns (&buildaddr, i, buf);
1947 current_insn_ptr = buildaddr;
1948 }
1949
1950 static void
1951 amd64_emit_reg (int reg)
1952 {
1953 unsigned char buf[16];
1954 int i;
1955 CORE_ADDR buildaddr;
1956
1957 /* Assume raw_regs is still in %rdi. */
1958 buildaddr = current_insn_ptr;
1959 i = 0;
1960 buf[i++] = 0xbe; /* mov $<n>,%esi */
1961 memcpy (&buf[i], &reg, sizeof (reg));
1962 i += 4;
1963 append_insns (&buildaddr, i, buf);
1964 current_insn_ptr = buildaddr;
1965 amd64_emit_call (get_raw_reg_func_addr ());
1966 }
1967
1968 static void
1969 amd64_emit_pop (void)
1970 {
1971 EMIT_ASM (amd64_pop,
1972 "pop %rax");
1973 }
1974
1975 static void
1976 amd64_emit_stack_flush (void)
1977 {
1978 EMIT_ASM (amd64_stack_flush,
1979 "push %rax");
1980 }
1981
1982 static void
1983 amd64_emit_zero_ext (int arg)
1984 {
1985 switch (arg)
1986 {
1987 case 8:
1988 EMIT_ASM (amd64_zero_ext_8,
1989 "and $0xff,%rax");
1990 break;
1991 case 16:
1992 EMIT_ASM (amd64_zero_ext_16,
1993 "and $0xffff,%rax");
1994 break;
1995 case 32:
1996 EMIT_ASM (amd64_zero_ext_32,
1997 "mov $0xffffffff,%rcx\n\t"
1998 "and %rcx,%rax");
1999 break;
2000 default:
2001 emit_error = 1;
2002 }
2003 }
2004
2005 static void
2006 amd64_emit_swap (void)
2007 {
2008 EMIT_ASM (amd64_swap,
2009 "mov %rax,%rcx\n\t"
2010 "pop %rax\n\t"
2011 "push %rcx");
2012 }
2013
2014 static void
2015 amd64_emit_stack_adjust (int n)
2016 {
2017 unsigned char buf[16];
2018 int i;
2019 CORE_ADDR buildaddr = current_insn_ptr;
2020
2021 i = 0;
2022 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2023 buf[i++] = 0x8d;
2024 buf[i++] = 0x64;
2025 buf[i++] = 0x24;
2026 /* This only handles adjustments up to 16, but we don't expect any more. */
2027 buf[i++] = n * 8;
2028 append_insns (&buildaddr, i, buf);
2029 current_insn_ptr = buildaddr;
2030 }
2031
2032 /* FN's prototype is `LONGEST(*fn)(int)'. */
2033
2034 static void
2035 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2036 {
2037 unsigned char buf[16];
2038 int i;
2039 CORE_ADDR buildaddr;
2040
2041 buildaddr = current_insn_ptr;
2042 i = 0;
2043 buf[i++] = 0xbf; /* movl $<n>,%edi */
2044 memcpy (&buf[i], &arg1, sizeof (arg1));
2045 i += 4;
2046 append_insns (&buildaddr, i, buf);
2047 current_insn_ptr = buildaddr;
2048 amd64_emit_call (fn);
2049 }
2050
2051 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2052
2053 static void
2054 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2055 {
2056 unsigned char buf[16];
2057 int i;
2058 CORE_ADDR buildaddr;
2059
2060 buildaddr = current_insn_ptr;
2061 i = 0;
2062 buf[i++] = 0xbf; /* movl $<n>,%edi */
2063 memcpy (&buf[i], &arg1, sizeof (arg1));
2064 i += 4;
2065 append_insns (&buildaddr, i, buf);
2066 current_insn_ptr = buildaddr;
2067 EMIT_ASM (amd64_void_call_2_a,
2068 /* Save away a copy of the stack top. */
2069 "push %rax\n\t"
2070 /* Also pass top as the second argument. */
2071 "mov %rax,%rsi");
2072 amd64_emit_call (fn);
2073 EMIT_ASM (amd64_void_call_2_b,
2074 /* Restore the stack top, %rax may have been trashed. */
2075 "pop %rax");
2076 }
2077
2078 static void
2079 amd64_emit_eq_goto (int *offset_p, int *size_p)
2080 {
2081 EMIT_ASM (amd64_eq,
2082 "cmp %rax,(%rsp)\n\t"
2083 "jne .Lamd64_eq_fallthru\n\t"
2084 "lea 0x8(%rsp),%rsp\n\t"
2085 "pop %rax\n\t"
2086 /* jmp, but don't trust the assembler to choose the right jump */
2087 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2088 ".Lamd64_eq_fallthru:\n\t"
2089 "lea 0x8(%rsp),%rsp\n\t"
2090 "pop %rax");
2091
2092 if (offset_p)
2093 *offset_p = 13;
2094 if (size_p)
2095 *size_p = 4;
2096 }
2097
2098 static void
2099 amd64_emit_ne_goto (int *offset_p, int *size_p)
2100 {
2101 EMIT_ASM (amd64_ne,
2102 "cmp %rax,(%rsp)\n\t"
2103 "je .Lamd64_ne_fallthru\n\t"
2104 "lea 0x8(%rsp),%rsp\n\t"
2105 "pop %rax\n\t"
2106 /* jmp, but don't trust the assembler to choose the right jump */
2107 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2108 ".Lamd64_ne_fallthru:\n\t"
2109 "lea 0x8(%rsp),%rsp\n\t"
2110 "pop %rax");
2111
2112 if (offset_p)
2113 *offset_p = 13;
2114 if (size_p)
2115 *size_p = 4;
2116 }
2117
2118 static void
2119 amd64_emit_lt_goto (int *offset_p, int *size_p)
2120 {
2121 EMIT_ASM (amd64_lt,
2122 "cmp %rax,(%rsp)\n\t"
2123 "jnl .Lamd64_lt_fallthru\n\t"
2124 "lea 0x8(%rsp),%rsp\n\t"
2125 "pop %rax\n\t"
2126 /* jmp, but don't trust the assembler to choose the right jump */
2127 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2128 ".Lamd64_lt_fallthru:\n\t"
2129 "lea 0x8(%rsp),%rsp\n\t"
2130 "pop %rax");
2131
2132 if (offset_p)
2133 *offset_p = 13;
2134 if (size_p)
2135 *size_p = 4;
2136 }
2137
2138 static void
2139 amd64_emit_le_goto (int *offset_p, int *size_p)
2140 {
2141 EMIT_ASM (amd64_le,
2142 "cmp %rax,(%rsp)\n\t"
2143 "jnle .Lamd64_le_fallthru\n\t"
2144 "lea 0x8(%rsp),%rsp\n\t"
2145 "pop %rax\n\t"
2146 /* jmp, but don't trust the assembler to choose the right jump */
2147 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2148 ".Lamd64_le_fallthru:\n\t"
2149 "lea 0x8(%rsp),%rsp\n\t"
2150 "pop %rax");
2151
2152 if (offset_p)
2153 *offset_p = 13;
2154 if (size_p)
2155 *size_p = 4;
2156 }
2157
2158 static void
2159 amd64_emit_gt_goto (int *offset_p, int *size_p)
2160 {
2161 EMIT_ASM (amd64_gt,
2162 "cmp %rax,(%rsp)\n\t"
2163 "jng .Lamd64_gt_fallthru\n\t"
2164 "lea 0x8(%rsp),%rsp\n\t"
2165 "pop %rax\n\t"
2166 /* jmp, but don't trust the assembler to choose the right jump */
2167 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2168 ".Lamd64_gt_fallthru:\n\t"
2169 "lea 0x8(%rsp),%rsp\n\t"
2170 "pop %rax");
2171
2172 if (offset_p)
2173 *offset_p = 13;
2174 if (size_p)
2175 *size_p = 4;
2176 }
2177
2178 static void
2179 amd64_emit_ge_goto (int *offset_p, int *size_p)
2180 {
2181 EMIT_ASM (amd64_ge,
2182 "cmp %rax,(%rsp)\n\t"
2183 "jnge .Lamd64_ge_fallthru\n\t"
2184 ".Lamd64_ge_jump:\n\t"
2185 "lea 0x8(%rsp),%rsp\n\t"
2186 "pop %rax\n\t"
2187 /* jmp, but don't trust the assembler to choose the right jump */
2188 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2189 ".Lamd64_ge_fallthru:\n\t"
2190 "lea 0x8(%rsp),%rsp\n\t"
2191 "pop %rax");
2192
2193 if (offset_p)
2194 *offset_p = 13;
2195 if (size_p)
2196 *size_p = 4;
2197 }
2198
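/* The set of code-generation callbacks used by the agent expression
   compiler when the inferior is 64-bit.  */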
2199 struct emit_ops amd64_emit_ops =
2200 {
2201 amd64_emit_prologue,
2202 amd64_emit_epilogue,
2203 amd64_emit_add,
2204 amd64_emit_sub,
2205 amd64_emit_mul,
2206 amd64_emit_lsh,
2207 amd64_emit_rsh_signed,
2208 amd64_emit_rsh_unsigned,
2209 amd64_emit_ext,
2210 amd64_emit_log_not,
2211 amd64_emit_bit_and,
2212 amd64_emit_bit_or,
2213 amd64_emit_bit_xor,
2214 amd64_emit_bit_not,
2215 amd64_emit_equal,
2216 amd64_emit_less_signed,
2217 amd64_emit_less_unsigned,
2218 amd64_emit_ref,
2219 amd64_emit_if_goto,
2220 amd64_emit_goto,
2221 amd64_write_goto_address,
2222 amd64_emit_const,
2223 amd64_emit_call,
2224 amd64_emit_reg,
2225 amd64_emit_pop,
2226 amd64_emit_stack_flush,
2227 amd64_emit_zero_ext,
2228 amd64_emit_swap,
2229 amd64_emit_stack_adjust,
2230 amd64_emit_int_call_1,
2231 amd64_emit_void_call_2,
2232 amd64_emit_eq_goto,
2233 amd64_emit_ne_goto,
2234 amd64_emit_lt_goto,
2235 amd64_emit_le_goto,
2236 amd64_emit_gt_goto,
2237 amd64_emit_ge_goto
2238 };
2239
2240 #endif /* __x86_64__ */
2241
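/* 32-bit (i386) versions of the emitters above.  Here 64-bit values are
   kept split across the %eax (low half) / %ebx (high half) register
   pair.  */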
2242 static void
2243 i386_emit_prologue (void)
2244 {
2245 EMIT_ASM32 (i386_prologue,
2246 "push %ebp\n\t"
2247 "mov %esp,%ebp\n\t"
2248 "push %ebx");
2249 /* At this point, the raw regs base address is at 8(%ebp), and the
2250 value pointer is at 12(%ebp). */
2251 }
2252
2253 static void
2254 i386_emit_epilogue (void)
2255 {
2256 EMIT_ASM32 (i386_epilogue,
2257 "mov 12(%ebp),%ecx\n\t"
2258 "mov %eax,(%ecx)\n\t"
2259 "mov %ebx,0x4(%ecx)\n\t"
2260 "xor %eax,%eax\n\t"
2261 "pop %ebx\n\t"
2262 "pop %ebp\n\t"
2263 "ret");
2264 }
2265
2266 static void
2267 i386_emit_add (void)
2268 {
2269 EMIT_ASM32 (i386_add,
2270 "add (%esp),%eax\n\t"
2271 "adc 0x4(%esp),%ebx\n\t"
2272 "lea 0x8(%esp),%esp");
2273 }
2274
2275 static void
2276 i386_emit_sub (void)
2277 {
2278 EMIT_ASM32 (i386_sub,
2279 "subl %eax,(%esp)\n\t"
2280 "sbbl %ebx,4(%esp)\n\t"
2281 "pop %eax\n\t"
2282 "pop %ebx");
2283 }
2284
2285 static void
2286 i386_emit_mul (void)
2287 {
2288 emit_error = 1;
2289 }
2290
2291 static void
2292 i386_emit_lsh (void)
2293 {
2294 emit_error = 1;
2295 }
2296
2297 static void
2298 i386_emit_rsh_signed (void)
2299 {
2300 emit_error = 1;
2301 }
2302
2303 static void
2304 i386_emit_rsh_unsigned (void)
2305 {
2306 emit_error = 1;
2307 }
2308
2309 static void
2310 i386_emit_ext (int arg)
2311 {
2312 switch (arg)
2313 {
2314 case 8:
2315 EMIT_ASM32 (i386_ext_8,
2316 "cbtw\n\t"
2317 "cwtl\n\t"
2318 "movl %eax,%ebx\n\t"
2319 "sarl $31,%ebx");
2320 break;
2321 case 16:
2322 EMIT_ASM32 (i386_ext_16,
2323 "cwtl\n\t"
2324 "movl %eax,%ebx\n\t"
2325 "sarl $31,%ebx");
2326 break;
2327 case 32:
2328 EMIT_ASM32 (i386_ext_32,
2329 "movl %eax,%ebx\n\t"
2330 "sarl $31,%ebx");
2331 break;
2332 default:
2333 emit_error = 1;
2334 }
2335 }
2336
2337 static void
2338 i386_emit_log_not (void)
2339 {
2340 EMIT_ASM32 (i386_log_not,
2341 "or %ebx,%eax\n\t"
2342 "test %eax,%eax\n\t"
2343 "sete %cl\n\t"
2344 "xor %ebx,%ebx\n\t"
2345 "movzbl %cl,%eax");
2346 }
2347
2348 static void
2349 i386_emit_bit_and (void)
2350 {
2351 EMIT_ASM32 (i386_and,
2352 "and (%esp),%eax\n\t"
2353 "and 0x4(%esp),%ebx\n\t"
2354 "lea 0x8(%esp),%esp");
2355 }
2356
2357 static void
2358 i386_emit_bit_or (void)
2359 {
2360 EMIT_ASM32 (i386_or,
2361 "or (%esp),%eax\n\t"
2362 "or 0x4(%esp),%ebx\n\t"
2363 "lea 0x8(%esp),%esp");
2364 }
2365
2366 static void
2367 i386_emit_bit_xor (void)
2368 {
2369 EMIT_ASM32 (i386_xor,
2370 "xor (%esp),%eax\n\t"
2371 "xor 0x4(%esp),%ebx\n\t"
2372 "lea 0x8(%esp),%esp");
2373 }
2374
2375 static void
2376 i386_emit_bit_not (void)
2377 {
2378 EMIT_ASM32 (i386_bit_not,
2379 "xor $0xffffffff,%eax\n\t"
2380 "xor $0xffffffff,%ebx");
2381 }
2382
2383 static void
2384 i386_emit_equal (void)
2385 {
2386 EMIT_ASM32 (i386_equal,
2387 "cmpl %ebx,4(%esp)\n\t"
2388 "jne .Li386_equal_false\n\t"
2389 "cmpl %eax,(%esp)\n\t"
2390 "je .Li386_equal_true\n\t"
2391 ".Li386_equal_false:\n\t"
2392 "xor %eax,%eax\n\t"
2393 "jmp .Li386_equal_end\n\t"
2394 ".Li386_equal_true:\n\t"
2395 "mov $1,%eax\n\t"
2396 ".Li386_equal_end:\n\t"
2397 "xor %ebx,%ebx\n\t"
2398 "lea 0x8(%esp),%esp");
2399 }
2400
2401 static void
2402 i386_emit_less_signed (void)
2403 {
2404 EMIT_ASM32 (i386_less_signed,
2405 "cmpl %ebx,4(%esp)\n\t"
2406 "jl .Li386_less_signed_true\n\t"
2407 "jne .Li386_less_signed_false\n\t"
2408 "cmpl %eax,(%esp)\n\t"
2409 "jl .Li386_less_signed_true\n\t"
2410 ".Li386_less_signed_false:\n\t"
2411 "xor %eax,%eax\n\t"
2412 "jmp .Li386_less_signed_end\n\t"
2413 ".Li386_less_signed_true:\n\t"
2414 "mov $1,%eax\n\t"
2415 ".Li386_less_signed_end:\n\t"
2416 "xor %ebx,%ebx\n\t"
2417 "lea 0x8(%esp),%esp");
2418 }
2419
2420 static void
2421 i386_emit_less_unsigned (void)
2422 {
2423 EMIT_ASM32 (i386_less_unsigned,
2424 "cmpl %ebx,4(%esp)\n\t"
2425 "jb .Li386_less_unsigned_true\n\t"
2426 "jne .Li386_less_unsigned_false\n\t"
2427 "cmpl %eax,(%esp)\n\t"
2428 "jb .Li386_less_unsigned_true\n\t"
2429 ".Li386_less_unsigned_false:\n\t"
2430 "xor %eax,%eax\n\t"
2431 "jmp .Li386_less_unsigned_end\n\t"
2432 ".Li386_less_unsigned_true:\n\t"
2433 "mov $1,%eax\n\t"
2434 ".Li386_less_unsigned_end:\n\t"
2435 "xor %ebx,%ebx\n\t"
2436 "lea 0x8(%esp),%esp");
2437 }
2438
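/* Emit code to fetch a SIZE-byte value from the address held in %eax,
   the top of stack.  */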
2439 static void
2440 i386_emit_ref (int size)
2441 {
2442 switch (size)
2443 {
2444 case 1:
2445 EMIT_ASM32 (i386_ref1,
2446 "movb (%eax),%al");
2447 break;
2448 case 2:
2449 EMIT_ASM32 (i386_ref2,
2450 "movw (%eax),%ax");
2451 break;
2452 case 4:
2453 EMIT_ASM32 (i386_ref4,
2454 "movl (%eax),%eax");
2455 break;
2456 case 8:
2457 EMIT_ASM32 (i386_ref8,
2458 "movl 4(%eax),%ebx\n\t"
2459 "movl (%eax),%eax");
2460 break;
2461 }
2462 }
2463
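/* Emit code to pop the top of stack and branch if it was non-zero.
   *OFFSET_P and *SIZE_P report the location and size of the jump
   displacement so the caller can patch in the destination later.  */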
2464 static void
2465 i386_emit_if_goto (int *offset_p, int *size_p)
2466 {
2467 EMIT_ASM32 (i386_if_goto,
2468 "mov %eax,%ecx\n\t"
2469 "or %ebx,%ecx\n\t"
2470 "pop %eax\n\t"
2471 "pop %ebx\n\t"
2472 "cmpl $0,%ecx\n\t"
2473 /* Don't trust the assembler to choose the right jump */
2474 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2475
2476 if (offset_p)
2477 *offset_p = 11; /* Be sure that this matches the sequence above. */
2478 if (size_p)
2479 *size_p = 4;
2480 }
2481
2482 static void
2483 i386_emit_goto (int *offset_p, int *size_p)
2484 {
2485 EMIT_ASM32 (i386_goto,
2486 /* Don't trust the assembler to choose the right jump */
2487 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2488 if (offset_p)
2489 *offset_p = 1;
2490 if (size_p)
2491 *size_p = 4;
2492 }
2493
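/* Fix up a previously emitted jump so that it lands at TO.  FROM is the
   address of the SIZE-byte displacement field; the displacement is
   relative to the first byte past that field.  */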
2494 static void
2495 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2496 {
2497 int diff = (to - (from + size));
2498 unsigned char buf[sizeof (int)];
2499
2500 /* We're only doing 4-byte sizes at the moment. */
2501 if (size != 4)
2502 {
2503 emit_error = 1;
2504 return;
2505 }
2506
2507 memcpy (buf, &diff, sizeof (int));
2508 target_write_memory (from, buf, sizeof (int));
2509 }
2510
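/* Emit code to load the 64-bit constant NUM into the top-of-stack pair:
   low half in %eax, high half in %ebx.  */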
2511 static void
2512 i386_emit_const (LONGEST num)
2513 {
2514 unsigned char buf[16];
2515 int i, hi, lo;
2516 CORE_ADDR buildaddr = current_insn_ptr;
2517
2518 i = 0;
2519 buf[i++] = 0xb8; /* mov $<n>,%eax */
2520 lo = num & 0xffffffff;
2521 memcpy (&buf[i], &lo, sizeof (lo));
2522 i += 4;
2523 hi = ((num >> 32) & 0xffffffff);
2524 if (hi)
2525 {
2526 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2527 memcpy (&buf[i], &hi, sizeof (hi));
2528 i += 4;
2529 }
2530 else
2531 {
2532 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2533 }
2534 append_insns (&buildaddr, i, buf);
2535 current_insn_ptr = buildaddr;
2536 }
2537
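/* Emit a call to the function at address FN, using a 32-bit relative
   call instruction.  */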
2538 static void
2539 i386_emit_call (CORE_ADDR fn)
2540 {
2541 unsigned char buf[16];
2542 int i, offset;
2543 CORE_ADDR buildaddr;
2544
2545 buildaddr = current_insn_ptr;
2546 i = 0;
2547 buf[i++] = 0xe8; /* call <reladdr> */
2548 offset = ((int) fn) - (buildaddr + 5);
2549 memcpy (buf + 1, &offset, 4);
2550 append_insns (&buildaddr, 5, buf);
2551 current_insn_ptr = buildaddr;
2552 }
2553
2554 static void
2555 i386_emit_reg (int reg)
2556 {
2557 unsigned char buf[16];
2558 int i;
2559 CORE_ADDR buildaddr;
2560
2561 EMIT_ASM32 (i386_reg_a,
2562 "sub $0x8,%esp");
2563 buildaddr = current_insn_ptr;
2564 i = 0;
2565 buf[i++] = 0xb8; /* mov $<n>,%eax */
2566 memcpy (&buf[i], &reg, sizeof (reg));
2567 i += 4;
2568 append_insns (&buildaddr, i, buf);
2569 current_insn_ptr = buildaddr;
2570 EMIT_ASM32 (i386_reg_b,
2571 "mov %eax,4(%esp)\n\t"
2572 "mov 8(%ebp),%eax\n\t"
2573 "mov %eax,(%esp)");
2574 i386_emit_call (get_raw_reg_func_addr ());
2575 EMIT_ASM32 (i386_reg_c,
2576 "xor %ebx,%ebx\n\t"
2577 "lea 0x8(%esp),%esp");
2578 }
2579
2580 static void
2581 i386_emit_pop (void)
2582 {
2583 EMIT_ASM32 (i386_pop,
2584 "pop %eax\n\t"
2585 "pop %ebx");
2586 }
2587
2588 static void
2589 i386_emit_stack_flush (void)
2590 {
2591 EMIT_ASM32 (i386_stack_flush,
2592 "push %ebx\n\t"
2593 "push %eax");
2594 }
2595
2596 static void
2597 i386_emit_zero_ext (int arg)
2598 {
2599 switch (arg)
2600 {
2601 case 8:
2602 EMIT_ASM32 (i386_zero_ext_8,
2603 "and $0xff,%eax\n\t"
2604 "xor %ebx,%ebx");
2605 break;
2606 case 16:
2607 EMIT_ASM32 (i386_zero_ext_16,
2608 "and $0xffff,%eax\n\t"
2609 "xor %ebx,%ebx");
2610 break;
2611 case 32:
2612 EMIT_ASM32 (i386_zero_ext_32,
2613 "xor %ebx,%ebx");
2614 break;
2615 default:
2616 emit_error = 1;
2617 }
2618 }
2619
2620 static void
2621 i386_emit_swap (void)
2622 {
2623 EMIT_ASM32 (i386_swap,
2624 "mov %eax,%ecx\n\t"
2625 "mov %ebx,%edx\n\t"
2626 "pop %eax\n\t"
2627 "pop %ebx\n\t"
2628 "push %edx\n\t"
2629 "push %ecx");
2630 }
2631
2632 static void
2633 i386_emit_stack_adjust (int n)
2634 {
2635 unsigned char buf[16];
2636 int i;
2637 CORE_ADDR buildaddr = current_insn_ptr;
2638
2639 i = 0;
2640 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2641 buf[i++] = 0x64;
2642 buf[i++] = 0x24;
2643 buf[i++] = n * 8;
2644 append_insns (&buildaddr, i, buf);
2645 current_insn_ptr = buildaddr;
2646 }
2647
2648 /* FN's prototype is `LONGEST(*fn)(int)'. */
2649
2650 static void
2651 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2652 {
2653 unsigned char buf[16];
2654 int i;
2655 CORE_ADDR buildaddr;
2656
2657 EMIT_ASM32 (i386_int_call_1_a,
2658 /* Reserve a bit of stack space. */
2659 "sub $0x8,%esp");
2660 /* Put the one argument on the stack. */
2661 buildaddr = current_insn_ptr;
2662 i = 0;
2663 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2664 buf[i++] = 0x04;
2665 buf[i++] = 0x24;
2666 memcpy (&buf[i], &arg1, sizeof (arg1));
2667 i += 4;
2668 append_insns (&buildaddr, i, buf);
2669 current_insn_ptr = buildaddr;
2670 i386_emit_call (fn);
2671 EMIT_ASM32 (i386_int_call_1_c,
2672 "mov %edx,%ebx\n\t"
2673 "lea 0x8(%esp),%esp");
2674 }
2675
2676 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2677
2678 static void
2679 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2680 {
2681 unsigned char buf[16];
2682 int i;
2683 CORE_ADDR buildaddr;
2684
2685 EMIT_ASM32 (i386_void_call_2_a,
2686 /* Preserve %eax only; we don't have to worry about %ebx. */
2687 "push %eax\n\t"
2688 /* Reserve a bit of stack space for arguments. */
2689 "sub $0x10,%esp\n\t"
2690 /* Copy "top" to the second argument position. (Note that
2691 we can't assume the function won't scribble on its
2692 arguments, so don't try to restore from this.) */
2693 "mov %eax,4(%esp)\n\t"
2694 "mov %ebx,8(%esp)");
2695 /* Put the first argument on the stack. */
2696 buildaddr = current_insn_ptr;
2697 i = 0;
2698 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2699 buf[i++] = 0x04;
2700 buf[i++] = 0x24;
2701 memcpy (&buf[i], &arg1, sizeof (arg1));
2702 i += 4;
2703 append_insns (&buildaddr, i, buf);
2704 current_insn_ptr = buildaddr;
2705 i386_emit_call (fn);
2706 EMIT_ASM32 (i386_void_call_2_b,
2707 "lea 0x10(%esp),%esp\n\t"
2708 /* Restore original stack top. */
2709 "pop %eax");
2710 }
2711
2712
2713 static void
2714 i386_emit_eq_goto (int *offset_p, int *size_p)
2715 {
2716 EMIT_ASM32 (eq,
2717 /* Check the low half first; it is more likely to decide the result. */
2718 "cmpl %eax,(%esp)\n\t"
2719 "jne .Leq_fallthru\n\t"
2720 "cmpl %ebx,4(%esp)\n\t"
2721 "jne .Leq_fallthru\n\t"
2722 "lea 0x8(%esp),%esp\n\t"
2723 "pop %eax\n\t"
2724 "pop %ebx\n\t"
2725 /* jmp, but don't trust the assembler to choose the right jump */
2726 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2727 ".Leq_fallthru:\n\t"
2728 "lea 0x8(%esp),%esp\n\t"
2729 "pop %eax\n\t"
2730 "pop %ebx");
2731
2732 if (offset_p)
2733 *offset_p = 18;
2734 if (size_p)
2735 *size_p = 4;
2736 }
2737
2738 static void
2739 i386_emit_ne_goto (int *offset_p, int *size_p)
2740 {
2741 EMIT_ASM32 (ne,
2742 /* Check the low half first; it is more likely to decide the result. */
2743 "cmpl %eax,(%esp)\n\t"
2744 "jne .Lne_jump\n\t"
2745 "cmpl %ebx,4(%esp)\n\t"
2746 "je .Lne_fallthru\n\t"
2747 ".Lne_jump:\n\t"
2748 "lea 0x8(%esp),%esp\n\t"
2749 "pop %eax\n\t"
2750 "pop %ebx\n\t"
2751 /* jmp, but don't trust the assembler to choose the right jump */
2752 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2753 ".Lne_fallthru:\n\t"
2754 "lea 0x8(%esp),%esp\n\t"
2755 "pop %eax\n\t"
2756 "pop %ebx");
2757
2758 if (offset_p)
2759 *offset_p = 18;
2760 if (size_p)
2761 *size_p = 4;
2762 }
2763
2764 static void
2765 i386_emit_lt_goto (int *offset_p, int *size_p)
2766 {
2767 EMIT_ASM32 (lt,
2768 "cmpl %ebx,4(%esp)\n\t"
2769 "jl .Llt_jump\n\t"
2770 "jne .Llt_fallthru\n\t"
2771 "cmpl %eax,(%esp)\n\t"
2772 "jnl .Llt_fallthru\n\t"
2773 ".Llt_jump:\n\t"
2774 "lea 0x8(%esp),%esp\n\t"
2775 "pop %eax\n\t"
2776 "pop %ebx\n\t"
2777 /* jmp, but don't trust the assembler to choose the right jump */
2778 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2779 ".Llt_fallthru:\n\t"
2780 "lea 0x8(%esp),%esp\n\t"
2781 "pop %eax\n\t"
2782 "pop %ebx");
2783
2784 if (offset_p)
2785 *offset_p = 20;
2786 if (size_p)
2787 *size_p = 4;
2788 }
2789
2790 static void
2791 i386_emit_le_goto (int *offset_p, int *size_p)
2792 {
2793 EMIT_ASM32 (le,
2794 "cmpl %ebx,4(%esp)\n\t"
2795 "jle .Lle_jump\n\t"
2796 "jne .Lle_fallthru\n\t"
2797 "cmpl %eax,(%esp)\n\t"
2798 "jnle .Lle_fallthru\n\t"
2799 ".Lle_jump:\n\t"
2800 "lea 0x8(%esp),%esp\n\t"
2801 "pop %eax\n\t"
2802 "pop %ebx\n\t"
2803 /* jmp, but don't trust the assembler to choose the right jump */
2804 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2805 ".Lle_fallthru:\n\t"
2806 "lea 0x8(%esp),%esp\n\t"
2807 "pop %eax\n\t"
2808 "pop %ebx");
2809
2810 if (offset_p)
2811 *offset_p = 20;
2812 if (size_p)
2813 *size_p = 4;
2814 }
2815
2816 static void
2817 i386_emit_gt_goto (int *offset_p, int *size_p)
2818 {
2819 EMIT_ASM32 (gt,
2820 "cmpl %ebx,4(%esp)\n\t"
2821 "jg .Lgt_jump\n\t"
2822 "jne .Lgt_fallthru\n\t"
2823 "cmpl %eax,(%esp)\n\t"
2824 "jng .Lgt_fallthru\n\t"
2825 ".Lgt_jump:\n\t"
2826 "lea 0x8(%esp),%esp\n\t"
2827 "pop %eax\n\t"
2828 "pop %ebx\n\t"
2829 /* jmp, but don't trust the assembler to choose the right jump */
2830 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2831 ".Lgt_fallthru:\n\t"
2832 "lea 0x8(%esp),%esp\n\t"
2833 "pop %eax\n\t"
2834 "pop %ebx");
2835
2836 if (offset_p)
2837 *offset_p = 20;
2838 if (size_p)
2839 *size_p = 4;
2840 }
2841
2842 static void
2843 i386_emit_ge_goto (int *offset_p, int *size_p)
2844 {
2845 EMIT_ASM32 (ge,
2846 "cmpl %ebx,4(%esp)\n\t"
2847 "jge .Lge_jump\n\t"
2848 "jne .Lge_fallthru\n\t"
2849 "cmpl %eax,(%esp)\n\t"
2850 "jnge .Lge_fallthru\n\t"
2851 ".Lge_jump:\n\t"
2852 "lea 0x8(%esp),%esp\n\t"
2853 "pop %eax\n\t"
2854 "pop %ebx\n\t"
2855 /* jmp, but don't trust the assembler to choose the right jump */
2856 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2857 ".Lge_fallthru:\n\t"
2858 "lea 0x8(%esp),%esp\n\t"
2859 "pop %eax\n\t"
2860 "pop %ebx");
2861
2862 if (offset_p)
2863 *offset_p = 20;
2864 if (size_p)
2865 *size_p = 4;
2866 }
2867
2868 struct emit_ops i386_emit_ops =
2869 {
2870 i386_emit_prologue,
2871 i386_emit_epilogue,
2872 i386_emit_add,
2873 i386_emit_sub,
2874 i386_emit_mul,
2875 i386_emit_lsh,
2876 i386_emit_rsh_signed,
2877 i386_emit_rsh_unsigned,
2878 i386_emit_ext,
2879 i386_emit_log_not,
2880 i386_emit_bit_and,
2881 i386_emit_bit_or,
2882 i386_emit_bit_xor,
2883 i386_emit_bit_not,
2884 i386_emit_equal,
2885 i386_emit_less_signed,
2886 i386_emit_less_unsigned,
2887 i386_emit_ref,
2888 i386_emit_if_goto,
2889 i386_emit_goto,
2890 i386_write_goto_address,
2891 i386_emit_const,
2892 i386_emit_call,
2893 i386_emit_reg,
2894 i386_emit_pop,
2895 i386_emit_stack_flush,
2896 i386_emit_zero_ext,
2897 i386_emit_swap,
2898 i386_emit_stack_adjust,
2899 i386_emit_int_call_1,
2900 i386_emit_void_call_2,
2901 i386_emit_eq_goto,
2902 i386_emit_ne_goto,
2903 i386_emit_lt_goto,
2904 i386_emit_le_goto,
2905 i386_emit_gt_goto,
2906 i386_emit_ge_goto
2907 };
2908
2909
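/* Return the emit_ops table appropriate for the current inferior:
   amd64_emit_ops for a 64-bit target description, i386_emit_ops
   otherwise.  */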
2910 static struct emit_ops *
2911 x86_emit_ops (void)
2912 {
2913 #ifdef __x86_64__
2914 if (is_64bit_tdesc ())
2915 return &amd64_emit_ops;
2916 else
2917 #endif
2918 return &i386_emit_ops;
2919 }
2920
2921 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2922
2923 const gdb_byte *
2924 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2925 {
2926 *size = x86_breakpoint_len;
2927 return x86_breakpoint;
2928 }
2929
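/* Implementation of linux_target_ops method "supports_range_stepping".  */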
2930 static int
2931 x86_supports_range_stepping (void)
2932 {
2933 return 1;
2934 }
2935
2936 /* Implementation of linux_target_ops method
2937 "supports_hardware_single_step".  */
2938
2939 static int
2940 x86_supports_hardware_single_step (void)
2941 {
2942 return 1;
2943 }
2944
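/* Implementation of linux_target_ops method "get_ipa_tdesc_idx".
   Return the index of the in-process agent target description that
   matches the current thread's register cache.  */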
2945 static int
2946 x86_get_ipa_tdesc_idx (void)
2947 {
2948 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2949 const struct target_desc *tdesc = regcache->tdesc;
2950
2951 #ifdef __x86_64__
2952 return amd64_get_ipa_tdesc_idx (tdesc);
2953 #endif
2954
2955 if (tdesc == tdesc_i386_linux_no_xml)
2956 return X86_TDESC_SSE;
2957
2958 return i386_get_ipa_tdesc_idx (tdesc);
2959 }
2960
2961 /* This is initialized assuming an amd64 target.
2962 'low_arch_setup' will correct it for i386 or amd64 targets. */
2963
2964 struct linux_target_ops the_low_target =
2965 {
2966 x86_linux_process_qsupported,
2967 x86_supports_tracepoints,
2968 x86_get_thread_area,
2969 x86_install_fast_tracepoint_jump_pad,
2970 x86_emit_ops,
2971 x86_get_min_fast_tracepoint_insn_len,
2972 x86_supports_range_stepping,
2973 x86_supports_hardware_single_step,
2974 x86_get_syscall_trapinfo,
2975 x86_get_ipa_tdesc_idx,
2976 };
2977
2978 /* The linux target ops object. */
2979
2980 linux_process_target *the_linux_target = &the_x86_target;
2981
2982 void
2983 initialize_low_arch (void)
2984 {
2985 /* Initialize the Linux target descriptions. */
2986 #ifdef __x86_64__
2987 tdesc_amd64_linux_no_xml = allocate_target_description ();
2988 copy_target_description (tdesc_amd64_linux_no_xml,
2989 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2990 false));
2991 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2992 #endif
2993
2994 tdesc_i386_linux_no_xml = allocate_target_description ();
2995 copy_target_description (tdesc_i386_linux_no_xml,
2996 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2997 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2998
2999 initialize_regsets_info (&x86_regsets_info);
3000 }