gdbserver/linux-low: turn 'process_qsupported' into a method
gdbserver/linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };	/* jmp rel32; offset patched in later.  */
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };	/* 66-prefixed jmp rel16.  */
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (char **features, int count) override;
110
111 protected:
112
113 void low_arch_setup () override;
114
115 bool low_cannot_fetch_register (int regno) override;
116
117 bool low_cannot_store_register (int regno) override;
118
119 bool low_supports_breakpoints () override;
120
121 CORE_ADDR low_get_pc (regcache *regcache) override;
122
123 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
124
125 int low_decr_pc_after_break () override;
126
127 bool low_breakpoint_at (CORE_ADDR pc) override;
128
129 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
130 int size, raw_breakpoint *bp) override;
131
132 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
133 int size, raw_breakpoint *bp) override;
134
135 bool low_stopped_by_watchpoint () override;
136
137 CORE_ADDR low_stopped_data_address () override;
138
139 /* collect_ptrace_register/supply_ptrace_register are not needed in the
140 native i386 case (no registers smaller than an xfer unit), and are not
141 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
142
143 /* Need to fix up i386 siginfo if host is amd64. */
144 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
145 int direction) override;
146
147 arch_process_info *low_new_process () override;
148
149 void low_delete_process (arch_process_info *info) override;
150
151 void low_new_thread (lwp_info *) override;
152
153 void low_delete_thread (arch_lwp_info *) override;
154
155 void low_new_fork (process_info *parent, process_info *child) override;
156
157 void low_prepare_to_resume (lwp_info *lwp) override;
158
159 private:
160
161 /* Update the target descriptions of all processes; a new GDB
162 connected, and it may or may not support XML target descriptions. */
163 void update_xmltarget ();
164 };
165
166 /* The singleton target ops object. */
167
168 static x86_target the_x86_target;
169
170 /* Per-process arch-specific data we want to keep. */
171
172 struct arch_process_info
173 {
174 struct x86_debug_reg_state debug_reg_state;
175 };
176
177 #ifdef __x86_64__
178
179 /* Mapping between the general-purpose registers in `struct user'
180 format and GDB's register array layout.
181 Note that the transfer layout uses 64-bit regs. */
182 static /*const*/ int i386_regmap[] =
183 {
184 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
185 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
186 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
187 DS * 8, ES * 8, FS * 8, GS * 8
188 };
189
190 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
191
192 /* So the code below doesn't have to care whether it's i386 or amd64. */
193 #define ORIG_EAX ORIG_RAX
194 #define REGSIZE 8
195
196 static const int x86_64_regmap[] =
197 {
198 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
199 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
200 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
201 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
202 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
203 DS * 8, ES * 8, FS * 8, GS * 8,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1,
206 -1, -1, -1, -1, -1, -1, -1, -1,
207 -1,
208 -1, -1, -1, -1, -1, -1, -1, -1,
209 ORIG_RAX * 8,
210 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
211 21 * 8, 22 * 8,
212 #else
213 -1, -1,
214 #endif
215 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
216 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
217 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
218 -1, -1, -1, -1, -1, -1, -1, -1,
219 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
220 -1, -1, -1, -1, -1, -1, -1, -1,
221 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
222 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
223 -1, -1, -1, -1, -1, -1, -1, -1,
224 -1, -1, -1, -1, -1, -1, -1, -1,
225 -1, -1, -1, -1, -1, -1, -1, -1,
226 -1 /* pkru */
227 };
228
229 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
230 #define X86_64_USER_REGS (GS + 1)
231
232 #else /* ! __x86_64__ */
233
234 /* Mapping between the general-purpose registers in `struct user'
235 format and GDB's register array layout. */
236 static /*const*/ int i386_regmap[] =
237 {
238 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
239 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
240 EIP * 4, EFL * 4, CS * 4, SS * 4,
241 DS * 4, ES * 4, FS * 4, GS * 4
242 };
243
244 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
245
246 #define REGSIZE 4
247
248 #endif
249
250 #ifdef __x86_64__
251
252 /* Returns true if the current inferior belongs to an x86-64 process,
253 per the tdesc. */
254
255 static int
256 is_64bit_tdesc (void)
257 {
258 struct regcache *regcache = get_thread_regcache (current_thread, 0);
259
260 return register_size (regcache->tdesc, 0) == 8;
261 }
262
263 #endif
264
265 \f
266 /* Called by libthread_db. */
267
268 ps_err_e
269 ps_get_thread_area (struct ps_prochandle *ph,
270 lwpid_t lwpid, int idx, void **base)
271 {
272 #ifdef __x86_64__
273 int use_64bit = is_64bit_tdesc ();
274
275 if (use_64bit)
276 {
277 switch (idx)
278 {
279 case FS:
280 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
281 return PS_OK;
282 break;
283 case GS:
284 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
285 return PS_OK;
286 break;
287 default:
288 return PS_BADADDR;
289 }
290 return PS_ERR;
291 }
292 #endif
293
294 {
295 unsigned int desc[4];
296
297 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
298 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
299 return PS_ERR;
300
301 /* Ensure we properly extend the value to 64-bits for x86_64. */
302 *base = (void *) (uintptr_t) desc[1];
303 return PS_OK;
304 }
305 }
306
307 /* Get the thread area address. This is used to recognize which
308 thread is which when tracing with the in-process agent library. We
309 don't read anything from the address, and treat it as opaque; it's
310 the address itself that we assume is unique per-thread. */
311
312 static int
313 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
314 {
315 #ifdef __x86_64__
316 int use_64bit = is_64bit_tdesc ();
317
318 if (use_64bit)
319 {
320 void *base;
321 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
322 {
323 *addr = (CORE_ADDR) (uintptr_t) base;
324 return 0;
325 }
326
327 return -1;
328 }
329 #endif
330
331 {
332 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
333 struct thread_info *thr = get_lwp_thread (lwp);
334 struct regcache *regcache = get_thread_regcache (thr, 1);
335 unsigned int desc[4];
336 ULONGEST gs = 0;
337 const int reg_thread_area = 3; /* Shift out the selector's RPL/TI bits to get the GDT index. */
338 int idx;
339
340 collect_register_by_name (regcache, "gs", &gs);
341
342 idx = gs >> reg_thread_area;
343
344 if (ptrace (PTRACE_GET_THREAD_AREA,
345 lwpid_of (thr),
346 (void *) (long) idx, (unsigned long) &desc) < 0)
347 return -1;
348
349 *addr = desc[1];
350 return 0;
351 }
352 }
353
354
355 \f
356 bool
357 x86_target::low_cannot_store_register (int regno)
358 {
359 #ifdef __x86_64__
360 if (is_64bit_tdesc ())
361 return false;
362 #endif
363
364 return regno >= I386_NUM_REGS;
365 }
366
367 bool
368 x86_target::low_cannot_fetch_register (int regno)
369 {
370 #ifdef __x86_64__
371 if (is_64bit_tdesc ())
372 return false;
373 #endif
374
375 return regno >= I386_NUM_REGS;
376 }
377
378 static void
379 x86_fill_gregset (struct regcache *regcache, void *buf)
380 {
381 int i;
382
383 #ifdef __x86_64__
384 if (register_size (regcache->tdesc, 0) == 8)
385 {
386 for (i = 0; i < X86_64_NUM_REGS; i++)
387 if (x86_64_regmap[i] != -1)
388 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
389
390 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
391 {
392 unsigned long base;
393 int lwpid = lwpid_of (current_thread);
394
395 collect_register_by_name (regcache, "fs_base", &base);
396 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
397
398 collect_register_by_name (regcache, "gs_base", &base);
399 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
400 }
401 #endif
402
403 return;
404 }
405
406 /* 32-bit inferior registers need to be zero-extended.
407 Callers would read uninitialized memory otherwise. */
408 memset (buf, 0x00, X86_64_USER_REGS * 8);
409 #endif
410
411 for (i = 0; i < I386_NUM_REGS; i++)
412 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
413
414 collect_register_by_name (regcache, "orig_eax",
415 ((char *) buf) + ORIG_EAX * REGSIZE);
416
417 #ifdef __x86_64__
418 /* Sign extend EAX value to avoid potential syscall restart
419 problems.
420
421 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
422 for a detailed explanation. */
423 if (register_size (regcache->tdesc, 0) == 4)
424 {
425 void *ptr = ((gdb_byte *) buf
426 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
427
428 *(int64_t *) ptr = *(int32_t *) ptr;
429 }
430 #endif
431 }
432
433 static void
434 x86_store_gregset (struct regcache *regcache, const void *buf)
435 {
436 int i;
437
438 #ifdef __x86_64__
439 if (register_size (regcache->tdesc, 0) == 8)
440 {
441 for (i = 0; i < X86_64_NUM_REGS; i++)
442 if (x86_64_regmap[i] != -1)
443 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
444
445 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
446 {
447 unsigned long base;
448 int lwpid = lwpid_of (current_thread);
449
450 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
451 supply_register_by_name (regcache, "fs_base", &base);
452
453 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
454 supply_register_by_name (regcache, "gs_base", &base);
455 }
456 #endif
457 return;
458 }
459 #endif
460
461 for (i = 0; i < I386_NUM_REGS; i++)
462 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
463
464 supply_register_by_name (regcache, "orig_eax",
465 ((char *) buf) + ORIG_EAX * REGSIZE);
466 }
467
468 static void
469 x86_fill_fpregset (struct regcache *regcache, void *buf)
470 {
471 #ifdef __x86_64__
472 i387_cache_to_fxsave (regcache, buf);
473 #else
474 i387_cache_to_fsave (regcache, buf);
475 #endif
476 }
477
478 static void
479 x86_store_fpregset (struct regcache *regcache, const void *buf)
480 {
481 #ifdef __x86_64__
482 i387_fxsave_to_cache (regcache, buf);
483 #else
484 i387_fsave_to_cache (regcache, buf);
485 #endif
486 }
487
488 #ifndef __x86_64__
489
490 static void
491 x86_fill_fpxregset (struct regcache *regcache, void *buf)
492 {
493 i387_cache_to_fxsave (regcache, buf);
494 }
495
496 static void
497 x86_store_fpxregset (struct regcache *regcache, const void *buf)
498 {
499 i387_fxsave_to_cache (regcache, buf);
500 }
501
502 #endif
503
504 static void
505 x86_fill_xstateregset (struct regcache *regcache, void *buf)
506 {
507 i387_cache_to_xsave (regcache, buf);
508 }
509
510 static void
511 x86_store_xstateregset (struct regcache *regcache, const void *buf)
512 {
513 i387_xsave_to_cache (regcache, buf);
514 }
515
516 /* ??? The non-biarch i386 case stores all the i387 regs twice.
517 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
518 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
519 doesn't work. It would be nice to avoid the duplication in the case where it
520 does work. Maybe the arch_setup routine could check whether it works
521 and update the supported regsets accordingly. */
522
523 static struct regset_info x86_regsets[] =
524 {
525 #ifdef HAVE_PTRACE_GETREGS
526 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
527 GENERAL_REGS,
528 x86_fill_gregset, x86_store_gregset },
529 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
530 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
531 # ifndef __x86_64__
532 # ifdef HAVE_PTRACE_GETFPXREGS
533 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
534 EXTENDED_REGS,
535 x86_fill_fpxregset, x86_store_fpxregset },
536 # endif
537 # endif
538 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
539 FP_REGS,
540 x86_fill_fpregset, x86_store_fpregset },
541 #endif /* HAVE_PTRACE_GETREGS */
542 NULL_REGSET
543 };
544
545 bool
546 x86_target::low_supports_breakpoints ()
547 {
548 return true;
549 }
550
551 CORE_ADDR
552 x86_target::low_get_pc (regcache *regcache)
553 {
554 int use_64bit = register_size (regcache->tdesc, 0) == 8;
555
556 if (use_64bit)
557 {
558 uint64_t pc;
559
560 collect_register_by_name (regcache, "rip", &pc);
561 return (CORE_ADDR) pc;
562 }
563 else
564 {
565 uint32_t pc;
566
567 collect_register_by_name (regcache, "eip", &pc);
568 return (CORE_ADDR) pc;
569 }
570 }
571
572 void
573 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
574 {
575 int use_64bit = register_size (regcache->tdesc, 0) == 8;
576
577 if (use_64bit)
578 {
579 uint64_t newpc = pc;
580
581 supply_register_by_name (regcache, "rip", &newpc);
582 }
583 else
584 {
585 uint32_t newpc = pc;
586
587 supply_register_by_name (regcache, "eip", &newpc);
588 }
589 }
590
591 int
592 x86_target::low_decr_pc_after_break ()
593 {
594 return 1;
595 }
596
597 \f
598 static const gdb_byte x86_breakpoint[] = { 0xCC };
599 #define x86_breakpoint_len 1
600
601 bool
602 x86_target::low_breakpoint_at (CORE_ADDR pc)
603 {
604 unsigned char c;
605
606 read_memory (pc, &c, 1);
607 if (c == 0xCC)
608 return true;
609
610 return false;
611 }
612 \f
613 /* Low-level function vector. */
614 struct x86_dr_low_type x86_dr_low =
615 {
616 x86_linux_dr_set_control,
617 x86_linux_dr_set_addr,
618 x86_linux_dr_get_addr,
619 x86_linux_dr_get_status,
620 x86_linux_dr_get_control,
621 sizeof (void *),
622 };
623 \f
624 /* Breakpoint/Watchpoint support. */
625
626 bool
627 x86_target::supports_z_point_type (char z_type)
628 {
629 switch (z_type)
630 {
631 case Z_PACKET_SW_BP:
632 case Z_PACKET_HW_BP:
633 case Z_PACKET_WRITE_WP:
634 case Z_PACKET_ACCESS_WP:
635 return true;
636 default:
637 return false;
638 }
639 }
640
641 int
642 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
643 int size, raw_breakpoint *bp)
644 {
645 struct process_info *proc = current_process ();
646
647 switch (type)
648 {
649 case raw_bkpt_type_hw:
650 case raw_bkpt_type_write_wp:
651 case raw_bkpt_type_access_wp:
652 {
653 enum target_hw_bp_type hw_type
654 = raw_bkpt_type_to_target_hw_bp_type (type);
655 struct x86_debug_reg_state *state
656 = &proc->priv->arch_private->debug_reg_state;
657
658 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
659 }
660
661 default:
662 /* Unsupported. */
663 return 1;
664 }
665 }
666
667 int
668 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
669 int size, raw_breakpoint *bp)
670 {
671 struct process_info *proc = current_process ();
672
673 switch (type)
674 {
675 case raw_bkpt_type_hw:
676 case raw_bkpt_type_write_wp:
677 case raw_bkpt_type_access_wp:
678 {
679 enum target_hw_bp_type hw_type
680 = raw_bkpt_type_to_target_hw_bp_type (type);
681 struct x86_debug_reg_state *state
682 = &proc->priv->arch_private->debug_reg_state;
683
684 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
685 }
686 default:
687 /* Unsupported. */
688 return 1;
689 }
690 }
691
692 bool
693 x86_target::low_stopped_by_watchpoint ()
694 {
695 struct process_info *proc = current_process ();
696 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
697 }
698
699 CORE_ADDR
700 x86_target::low_stopped_data_address ()
701 {
702 struct process_info *proc = current_process ();
703 CORE_ADDR addr;
704 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
705 &addr))
706 return addr;
707 return 0;
708 }
709 \f
710 /* Called when a new process is created. */
711
712 arch_process_info *
713 x86_target::low_new_process ()
714 {
715 struct arch_process_info *info = XCNEW (struct arch_process_info);
716
717 x86_low_init_dregs (&info->debug_reg_state);
718
719 return info;
720 }
721
722 /* Called when a process is being deleted. */
723
724 void
725 x86_target::low_delete_process (arch_process_info *info)
726 {
727 xfree (info);
728 }
729
730 void
731 x86_target::low_new_thread (lwp_info *lwp)
732 {
733 /* This comes from nat/. */
734 x86_linux_new_thread (lwp);
735 }
736
737 void
738 x86_target::low_delete_thread (arch_lwp_info *alwp)
739 {
740 /* This comes from nat/. */
741 x86_linux_delete_thread (alwp);
742 }
743
744 /* Target routine for new_fork. */
745
746 void
747 x86_target::low_new_fork (process_info *parent, process_info *child)
748 {
749 /* These are allocated by linux_add_process. */
750 gdb_assert (parent->priv != NULL
751 && parent->priv->arch_private != NULL);
752 gdb_assert (child->priv != NULL
753 && child->priv->arch_private != NULL);
754
755 /* Linux kernel before 2.6.33 commit
756 72f674d203cd230426437cdcf7dd6f681dad8b0d
757 will inherit hardware debug registers from parent
758 on fork/vfork/clone. Newer Linux kernels create such tasks with
759 zeroed debug registers.
760
761 GDB core assumes the child inherits the watchpoints/hw
762 breakpoints of the parent, and will remove them all from the
763 forked off process. Copy the debug registers mirrors into the
764 new process so that all breakpoints and watchpoints can be
765 removed together. The debug registers mirror will become zeroed
766 in the end before detaching the forked off process, thus making
767 this compatible with older Linux kernels too. */
768
769 *child->priv->arch_private = *parent->priv->arch_private;
770 }
771
772 void
773 x86_target::low_prepare_to_resume (lwp_info *lwp)
774 {
775 /* This comes from nat/. */
776 x86_linux_prepare_to_resume (lwp);
777 }
778
779 /* See nat/x86-dregs.h. */
780
781 struct x86_debug_reg_state *
782 x86_debug_reg_state (pid_t pid)
783 {
784 struct process_info *proc = find_process_pid (pid);
785
786 return &proc->priv->arch_private->debug_reg_state;
787 }
788 \f
789 /* When GDBSERVER is built as a 64-bit application on linux, the
790 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
791 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
792 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
793 conversion in-place ourselves. */
794
795 /* Convert a ptrace/host siginfo object into/from the siginfo in the
796 layout of the inferior's architecture. Returns true if any
797 conversion was done; false otherwise. If DIRECTION is 1, then copy
798 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
799 INF. */
800
801 bool
802 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
803 {
804 #ifdef __x86_64__
805 unsigned int machine;
806 int tid = lwpid_of (current_thread);
807 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
808
809 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
810 if (!is_64bit_tdesc ())
811 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
812 FIXUP_32);
813 /* No fixup for native x32 GDB. */
814 else if (!is_elf64 && sizeof (void *) == 8)
815 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
816 FIXUP_X32);
817 #endif
818
819 return false;
820 }
821 \f
822 static int use_xml;
823
824 /* Format of XSAVE extended state is:
825 struct
826 {
827 fxsave_bytes[0..463]
828 sw_usable_bytes[464..511]
829 xstate_hdr_bytes[512..575]
830 avx_bytes[576..831]
831 future_state etc
832 };
833
834 Same memory layout will be used for the coredump NT_X86_XSTATE
835 representing the XSAVE extended state registers.
836
837 The first 8 bytes of sw_usable_bytes, i.e. bytes [464..471], are the OS-enabled
838 extended state mask, which is the same as the extended control register
839 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
840 together with the mask saved in the xstate_hdr_bytes to determine what
841 states the processor/OS supports and what state, used or initialized,
842 the process/thread is in. */
843 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
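
/* Illustrative sketch, not part of the original file (the helper name is
   hypothetical): given a raw XSAVE buffer BUF laid out as described above,
   the XCR0 mask could be read straight out of the software-usable area at
   the offset defined here.  x86_linux_read_description below does the
   equivalent on the xstateregs array fetched with
   PTRACE_GETREGSET/NT_X86_XSTATE.  */

static uint64_t
example_xsave_read_xcr0 (const unsigned char *buf)
{
  uint64_t xcr0;

  memcpy (&xcr0, buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof (xcr0));
  return xcr0;
}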
844
845 /* Does the current host support the GETFPXREGS request? The header
846 file may or may not define it, and even if it is defined, the
847 kernel will return EIO if it's running on a pre-SSE processor. */
848 int have_ptrace_getfpxregs =
849 #ifdef HAVE_PTRACE_GETFPXREGS
850 -1
851 #else
852 0
853 #endif
854 ;
855
856 /* Get Linux/x86 target description from running target. */
857
858 static const struct target_desc *
859 x86_linux_read_description (void)
860 {
861 unsigned int machine;
862 int is_elf64;
863 int xcr0_features;
864 int tid;
865 static uint64_t xcr0;
866 struct regset_info *regset;
867
868 tid = lwpid_of (current_thread);
869
870 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
871
872 if (sizeof (void *) == 4)
873 {
874 if (is_elf64 > 0)
875 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
876 #ifndef __x86_64__
877 else if (machine == EM_X86_64)
878 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
879 #endif
880 }
881
882 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
883 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
884 {
885 elf_fpxregset_t fpxregs;
886
887 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
888 {
889 have_ptrace_getfpxregs = 0;
890 have_ptrace_getregset = 0;
891 return i386_linux_read_description (X86_XSTATE_X87);
892 }
893 else
894 have_ptrace_getfpxregs = 1;
895 }
896 #endif
897
898 if (!use_xml)
899 {
900 x86_xcr0 = X86_XSTATE_SSE_MASK;
901
902 /* Don't use XML. */
903 #ifdef __x86_64__
904 if (machine == EM_X86_64)
905 return tdesc_amd64_linux_no_xml;
906 else
907 #endif
908 return tdesc_i386_linux_no_xml;
909 }
910
911 if (have_ptrace_getregset == -1)
912 {
913 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
914 struct iovec iov;
915
916 iov.iov_base = xstateregs;
917 iov.iov_len = sizeof (xstateregs);
918
919 /* Check if PTRACE_GETREGSET works. */
920 if (ptrace (PTRACE_GETREGSET, tid,
921 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
922 have_ptrace_getregset = 0;
923 else
924 {
925 have_ptrace_getregset = 1;
926
927 /* Get XCR0 from XSAVE extended state. */
928 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
929 / sizeof (uint64_t))];
930
931 /* Use PTRACE_GETREGSET if it is available. */
932 for (regset = x86_regsets;
933 regset->fill_function != NULL; regset++)
934 if (regset->get_request == PTRACE_GETREGSET)
935 regset->size = X86_XSTATE_SIZE (xcr0);
936 else if (regset->type != GENERAL_REGS)
937 regset->size = 0;
938 }
939 }
940
941 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
942 xcr0_features = (have_ptrace_getregset
943 && (xcr0 & X86_XSTATE_ALL_MASK));
944
945 if (xcr0_features)
946 x86_xcr0 = xcr0;
947
948 if (machine == EM_X86_64)
949 {
950 #ifdef __x86_64__
951 const target_desc *tdesc = NULL;
952
953 if (xcr0_features)
954 {
955 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
956 !is_elf64);
957 }
958
959 if (tdesc == NULL)
960 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
961 return tdesc;
962 #endif
963 }
964 else
965 {
966 const target_desc *tdesc = NULL;
967
968 if (xcr0_features)
969 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
970
971 if (tdesc == NULL)
972 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
973
974 return tdesc;
975 }
976
977 gdb_assert_not_reached ("failed to return tdesc");
978 }
979
980 /* Update the target descriptions of all processes; a new GDB
981 connected, and it may or may not support XML target descriptions. */
982
983 void
984 x86_target::update_xmltarget ()
985 {
986 struct thread_info *saved_thread = current_thread;
987
988 /* Before changing the register cache's internal layout, flush the
989 contents of the current valid caches back to the threads, and
990 release the current regcache objects. */
991 regcache_release ();
992
993 for_each_process ([this] (process_info *proc) {
994 int pid = proc->pid;
995
996 /* Look up any thread of this process. */
997 current_thread = find_any_thread_of_pid (pid);
998
999 low_arch_setup ();
1000 });
1001
1002 current_thread = saved_thread;
1003 }
1004
1005 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1006 PTRACE_GETREGSET. */
1007
1008 void
1009 x86_target::process_qsupported (char **features, int count)
1010 {
1011 int i;
1012
1013 /* Presume GDB doesn't support XML. If GDB sends "xmlRegisters="
1014 with "i386" in the qSupported query, it supports x86 XML target
1015 descriptions. */
1016 use_xml = 0;
1017 for (i = 0; i < count; i++)
1018 {
1019 const char *feature = features[i];
1020
1021 if (startswith (feature, "xmlRegisters="))
1022 {
1023 char *copy = xstrdup (feature + 13);
1024
1025 char *saveptr;
1026 for (char *p = strtok_r (copy, ",", &saveptr);
1027 p != NULL;
1028 p = strtok_r (NULL, ",", &saveptr))
1029 {
1030 if (strcmp (p, "i386") == 0)
1031 {
1032 use_xml = 1;
1033 break;
1034 }
1035 }
1036
1037 free (copy);
1038 }
1039 }
1040 update_xmltarget ();
1041 }
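
/* Usage sketch with hypothetical values, not part of the original file:
   FEATURES holds the individual qSupported entries sent by GDB, e.g.

     char *features[] = { xstrdup ("swbreak+"),
			  xstrdup ("xmlRegisters=i386") };
     the_x86_target.process_qsupported (features, 2);

   The "i386" token inside "xmlRegisters=" turns use_xml on, and
   update_xmltarget () then re-reads each process's target
   description.  */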
1042
1043 /* Common for x86/x86-64. */
1044
1045 static struct regsets_info x86_regsets_info =
1046 {
1047 x86_regsets, /* regsets */
1048 0, /* num_regsets */
1049 NULL, /* disabled_regsets */
1050 };
1051
1052 #ifdef __x86_64__
1053 static struct regs_info amd64_linux_regs_info =
1054 {
1055 NULL, /* regset_bitmap */
1056 NULL, /* usrregs_info */
1057 &x86_regsets_info
1058 };
1059 #endif
1060 static struct usrregs_info i386_linux_usrregs_info =
1061 {
1062 I386_NUM_REGS,
1063 i386_regmap,
1064 };
1065
1066 static struct regs_info i386_linux_regs_info =
1067 {
1068 NULL, /* regset_bitmap */
1069 &i386_linux_usrregs_info,
1070 &x86_regsets_info
1071 };
1072
1073 const regs_info *
1074 x86_target::get_regs_info ()
1075 {
1076 #ifdef __x86_64__
1077 if (is_64bit_tdesc ())
1078 return &amd64_linux_regs_info;
1079 else
1080 #endif
1081 return &i386_linux_regs_info;
1082 }
1083
1084 /* Initialize the target description for the architecture of the
1085 inferior. */
1086
1087 void
1088 x86_target::low_arch_setup ()
1089 {
1090 current_process ()->tdesc = x86_linux_read_description ();
1091 }
1092
1093 /* Fill *SYSNO with the syscall number the inferior was trapped at. This
1094 should only be called if the LWP got a SYSCALL_SIGTRAP. */
1095
1096 static void
1097 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1098 {
1099 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1100
1101 if (use_64bit)
1102 {
1103 long l_sysno;
1104
1105 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1106 *sysno = (int) l_sysno;
1107 }
1108 else
1109 collect_register_by_name (regcache, "orig_eax", sysno);
1110 }
1111
1112 static int
1113 x86_supports_tracepoints (void)
1114 {
1115 return 1;
1116 }
1117
1118 static void
1119 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1120 {
1121 target_write_memory (*to, buf, len);
1122 *to += len;
1123 }
1124
1125 static int
1126 push_opcode (unsigned char *buf, const char *op)
1127 {
1128 unsigned char *buf_org = buf;
1129
1130 while (1)
1131 {
1132 char *endptr;
1133 unsigned long ul = strtoul (op, &endptr, 16);
1134
1135 if (endptr == op)
1136 break;
1137
1138 *buf++ = ul;
1139 op = endptr;
1140 }
1141
1142 return buf - buf_org;
1143 }
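
/* Usage sketch, not part of the original file: push_opcode parses a
   string of space-separated hex byte values and returns the number of
   bytes written, e.g.

     unsigned char buf[16];
     int n = push_opcode (buf, "48 83 ec 18");   (sub $0x18,%rsp)

   leaves n == 4 and buf[0..3] == { 0x48, 0x83, 0xec, 0x18 }.  The
   jump-pad builders below rely on this to append fixed instruction
   sequences.  */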
1144
1145 #ifdef __x86_64__
1146
1147 /* Build a jump pad that saves registers and calls a collection
1148 function. Writes the instruction that jumps to the jump pad into
1149 JJUMPAD_INSN. The caller is responsible for writing it in at the
1150 tracepoint address. */
1151
1152 static int
1153 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1154 CORE_ADDR collector,
1155 CORE_ADDR lockaddr,
1156 ULONGEST orig_size,
1157 CORE_ADDR *jump_entry,
1158 CORE_ADDR *trampoline,
1159 ULONGEST *trampoline_size,
1160 unsigned char *jjump_pad_insn,
1161 ULONGEST *jjump_pad_insn_size,
1162 CORE_ADDR *adjusted_insn_addr,
1163 CORE_ADDR *adjusted_insn_addr_end,
1164 char *err)
1165 {
1166 unsigned char buf[40];
1167 int i, offset;
1168 int64_t loffset;
1169
1170 CORE_ADDR buildaddr = *jump_entry;
1171
1172 /* Build the jump pad. */
1173
1174 /* First, do tracepoint data collection. Save registers. */
1175 i = 0;
1176 /* Need to ensure stack pointer saved first. */
1177 buf[i++] = 0x54; /* push %rsp */
1178 buf[i++] = 0x55; /* push %rbp */
1179 buf[i++] = 0x57; /* push %rdi */
1180 buf[i++] = 0x56; /* push %rsi */
1181 buf[i++] = 0x52; /* push %rdx */
1182 buf[i++] = 0x51; /* push %rcx */
1183 buf[i++] = 0x53; /* push %rbx */
1184 buf[i++] = 0x50; /* push %rax */
1185 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1186 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1187 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1188 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1189 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1190 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1191 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1192 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1193 buf[i++] = 0x9c; /* pushfq */
1194 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1195 buf[i++] = 0xbf;
1196 memcpy (buf + i, &tpaddr, 8);
1197 i += 8;
1198 buf[i++] = 0x57; /* push %rdi */
1199 append_insns (&buildaddr, i, buf);
1200
1201 /* Stack space for the collecting_t object. */
1202 i = 0;
1203 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1204 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1205 memcpy (buf + i, &tpoint, 8);
1206 i += 8;
1207 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1208 i += push_opcode (&buf[i],
1209 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1210 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1211 append_insns (&buildaddr, i, buf);
1212
1213 /* spin-lock. */
1214 i = 0;
1215 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1216 memcpy (&buf[i], (void *) &lockaddr, 8);
1217 i += 8;
1218 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1219 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1220 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1221 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1222 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1223 append_insns (&buildaddr, i, buf);
1224
1225 /* Set up the gdb_collect call. */
1226 /* At this point, (stack pointer + 0x18) is the base of our saved
1227 register block. */
1228
1229 i = 0;
1230 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1231 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1232
1233 /* tpoint address may be 64-bit wide. */
1234 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1235 memcpy (buf + i, &tpoint, 8);
1236 i += 8;
1237 append_insns (&buildaddr, i, buf);
1238
1239 /* The collector function, being in the shared library, may be
1240 more than 31 bits away from the jump pad. */
1241 i = 0;
1242 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1243 memcpy (buf + i, &collector, 8);
1244 i += 8;
1245 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1246 append_insns (&buildaddr, i, buf);
1247
1248 /* Clear the spin-lock. */
1249 i = 0;
1250 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1251 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1252 memcpy (buf + i, &lockaddr, 8);
1253 i += 8;
1254 append_insns (&buildaddr, i, buf);
1255
1256 /* Remove the stack space that had been used for the collecting_t object. */
1257 i = 0;
1258 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1259 append_insns (&buildaddr, i, buf);
1260
1261 /* Restore register state. */
1262 i = 0;
1263 buf[i++] = 0x48; /* add $0x8,%rsp */
1264 buf[i++] = 0x83;
1265 buf[i++] = 0xc4;
1266 buf[i++] = 0x08;
1267 buf[i++] = 0x9d; /* popfq */
1268 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1269 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1270 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1271 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1272 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1273 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1274 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1275 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1276 buf[i++] = 0x58; /* pop %rax */
1277 buf[i++] = 0x5b; /* pop %rbx */
1278 buf[i++] = 0x59; /* pop %rcx */
1279 buf[i++] = 0x5a; /* pop %rdx */
1280 buf[i++] = 0x5e; /* pop %rsi */
1281 buf[i++] = 0x5f; /* pop %rdi */
1282 buf[i++] = 0x5d; /* pop %rbp */
1283 buf[i++] = 0x5c; /* pop %rsp */
1284 append_insns (&buildaddr, i, buf);
1285
1286 /* Now, adjust the original instruction to execute in the jump
1287 pad. */
1288 *adjusted_insn_addr = buildaddr;
1289 relocate_instruction (&buildaddr, tpaddr);
1290 *adjusted_insn_addr_end = buildaddr;
1291
1292 /* Finally, write a jump back to the program. */
1293
1294 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1295 if (loffset > INT_MAX || loffset < INT_MIN)
1296 {
1297 sprintf (err,
1298 "E.Jump back from jump pad too far from tracepoint "
1299 "(offset 0x%" PRIx64 " > int32).", loffset);
1300 return 1;
1301 }
1302
1303 offset = (int) loffset;
1304 memcpy (buf, jump_insn, sizeof (jump_insn));
1305 memcpy (buf + 1, &offset, 4);
1306 append_insns (&buildaddr, sizeof (jump_insn), buf);
1307
1308 /* The jump pad is now built. Wire in a jump to our jump pad. This
1309 is always done last (by our caller actually), so that we can
1310 install fast tracepoints with threads running. This relies on
1311 the agent's atomic write support. */
1312 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1313 if (loffset > INT_MAX || loffset < INT_MIN)
1314 {
1315 sprintf (err,
1316 "E.Jump pad too far from tracepoint "
1317 "(offset 0x%" PRIx64 " > int32).", loffset);
1318 return 1;
1319 }
1320
1321 offset = (int) loffset;
1322
1323 memcpy (buf, jump_insn, sizeof (jump_insn));
1324 memcpy (buf + 1, &offset, 4);
1325 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1326 *jjump_pad_insn_size = sizeof (jump_insn);
1327
1328 /* Return the end address of our pad. */
1329 *jump_entry = buildaddr;
1330
1331 return 0;
1332 }
1333
1334 #endif /* __x86_64__ */
1335
1336 /* Build a jump pad that saves registers and calls a collection
1337 function. Writes the instruction that jumps to the jump pad into
1338 JJUMPAD_INSN. The caller is responsible for writing it in at the
1339 tracepoint address. */
1340
1341 static int
1342 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1343 CORE_ADDR collector,
1344 CORE_ADDR lockaddr,
1345 ULONGEST orig_size,
1346 CORE_ADDR *jump_entry,
1347 CORE_ADDR *trampoline,
1348 ULONGEST *trampoline_size,
1349 unsigned char *jjump_pad_insn,
1350 ULONGEST *jjump_pad_insn_size,
1351 CORE_ADDR *adjusted_insn_addr,
1352 CORE_ADDR *adjusted_insn_addr_end,
1353 char *err)
1354 {
1355 unsigned char buf[0x100];
1356 int i, offset;
1357 CORE_ADDR buildaddr = *jump_entry;
1358
1359 /* Build the jump pad. */
1360
1361 /* First, do tracepoint data collection. Save registers. */
1362 i = 0;
1363 buf[i++] = 0x60; /* pushad */
1364 buf[i++] = 0x68; /* push tpaddr aka $pc */
1365 *((int *)(buf + i)) = (int) tpaddr;
1366 i += 4;
1367 buf[i++] = 0x9c; /* pushf */
1368 buf[i++] = 0x1e; /* push %ds */
1369 buf[i++] = 0x06; /* push %es */
1370 buf[i++] = 0x0f; /* push %fs */
1371 buf[i++] = 0xa0;
1372 buf[i++] = 0x0f; /* push %gs */
1373 buf[i++] = 0xa8;
1374 buf[i++] = 0x16; /* push %ss */
1375 buf[i++] = 0x0e; /* push %cs */
1376 append_insns (&buildaddr, i, buf);
1377
1378 /* Stack space for the collecting_t object. */
1379 i = 0;
1380 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1381
1382 /* Build the object. */
1383 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1384 memcpy (buf + i, &tpoint, 4);
1385 i += 4;
1386 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1387
1388 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1389 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1390 append_insns (&buildaddr, i, buf);
1391
1392 /* spin-lock. Note this uses cmpxchg, which is not available on the original
1393 i386 (it needs a 486 or later). If we cared about that, xchg could be used instead. */
1394
1395 i = 0;
1396 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1397 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1398 %esp,<lockaddr> */
1399 memcpy (&buf[i], (void *) &lockaddr, 4);
1400 i += 4;
1401 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1402 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1403 append_insns (&buildaddr, i, buf);
1404
1405
1406 /* Set up arguments to the gdb_collect call. */
1407 i = 0;
1408 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1409 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1410 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1411 append_insns (&buildaddr, i, buf);
1412
1413 i = 0;
1414 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1415 append_insns (&buildaddr, i, buf);
1416
1417 i = 0;
1418 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1419 memcpy (&buf[i], (void *) &tpoint, 4);
1420 i += 4;
1421 append_insns (&buildaddr, i, buf);
1422
1423 buf[0] = 0xe8; /* call <reladdr> */
1424 offset = collector - (buildaddr + sizeof (jump_insn));
1425 memcpy (buf + 1, &offset, 4);
1426 append_insns (&buildaddr, 5, buf);
1427 /* Clean up after the call. */
1428 buf[0] = 0x83; /* add $0x8,%esp */
1429 buf[1] = 0xc4;
1430 buf[2] = 0x08;
1431 append_insns (&buildaddr, 3, buf);
1432
1433
1434 /* Clear the spin-lock. This would need the LOCK prefix on older
1435 broken archs. */
1436 i = 0;
1437 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1438 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1439 memcpy (buf + i, &lockaddr, 4);
1440 i += 4;
1441 append_insns (&buildaddr, i, buf);
1442
1443
1444 /* Remove the stack space that had been used for the collecting_t object. */
1445 i = 0;
1446 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1447 append_insns (&buildaddr, i, buf);
1448
1449 i = 0;
1450 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1451 buf[i++] = 0xc4;
1452 buf[i++] = 0x04;
1453 buf[i++] = 0x17; /* pop %ss */
1454 buf[i++] = 0x0f; /* pop %gs */
1455 buf[i++] = 0xa9;
1456 buf[i++] = 0x0f; /* pop %fs */
1457 buf[i++] = 0xa1;
1458 buf[i++] = 0x07; /* pop %es */
1459 buf[i++] = 0x1f; /* pop %ds */
1460 buf[i++] = 0x9d; /* popf */
1461 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1462 buf[i++] = 0xc4;
1463 buf[i++] = 0x04;
1464 buf[i++] = 0x61; /* popad */
1465 append_insns (&buildaddr, i, buf);
1466
1467 /* Now, adjust the original instruction to execute in the jump
1468 pad. */
1469 *adjusted_insn_addr = buildaddr;
1470 relocate_instruction (&buildaddr, tpaddr);
1471 *adjusted_insn_addr_end = buildaddr;
1472
1473 /* Write the jump back to the program. */
1474 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1475 memcpy (buf, jump_insn, sizeof (jump_insn));
1476 memcpy (buf + 1, &offset, 4);
1477 append_insns (&buildaddr, sizeof (jump_insn), buf);
1478
1479 /* The jump pad is now built. Wire in a jump to our jump pad. This
1480 is always done last (by our caller actually), so that we can
1481 install fast tracepoints with threads running. This relies on
1482 the agent's atomic write support. */
1483 if (orig_size == 4)
1484 {
1485 /* Create a trampoline. */
1486 *trampoline_size = sizeof (jump_insn);
1487 if (!claim_trampoline_space (*trampoline_size, trampoline))
1488 {
1489 /* No trampoline space available. */
1490 strcpy (err,
1491 "E.Cannot allocate trampoline space needed for fast "
1492 "tracepoints on 4-byte instructions.");
1493 return 1;
1494 }
1495
1496 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1497 memcpy (buf, jump_insn, sizeof (jump_insn));
1498 memcpy (buf + 1, &offset, 4);
1499 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1500
1501 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1502 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1503 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1504 memcpy (buf + 2, &offset, 2);
1505 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1506 *jjump_pad_insn_size = sizeof (small_jump_insn);
1507 }
1508 else
1509 {
1510 /* Else use a 32-bit relative jump instruction. */
1511 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1512 memcpy (buf, jump_insn, sizeof (jump_insn));
1513 memcpy (buf + 1, &offset, 4);
1514 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1515 *jjump_pad_insn_size = sizeof (jump_insn);
1516 }
1517
1518 /* Return the end address of our pad. */
1519 *jump_entry = buildaddr;
1520
1521 return 0;
1522 }
1523
1524 static int
1525 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1526 CORE_ADDR collector,
1527 CORE_ADDR lockaddr,
1528 ULONGEST orig_size,
1529 CORE_ADDR *jump_entry,
1530 CORE_ADDR *trampoline,
1531 ULONGEST *trampoline_size,
1532 unsigned char *jjump_pad_insn,
1533 ULONGEST *jjump_pad_insn_size,
1534 CORE_ADDR *adjusted_insn_addr,
1535 CORE_ADDR *adjusted_insn_addr_end,
1536 char *err)
1537 {
1538 #ifdef __x86_64__
1539 if (is_64bit_tdesc ())
1540 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1541 collector, lockaddr,
1542 orig_size, jump_entry,
1543 trampoline, trampoline_size,
1544 jjump_pad_insn,
1545 jjump_pad_insn_size,
1546 adjusted_insn_addr,
1547 adjusted_insn_addr_end,
1548 err);
1549 #endif
1550
1551 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1552 collector, lockaddr,
1553 orig_size, jump_entry,
1554 trampoline, trampoline_size,
1555 jjump_pad_insn,
1556 jjump_pad_insn_size,
1557 adjusted_insn_addr,
1558 adjusted_insn_addr_end,
1559 err);
1560 }
1561
1562 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1563 architectures. */
1564
1565 static int
1566 x86_get_min_fast_tracepoint_insn_len (void)
1567 {
1568 static int warned_about_fast_tracepoints = 0;
1569
1570 #ifdef __x86_64__
1571 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1572 used for fast tracepoints. */
1573 if (is_64bit_tdesc ())
1574 return 5;
1575 #endif
1576
1577 if (agent_loaded_p ())
1578 {
1579 char errbuf[IPA_BUFSIZ];
1580
1581 errbuf[0] = '\0';
1582
1583 /* On x86, if trampolines are available, then 4-byte jump instructions
1584 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1585 with a 4-byte offset are used instead. */
1586 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1587 return 4;
1588 else
1589 {
1590 /* GDB has no channel to explain to the user why a shorter fast
1591 tracepoint is not possible, but at least make GDBserver
1592 mention that something has gone awry. */
1593 if (!warned_about_fast_tracepoints)
1594 {
1595 warning ("4-byte fast tracepoints not available; %s", errbuf);
1596 warned_about_fast_tracepoints = 1;
1597 }
1598 return 5;
1599 }
1600 }
1601 else
1602 {
1603 /* Indicate that the minimum length is currently unknown since the IPA
1604 has not loaded yet. */
1605 return 0;
1606 }
1607 }
1608
1609 static void
1610 add_insns (unsigned char *start, int len)
1611 {
1612 CORE_ADDR buildaddr = current_insn_ptr;
1613
1614 if (debug_threads)
1615 debug_printf ("Adding %d bytes of insn at %s\n",
1616 len, paddress (buildaddr));
1617
1618 append_insns (&buildaddr, len, start);
1619 current_insn_ptr = buildaddr;
1620 }
1621
1622 /* Our general strategy for emitting code is to avoid specifying raw
1623 bytes whenever possible, and instead copy a block of inline asm
1624 that is embedded in the function. This is a little messy, because
1625 we need to keep the compiler from discarding what looks like dead
1626 code, plus suppress various warnings. */
1627
1628 #define EMIT_ASM(NAME, INSNS) \
1629 do \
1630 { \
1631 extern unsigned char start_ ## NAME, end_ ## NAME; \
1632 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1633 __asm__ ("jmp end_" #NAME "\n" \
1634 "\t" "start_" #NAME ":" \
1635 "\t" INSNS "\n" \
1636 "\t" "end_" #NAME ":"); \
1637 } while (0)
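
/* Usage sketch, not part of the original file: a hypothetical

     EMIT_ASM (example_pop, "pop %rax");

   assembles the instruction into gdbserver itself between the
   start_example_pop and end_example_pop labels; add_insns then copies
   those bytes (here the single byte 0x58) into the inferior's jump pad
   at current_insn_ptr.  amd64_emit_pop further below is exactly this
   pattern.  */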
1638
1639 #ifdef __x86_64__
1640
1641 #define EMIT_ASM32(NAME,INSNS) \
1642 do \
1643 { \
1644 extern unsigned char start_ ## NAME, end_ ## NAME; \
1645 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1646 __asm__ (".code32\n" \
1647 "\t" "jmp end_" #NAME "\n" \
1648 "\t" "start_" #NAME ":\n" \
1649 "\t" INSNS "\n" \
1650 "\t" "end_" #NAME ":\n" \
1651 ".code64\n"); \
1652 } while (0)
1653
1654 #else
1655
1656 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1657
1658 #endif
1659
1660 #ifdef __x86_64__
1661
1662 static void
1663 amd64_emit_prologue (void)
1664 {
1665 EMIT_ASM (amd64_prologue,
1666 "pushq %rbp\n\t"
1667 "movq %rsp,%rbp\n\t"
1668 "sub $0x20,%rsp\n\t"
1669 "movq %rdi,-8(%rbp)\n\t"
1670 "movq %rsi,-16(%rbp)");
1671 }
1672
1673
1674 static void
1675 amd64_emit_epilogue (void)
1676 {
1677 EMIT_ASM (amd64_epilogue,
1678 "movq -16(%rbp),%rdi\n\t"
1679 "movq %rax,(%rdi)\n\t"
1680 "xor %rax,%rax\n\t"
1681 "leave\n\t"
1682 "ret");
1683 }
1684
1685 static void
1686 amd64_emit_add (void)
1687 {
1688 EMIT_ASM (amd64_add,
1689 "add (%rsp),%rax\n\t"
1690 "lea 0x8(%rsp),%rsp");
1691 }
1692
1693 static void
1694 amd64_emit_sub (void)
1695 {
1696 EMIT_ASM (amd64_sub,
1697 "sub %rax,(%rsp)\n\t"
1698 "pop %rax");
1699 }
1700
1701 static void
1702 amd64_emit_mul (void)
1703 {
1704 emit_error = 1;
1705 }
1706
1707 static void
1708 amd64_emit_lsh (void)
1709 {
1710 emit_error = 1;
1711 }
1712
1713 static void
1714 amd64_emit_rsh_signed (void)
1715 {
1716 emit_error = 1;
1717 }
1718
1719 static void
1720 amd64_emit_rsh_unsigned (void)
1721 {
1722 emit_error = 1;
1723 }
1724
1725 static void
1726 amd64_emit_ext (int arg)
1727 {
1728 switch (arg)
1729 {
1730 case 8:
1731 EMIT_ASM (amd64_ext_8,
1732 "cbtw\n\t"
1733 "cwtl\n\t"
1734 "cltq");
1735 break;
1736 case 16:
1737 EMIT_ASM (amd64_ext_16,
1738 "cwtl\n\t"
1739 "cltq");
1740 break;
1741 case 32:
1742 EMIT_ASM (amd64_ext_32,
1743 "cltq");
1744 break;
1745 default:
1746 emit_error = 1;
1747 }
1748 }
1749
1750 static void
1751 amd64_emit_log_not (void)
1752 {
1753 EMIT_ASM (amd64_log_not,
1754 "test %rax,%rax\n\t"
1755 "sete %cl\n\t"
1756 "movzbq %cl,%rax");
1757 }
1758
1759 static void
1760 amd64_emit_bit_and (void)
1761 {
1762 EMIT_ASM (amd64_and,
1763 "and (%rsp),%rax\n\t"
1764 "lea 0x8(%rsp),%rsp");
1765 }
1766
1767 static void
1768 amd64_emit_bit_or (void)
1769 {
1770 EMIT_ASM (amd64_or,
1771 "or (%rsp),%rax\n\t"
1772 "lea 0x8(%rsp),%rsp");
1773 }
1774
1775 static void
1776 amd64_emit_bit_xor (void)
1777 {
1778 EMIT_ASM (amd64_xor,
1779 "xor (%rsp),%rax\n\t"
1780 "lea 0x8(%rsp),%rsp");
1781 }
1782
1783 static void
1784 amd64_emit_bit_not (void)
1785 {
1786 EMIT_ASM (amd64_bit_not,
1787 "xorq $0xffffffffffffffff,%rax");
1788 }
1789
1790 static void
1791 amd64_emit_equal (void)
1792 {
1793 EMIT_ASM (amd64_equal,
1794 "cmp %rax,(%rsp)\n\t"
1795 "je .Lamd64_equal_true\n\t"
1796 "xor %rax,%rax\n\t"
1797 "jmp .Lamd64_equal_end\n\t"
1798 ".Lamd64_equal_true:\n\t"
1799 "mov $0x1,%rax\n\t"
1800 ".Lamd64_equal_end:\n\t"
1801 "lea 0x8(%rsp),%rsp");
1802 }
1803
1804 static void
1805 amd64_emit_less_signed (void)
1806 {
1807 EMIT_ASM (amd64_less_signed,
1808 "cmp %rax,(%rsp)\n\t"
1809 "jl .Lamd64_less_signed_true\n\t"
1810 "xor %rax,%rax\n\t"
1811 "jmp .Lamd64_less_signed_end\n\t"
1812 ".Lamd64_less_signed_true:\n\t"
1813 "mov $1,%rax\n\t"
1814 ".Lamd64_less_signed_end:\n\t"
1815 "lea 0x8(%rsp),%rsp");
1816 }
1817
1818 static void
1819 amd64_emit_less_unsigned (void)
1820 {
1821 EMIT_ASM (amd64_less_unsigned,
1822 "cmp %rax,(%rsp)\n\t"
1823 "jb .Lamd64_less_unsigned_true\n\t"
1824 "xor %rax,%rax\n\t"
1825 "jmp .Lamd64_less_unsigned_end\n\t"
1826 ".Lamd64_less_unsigned_true:\n\t"
1827 "mov $1,%rax\n\t"
1828 ".Lamd64_less_unsigned_end:\n\t"
1829 "lea 0x8(%rsp),%rsp");
1830 }
1831
1832 static void
1833 amd64_emit_ref (int size)
1834 {
1835 switch (size)
1836 {
1837 case 1:
1838 EMIT_ASM (amd64_ref1,
1839 "movb (%rax),%al");
1840 break;
1841 case 2:
1842 EMIT_ASM (amd64_ref2,
1843 "movw (%rax),%ax");
1844 break;
1845 case 4:
1846 EMIT_ASM (amd64_ref4,
1847 "movl (%rax),%eax");
1848 break;
1849 case 8:
1850 EMIT_ASM (amd64_ref8,
1851 "movq (%rax),%rax");
1852 break;
1853 }
1854 }
1855
1856 static void
1857 amd64_emit_if_goto (int *offset_p, int *size_p)
1858 {
1859 EMIT_ASM (amd64_if_goto,
1860 "mov %rax,%rcx\n\t"
1861 "pop %rax\n\t"
1862 "cmp $0,%rcx\n\t"
1863 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1864 if (offset_p)
1865 *offset_p = 10;
1866 if (size_p)
1867 *size_p = 4;
1868 }
1869
1870 static void
1871 amd64_emit_goto (int *offset_p, int *size_p)
1872 {
1873 EMIT_ASM (amd64_goto,
1874 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1875 if (offset_p)
1876 *offset_p = 1;
1877 if (size_p)
1878 *size_p = 4;
1879 }
1880
1881 static void
1882 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1883 {
1884 int diff = (to - (from + size));
1885 unsigned char buf[sizeof (int)];
1886
1887 if (size != 4)
1888 {
1889 emit_error = 1;
1890 return;
1891 }
1892
1893 memcpy (buf, &diff, sizeof (int));
1894 target_write_memory (from, buf, sizeof (int));
1895 }
1896
1897 static void
1898 amd64_emit_const (LONGEST num)
1899 {
1900 unsigned char buf[16];
1901 int i;
1902 CORE_ADDR buildaddr = current_insn_ptr;
1903
1904 i = 0;
1905 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1906 memcpy (&buf[i], &num, sizeof (num));
1907 i += 8;
1908 append_insns (&buildaddr, i, buf);
1909 current_insn_ptr = buildaddr;
1910 }
1911
1912 static void
1913 amd64_emit_call (CORE_ADDR fn)
1914 {
1915 unsigned char buf[16];
1916 int i;
1917 CORE_ADDR buildaddr;
1918 LONGEST offset64;
1919
1920 /* The destination function, being in the shared library, may be
1921 more than 31 bits away from the compiled code pad. */
1922
1923 buildaddr = current_insn_ptr;
1924
1925 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1926
1927 i = 0;
1928
1929 if (offset64 > INT_MAX || offset64 < INT_MIN)
1930 {
1931 /* Offset is too large for a direct call. Use an indirect callq, but that
1932 requires a register, so avoid it if possible. Use %r10; since it is
1933 call-clobbered, we don't have to push/pop it. */
1934 buf[i++] = 0x48; /* mov $fn,%r10 */
1935 buf[i++] = 0xba;
1936 memcpy (buf + i, &fn, 8);
1937 i += 8;
1938 buf[i++] = 0xff; /* callq *%r10 */
1939 buf[i++] = 0xd2;
1940 }
1941 else
1942 {
1943 int offset32 = offset64; /* we know we can't overflow here. */
1944
1945 buf[i++] = 0xe8; /* call <reladdr> */
1946 memcpy (buf + i, &offset32, 4);
1947 i += 4;
1948 }
1949
1950 append_insns (&buildaddr, i, buf);
1951 current_insn_ptr = buildaddr;
1952 }
1953
1954 static void
1955 amd64_emit_reg (int reg)
1956 {
1957 unsigned char buf[16];
1958 int i;
1959 CORE_ADDR buildaddr;
1960
1961 /* Assume raw_regs is still in %rdi. */
1962 buildaddr = current_insn_ptr;
1963 i = 0;
1964 buf[i++] = 0xbe; /* mov $<n>,%esi */
1965 memcpy (&buf[i], &reg, sizeof (reg));
1966 i += 4;
1967 append_insns (&buildaddr, i, buf);
1968 current_insn_ptr = buildaddr;
1969 amd64_emit_call (get_raw_reg_func_addr ());
1970 }
1971
1972 static void
1973 amd64_emit_pop (void)
1974 {
1975 EMIT_ASM (amd64_pop,
1976 "pop %rax");
1977 }
1978
1979 static void
1980 amd64_emit_stack_flush (void)
1981 {
1982 EMIT_ASM (amd64_stack_flush,
1983 "push %rax");
1984 }
1985
1986 static void
1987 amd64_emit_zero_ext (int arg)
1988 {
1989 switch (arg)
1990 {
1991 case 8:
1992 EMIT_ASM (amd64_zero_ext_8,
1993 "and $0xff,%rax");
1994 break;
1995 case 16:
1996 EMIT_ASM (amd64_zero_ext_16,
1997 "and $0xffff,%rax");
1998 break;
1999 case 32:
2000 EMIT_ASM (amd64_zero_ext_32,
2001 "mov $0xffffffff,%rcx\n\t"
2002 "and %rcx,%rax");
2003 break;
2004 default:
2005 emit_error = 1;
2006 }
2007 }
2008
2009 static void
2010 amd64_emit_swap (void)
2011 {
2012 EMIT_ASM (amd64_swap,
2013 "mov %rax,%rcx\n\t"
2014 "pop %rax\n\t"
2015 "push %rcx");
2016 }
2017
2018 static void
2019 amd64_emit_stack_adjust (int n)
2020 {
2021 unsigned char buf[16];
2022 int i;
2023 CORE_ADDR buildaddr = current_insn_ptr;
2024
2025 i = 0;
2026 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2027 buf[i++] = 0x8d;
2028 buf[i++] = 0x64;
2029 buf[i++] = 0x24;
2030 /* This only handles adjustments up to 16, but we don't expect any more. */
2031 buf[i++] = n * 8;
2032 append_insns (&buildaddr, i, buf);
2033 current_insn_ptr = buildaddr;
2034 }
2035
2036 /* FN's prototype is `LONGEST(*fn)(int)'. */
2037
2038 static void
2039 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2040 {
2041 unsigned char buf[16];
2042 int i;
2043 CORE_ADDR buildaddr;
2044
2045 buildaddr = current_insn_ptr;
2046 i = 0;
2047 buf[i++] = 0xbf; /* movl $<n>,%edi */
2048 memcpy (&buf[i], &arg1, sizeof (arg1));
2049 i += 4;
2050 append_insns (&buildaddr, i, buf);
2051 current_insn_ptr = buildaddr;
2052 amd64_emit_call (fn);
2053 }
2054
2055 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2056
2057 static void
2058 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2059 {
2060 unsigned char buf[16];
2061 int i;
2062 CORE_ADDR buildaddr;
2063
2064 buildaddr = current_insn_ptr;
2065 i = 0;
2066 buf[i++] = 0xbf; /* movl $<n>,%edi */
2067 memcpy (&buf[i], &arg1, sizeof (arg1));
2068 i += 4;
2069 append_insns (&buildaddr, i, buf);
2070 current_insn_ptr = buildaddr;
2071 EMIT_ASM (amd64_void_call_2_a,
2072 /* Save away a copy of the stack top. */
2073 "push %rax\n\t"
2074 /* Also pass top as the second argument. */
2075 "mov %rax,%rsi");
2076 amd64_emit_call (fn);
2077 EMIT_ASM (amd64_void_call_2_b,
2078 /* Restore the stack top, %rax may have been trashed. */
2079 "pop %rax");
2080 }
2081
2082 static void
2083 amd64_emit_eq_goto (int *offset_p, int *size_p)
2084 {
2085 EMIT_ASM (amd64_eq,
2086 "cmp %rax,(%rsp)\n\t"
2087 "jne .Lamd64_eq_fallthru\n\t"
2088 "lea 0x8(%rsp),%rsp\n\t"
2089 "pop %rax\n\t"
2090 /* jmp, but don't trust the assembler to choose the right jump */
2091 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2092 ".Lamd64_eq_fallthru:\n\t"
2093 "lea 0x8(%rsp),%rsp\n\t"
2094 "pop %rax");
2095
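/* Report the position of the jmp's 4-byte displacement within the
   sequence emitted above (13 bytes in) and its size, so that
   amd64_write_goto_address can patch in the target later.  The
   other *_goto emitters below follow the same convention.  */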
2096 if (offset_p)
2097 *offset_p = 13;
2098 if (size_p)
2099 *size_p = 4;
2100 }
2101
2102 static void
2103 amd64_emit_ne_goto (int *offset_p, int *size_p)
2104 {
2105 EMIT_ASM (amd64_ne,
2106 "cmp %rax,(%rsp)\n\t"
2107 "je .Lamd64_ne_fallthru\n\t"
2108 "lea 0x8(%rsp),%rsp\n\t"
2109 "pop %rax\n\t"
2110 /* jmp, but don't trust the assembler to choose the right jump */
2111 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2112 ".Lamd64_ne_fallthru:\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2114 "pop %rax");
2115
2116 if (offset_p)
2117 *offset_p = 13;
2118 if (size_p)
2119 *size_p = 4;
2120 }
2121
2122 static void
2123 amd64_emit_lt_goto (int *offset_p, int *size_p)
2124 {
2125 EMIT_ASM (amd64_lt,
2126 "cmp %rax,(%rsp)\n\t"
2127 "jnl .Lamd64_lt_fallthru\n\t"
2128 "lea 0x8(%rsp),%rsp\n\t"
2129 "pop %rax\n\t"
2130 /* jmp, but don't trust the assembler to choose the right jump */
2131 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2132 ".Lamd64_lt_fallthru:\n\t"
2133 "lea 0x8(%rsp),%rsp\n\t"
2134 "pop %rax");
2135
2136 if (offset_p)
2137 *offset_p = 13;
2138 if (size_p)
2139 *size_p = 4;
2140 }
2141
2142 static void
2143 amd64_emit_le_goto (int *offset_p, int *size_p)
2144 {
2145 EMIT_ASM (amd64_le,
2146 "cmp %rax,(%rsp)\n\t"
2147 "jnle .Lamd64_le_fallthru\n\t"
2148 "lea 0x8(%rsp),%rsp\n\t"
2149 "pop %rax\n\t"
2150 /* jmp, but don't trust the assembler to choose the right jump */
2151 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2152 ".Lamd64_le_fallthru:\n\t"
2153 "lea 0x8(%rsp),%rsp\n\t"
2154 "pop %rax");
2155
2156 if (offset_p)
2157 *offset_p = 13;
2158 if (size_p)
2159 *size_p = 4;
2160 }
2161
2162 static void
2163 amd64_emit_gt_goto (int *offset_p, int *size_p)
2164 {
2165 EMIT_ASM (amd64_gt,
2166 "cmp %rax,(%rsp)\n\t"
2167 "jng .Lamd64_gt_fallthru\n\t"
2168 "lea 0x8(%rsp),%rsp\n\t"
2169 "pop %rax\n\t"
2170 /* jmp, but don't trust the assembler to choose the right jump */
2171 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2172 ".Lamd64_gt_fallthru:\n\t"
2173 "lea 0x8(%rsp),%rsp\n\t"
2174 "pop %rax");
2175
2176 if (offset_p)
2177 *offset_p = 13;
2178 if (size_p)
2179 *size_p = 4;
2180 }
2181
2182 static void
2183 amd64_emit_ge_goto (int *offset_p, int *size_p)
2184 {
2185 EMIT_ASM (amd64_ge,
2186 "cmp %rax,(%rsp)\n\t"
2187 "jnge .Lamd64_ge_fallthru\n\t"
2188 ".Lamd64_ge_jump:\n\t"
2189 "lea 0x8(%rsp),%rsp\n\t"
2190 "pop %rax\n\t"
2191 /* jmp, but don't trust the assembler to choose the right jump */
2192 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2193 ".Lamd64_ge_fallthru:\n\t"
2194 "lea 0x8(%rsp),%rsp\n\t"
2195 "pop %rax");
2196
2197 if (offset_p)
2198 *offset_p = 13;
2199 if (size_p)
2200 *size_p = 4;
2201 }
2202
2203 struct emit_ops amd64_emit_ops =
2204 {
2205 amd64_emit_prologue,
2206 amd64_emit_epilogue,
2207 amd64_emit_add,
2208 amd64_emit_sub,
2209 amd64_emit_mul,
2210 amd64_emit_lsh,
2211 amd64_emit_rsh_signed,
2212 amd64_emit_rsh_unsigned,
2213 amd64_emit_ext,
2214 amd64_emit_log_not,
2215 amd64_emit_bit_and,
2216 amd64_emit_bit_or,
2217 amd64_emit_bit_xor,
2218 amd64_emit_bit_not,
2219 amd64_emit_equal,
2220 amd64_emit_less_signed,
2221 amd64_emit_less_unsigned,
2222 amd64_emit_ref,
2223 amd64_emit_if_goto,
2224 amd64_emit_goto,
2225 amd64_write_goto_address,
2226 amd64_emit_const,
2227 amd64_emit_call,
2228 amd64_emit_reg,
2229 amd64_emit_pop,
2230 amd64_emit_stack_flush,
2231 amd64_emit_zero_ext,
2232 amd64_emit_swap,
2233 amd64_emit_stack_adjust,
2234 amd64_emit_int_call_1,
2235 amd64_emit_void_call_2,
2236 amd64_emit_eq_goto,
2237 amd64_emit_ne_goto,
2238 amd64_emit_lt_goto,
2239 amd64_emit_le_goto,
2240 amd64_emit_gt_goto,
2241 amd64_emit_ge_goto
2242 };
2243
2244 #endif /* __x86_64__ */
2245
2246 static void
2247 i386_emit_prologue (void)
2248 {
2249 EMIT_ASM32 (i386_prologue,
2250 "push %ebp\n\t"
2251 "mov %esp,%ebp\n\t"
2252 "push %ebx");
2253 /* At this point, the raw regs base address is at 8(%ebp), and the
2254 value pointer is at 12(%ebp). */
2255 }
2256
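/* Store the 64-bit result (%ebx:%eax, high:low) through the value
   pointer at 12(%ebp), return zero, and restore the saved registers.  */
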
2257 static void
2258 i386_emit_epilogue (void)
2259 {
2260 EMIT_ASM32 (i386_epilogue,
2261 "mov 12(%ebp),%ecx\n\t"
2262 "mov %eax,(%ecx)\n\t"
2263 "mov %ebx,0x4(%ecx)\n\t"
2264 "xor %eax,%eax\n\t"
2265 "pop %ebx\n\t"
2266 "pop %ebp\n\t"
2267 "ret");
2268 }
2269
2270 static void
2271 i386_emit_add (void)
2272 {
2273 EMIT_ASM32 (i386_add,
2274 "add (%esp),%eax\n\t"
2275 "adc 0x4(%esp),%ebx\n\t"
2276 "lea 0x8(%esp),%esp");
2277 }
2278
2279 static void
2280 i386_emit_sub (void)
2281 {
2282 EMIT_ASM32 (i386_sub,
2283 "subl %eax,(%esp)\n\t"
2284 "sbbl %ebx,4(%esp)\n\t"
2285 "pop %eax\n\t"
2286 "pop %ebx\n\t");
2287 }
2288
2289 static void
2290 i386_emit_mul (void)
2291 {
2292 emit_error = 1;
2293 }
2294
2295 static void
2296 i386_emit_lsh (void)
2297 {
2298 emit_error = 1;
2299 }
2300
2301 static void
2302 i386_emit_rsh_signed (void)
2303 {
2304 emit_error = 1;
2305 }
2306
2307 static void
2308 i386_emit_rsh_unsigned (void)
2309 {
2310 emit_error = 1;
2311 }
2312
2313 static void
2314 i386_emit_ext (int arg)
2315 {
2316 switch (arg)
2317 {
2318 case 8:
2319 EMIT_ASM32 (i386_ext_8,
2320 "cbtw\n\t"
2321 "cwtl\n\t"
2322 "movl %eax,%ebx\n\t"
2323 "sarl $31,%ebx");
2324 break;
2325 case 16:
2326 EMIT_ASM32 (i386_ext_16,
2327 "cwtl\n\t"
2328 "movl %eax,%ebx\n\t"
2329 "sarl $31,%ebx");
2330 break;
2331 case 32:
2332 EMIT_ASM32 (i386_ext_32,
2333 "movl %eax,%ebx\n\t"
2334 "sarl $31,%ebx");
2335 break;
2336 default:
2337 emit_error = 1;
2338 }
2339 }
2340
2341 static void
2342 i386_emit_log_not (void)
2343 {
2344 EMIT_ASM32 (i386_log_not,
2345 "or %ebx,%eax\n\t"
2346 "test %eax,%eax\n\t"
2347 "sete %cl\n\t"
2348 "xor %ebx,%ebx\n\t"
2349 "movzbl %cl,%eax");
2350 }
2351
2352 static void
2353 i386_emit_bit_and (void)
2354 {
2355 EMIT_ASM32 (i386_and,
2356 "and (%esp),%eax\n\t"
2357 "and 0x4(%esp),%ebx\n\t"
2358 "lea 0x8(%esp),%esp");
2359 }
2360
2361 static void
2362 i386_emit_bit_or (void)
2363 {
2364 EMIT_ASM32 (i386_or,
2365 "or (%esp),%eax\n\t"
2366 "or 0x4(%esp),%ebx\n\t"
2367 "lea 0x8(%esp),%esp");
2368 }
2369
2370 static void
2371 i386_emit_bit_xor (void)
2372 {
2373 EMIT_ASM32 (i386_xor,
2374 "xor (%esp),%eax\n\t"
2375 "xor 0x4(%esp),%ebx\n\t"
2376 "lea 0x8(%esp),%esp");
2377 }
2378
2379 static void
2380 i386_emit_bit_not (void)
2381 {
2382 EMIT_ASM32 (i386_bit_not,
2383 "xor $0xffffffff,%eax\n\t"
2384 "xor $0xffffffff,%ebx\n\t");
2385 }
2386
2387 static void
2388 i386_emit_equal (void)
2389 {
2390 EMIT_ASM32 (i386_equal,
2391 "cmpl %ebx,4(%esp)\n\t"
2392 "jne .Li386_equal_false\n\t"
2393 "cmpl %eax,(%esp)\n\t"
2394 "je .Li386_equal_true\n\t"
2395 ".Li386_equal_false:\n\t"
2396 "xor %eax,%eax\n\t"
2397 "jmp .Li386_equal_end\n\t"
2398 ".Li386_equal_true:\n\t"
2399 "mov $1,%eax\n\t"
2400 ".Li386_equal_end:\n\t"
2401 "xor %ebx,%ebx\n\t"
2402 "lea 0x8(%esp),%esp");
2403 }
2404
2405 static void
2406 i386_emit_less_signed (void)
2407 {
2408 EMIT_ASM32 (i386_less_signed,
2409 "cmpl %ebx,4(%esp)\n\t"
2410 "jl .Li386_less_signed_true\n\t"
2411 "jne .Li386_less_signed_false\n\t"
2412 "cmpl %eax,(%esp)\n\t"
2413 "jl .Li386_less_signed_true\n\t"
2414 ".Li386_less_signed_false:\n\t"
2415 "xor %eax,%eax\n\t"
2416 "jmp .Li386_less_signed_end\n\t"
2417 ".Li386_less_signed_true:\n\t"
2418 "mov $1,%eax\n\t"
2419 ".Li386_less_signed_end:\n\t"
2420 "xor %ebx,%ebx\n\t"
2421 "lea 0x8(%esp),%esp");
2422 }
2423
2424 static void
2425 i386_emit_less_unsigned (void)
2426 {
2427 EMIT_ASM32 (i386_less_unsigned,
2428 "cmpl %ebx,4(%esp)\n\t"
2429 "jb .Li386_less_unsigned_true\n\t"
2430 "jne .Li386_less_unsigned_false\n\t"
2431 "cmpl %eax,(%esp)\n\t"
2432 "jb .Li386_less_unsigned_true\n\t"
2433 ".Li386_less_unsigned_false:\n\t"
2434 "xor %eax,%eax\n\t"
2435 "jmp .Li386_less_unsigned_end\n\t"
2436 ".Li386_less_unsigned_true:\n\t"
2437 "mov $1,%eax\n\t"
2438 ".Li386_less_unsigned_end:\n\t"
2439 "xor %ebx,%ebx\n\t"
2440 "lea 0x8(%esp),%esp");
2441 }
2442
2443 static void
2444 i386_emit_ref (int size)
2445 {
2446 switch (size)
2447 {
2448 case 1:
2449 EMIT_ASM32 (i386_ref1,
2450 "movb (%eax),%al");
2451 break;
2452 case 2:
2453 EMIT_ASM32 (i386_ref2,
2454 "movw (%eax),%ax");
2455 break;
2456 case 4:
2457 EMIT_ASM32 (i386_ref4,
2458 "movl (%eax),%eax");
2459 break;
2460 case 8:
2461 EMIT_ASM32 (i386_ref8,
2462 "movl 4(%eax),%ebx\n\t"
2463 "movl (%eax),%eax");
2464 break;
2465 }
2466 }
2467
2468 static void
2469 i386_emit_if_goto (int *offset_p, int *size_p)
2470 {
2471 EMIT_ASM32 (i386_if_goto,
2472 "mov %eax,%ecx\n\t"
2473 "or %ebx,%ecx\n\t"
2474 "pop %eax\n\t"
2475 "pop %ebx\n\t"
2476 "cmpl $0,%ecx\n\t"
2477 /* Don't trust the assembler to choose the right jump */
2478 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2479
2480 if (offset_p)
2481 *offset_p = 11; /* Be sure that this matches the sequence above.  */
2482 if (size_p)
2483 *size_p = 4;
2484 }
2485
2486 static void
2487 i386_emit_goto (int *offset_p, int *size_p)
2488 {
2489 EMIT_ASM32 (i386_goto,
2490 /* Don't trust the assembler to choose the right jump */
2491 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2492 if (offset_p)
2493 *offset_p = 1;
2494 if (size_p)
2495 *size_p = 4;
2496 }
2497
2498 static void
2499 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2500 {
2501 int diff = (to - (from + size));
2502 unsigned char buf[sizeof (int)];
2503
2504 /* We're only doing 4-byte sizes at the moment. */
2505 if (size != 4)
2506 {
2507 emit_error = 1;
2508 return;
2509 }
2510
2511 memcpy (buf, &diff, sizeof (int));
2512 target_write_memory (from, buf, sizeof (int));
2513 }
2514
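/* Load the 64-bit constant NUM into the %ebx:%eax pair; when the high
   half is zero, emit an xor of %ebx instead of a second mov.  */
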
2515 static void
2516 i386_emit_const (LONGEST num)
2517 {
2518 unsigned char buf[16];
2519 int i, hi, lo;
2520 CORE_ADDR buildaddr = current_insn_ptr;
2521
2522 i = 0;
2523 buf[i++] = 0xb8; /* mov $<n>,%eax */
2524 lo = num & 0xffffffff;
2525 memcpy (&buf[i], &lo, sizeof (lo));
2526 i += 4;
2527 hi = ((num >> 32) & 0xffffffff);
2528 if (hi)
2529 {
2530 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2531 memcpy (&buf[i], &hi, sizeof (hi));
2532 i += 4;
2533 }
2534 else
2535 {
2536 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2537 }
2538 append_insns (&buildaddr, i, buf);
2539 current_insn_ptr = buildaddr;
2540 }
2541
2542 static void
2543 i386_emit_call (CORE_ADDR fn)
2544 {
2545 unsigned char buf[16];
2546 int i, offset;
2547 CORE_ADDR buildaddr;
2548
2549 buildaddr = current_insn_ptr;
2550 i = 0;
2551 buf[i++] = 0xe8; /* call <reladdr> */
2552 offset = ((int) fn) - (buildaddr + 5);
2553 memcpy (buf + 1, &offset, 4);
2554 append_insns (&buildaddr, 5, buf);
2555 current_insn_ptr = buildaddr;
2556 }
2557
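/* Fetch raw register REG: pass the raw regs base (from 8(%ebp)) and the
   register number on the stack to the get_raw_reg helper, then clear
   %ebx so %ebx:%eax holds the zero-extended result.  */
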
2558 static void
2559 i386_emit_reg (int reg)
2560 {
2561 unsigned char buf[16];
2562 int i;
2563 CORE_ADDR buildaddr;
2564
2565 EMIT_ASM32 (i386_reg_a,
2566 "sub $0x8,%esp");
2567 buildaddr = current_insn_ptr;
2568 i = 0;
2569 buf[i++] = 0xb8; /* mov $<n>,%eax */
2570 memcpy (&buf[i], &reg, sizeof (reg));
2571 i += 4;
2572 append_insns (&buildaddr, i, buf);
2573 current_insn_ptr = buildaddr;
2574 EMIT_ASM32 (i386_reg_b,
2575 "mov %eax,4(%esp)\n\t"
2576 "mov 8(%ebp),%eax\n\t"
2577 "mov %eax,(%esp)");
2578 i386_emit_call (get_raw_reg_func_addr ());
2579 EMIT_ASM32 (i386_reg_c,
2580 "xor %ebx,%ebx\n\t"
2581 "lea 0x8(%esp),%esp");
2582 }
2583
2584 static void
2585 i386_emit_pop (void)
2586 {
2587 EMIT_ASM32 (i386_pop,
2588 "pop %eax\n\t"
2589 "pop %ebx");
2590 }
2591
2592 static void
2593 i386_emit_stack_flush (void)
2594 {
2595 EMIT_ASM32 (i386_stack_flush,
2596 "push %ebx\n\t"
2597 "push %eax");
2598 }
2599
2600 static void
2601 i386_emit_zero_ext (int arg)
2602 {
2603 switch (arg)
2604 {
2605 case 8:
2606 EMIT_ASM32 (i386_zero_ext_8,
2607 "and $0xff,%eax\n\t"
2608 "xor %ebx,%ebx");
2609 break;
2610 case 16:
2611 EMIT_ASM32 (i386_zero_ext_16,
2612 "and $0xffff,%eax\n\t"
2613 "xor %ebx,%ebx");
2614 break;
2615 case 32:
2616 EMIT_ASM32 (i386_zero_ext_32,
2617 "xor %ebx,%ebx");
2618 break;
2619 default:
2620 emit_error = 1;
2621 }
2622 }
2623
2624 static void
2625 i386_emit_swap (void)
2626 {
2627 EMIT_ASM32 (i386_swap,
2628 "mov %eax,%ecx\n\t"
2629 "mov %ebx,%edx\n\t"
2630 "pop %eax\n\t"
2631 "pop %ebx\n\t"
2632 "push %edx\n\t"
2633 "push %ecx");
2634 }
2635
2636 static void
2637 i386_emit_stack_adjust (int n)
2638 {
2639 unsigned char buf[16];
2640 int i;
2641 CORE_ADDR buildaddr = current_insn_ptr;
2642
2643 i = 0;
2644 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2645 buf[i++] = 0x64;
2646 buf[i++] = 0x24;
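/* As on amd64, the adjustment must fit in a signed 8-bit displacement.  */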
2647 buf[i++] = n * 8;
2648 append_insns (&buildaddr, i, buf);
2649 current_insn_ptr = buildaddr;
2650 }
2651
2652 /* FN's prototype is `LONGEST(*fn)(int)'. */
2653
2654 static void
2655 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2656 {
2657 unsigned char buf[16];
2658 int i;
2659 CORE_ADDR buildaddr;
2660
2661 EMIT_ASM32 (i386_int_call_1_a,
2662 /* Reserve a bit of stack space. */
2663 "sub $0x8,%esp");
2664 /* Put the one argument on the stack. */
2665 buildaddr = current_insn_ptr;
2666 i = 0;
2667 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2668 buf[i++] = 0x04;
2669 buf[i++] = 0x24;
2670 memcpy (&buf[i], &arg1, sizeof (arg1));
2671 i += 4;
2672 append_insns (&buildaddr, i, buf);
2673 current_insn_ptr = buildaddr;
2674 i386_emit_call (fn);
2675 EMIT_ASM32 (i386_int_call_1_c,
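/* The 64-bit result comes back in %edx:%eax; copy the high half
   into %ebx to match the %ebx:%eax stack convention.  */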
2676 "mov %edx,%ebx\n\t"
2677 "lea 0x8(%esp),%esp");
2678 }
2679
2680 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2681
2682 static void
2683 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2684 {
2685 unsigned char buf[16];
2686 int i;
2687 CORE_ADDR buildaddr;
2688
2689 EMIT_ASM32 (i386_void_call_2_a,
2690 /* Preserve %eax only; we don't have to worry about %ebx. */
2691 "push %eax\n\t"
2692 /* Reserve a bit of stack space for arguments. */
2693 "sub $0x10,%esp\n\t"
2694 /* Copy "top" to the second argument position. (Note that
2695 we can't assume the function won't scribble on its
2696 arguments, so don't try to restore from this.) */
2697 "mov %eax,4(%esp)\n\t"
2698 "mov %ebx,8(%esp)");
2699 /* Put the first argument on the stack. */
2700 buildaddr = current_insn_ptr;
2701 i = 0;
2702 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2703 buf[i++] = 0x04;
2704 buf[i++] = 0x24;
2705 memcpy (&buf[i], &arg1, sizeof (arg1));
2706 i += 4;
2707 append_insns (&buildaddr, i, buf);
2708 current_insn_ptr = buildaddr;
2709 i386_emit_call (fn);
2710 EMIT_ASM32 (i386_void_call_2_b,
2711 "lea 0x10(%esp),%esp\n\t"
2712 /* Restore original stack top. */
2713 "pop %eax");
2714 }
2715
2716
2717 static void
2718 i386_emit_eq_goto (int *offset_p, int *size_p)
2719 {
2720 EMIT_ASM32 (eq,
2721 /* Check the low half first; it is more likely to be the decider.  */
2722 "cmpl %eax,(%esp)\n\t"
2723 "jne .Leq_fallthru\n\t"
2724 "cmpl %ebx,4(%esp)\n\t"
2725 "jne .Leq_fallthru\n\t"
2726 "lea 0x8(%esp),%esp\n\t"
2727 "pop %eax\n\t"
2728 "pop %ebx\n\t"
2729 /* jmp, but don't trust the assembler to choose the right jump */
2730 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2731 ".Leq_fallthru:\n\t"
2732 "lea 0x8(%esp),%esp\n\t"
2733 "pop %eax\n\t"
2734 "pop %ebx");
2735
2736 if (offset_p)
2737 *offset_p = 18;
2738 if (size_p)
2739 *size_p = 4;
2740 }
2741
2742 static void
2743 i386_emit_ne_goto (int *offset_p, int *size_p)
2744 {
2745 EMIT_ASM32 (ne,
2746 /* Check the low half first; it is more likely to be the decider.  */
2747 "cmpl %eax,(%esp)\n\t"
2748 "jne .Lne_jump\n\t"
2749 "cmpl %ebx,4(%esp)\n\t"
2750 "je .Lne_fallthru\n\t"
2751 ".Lne_jump:\n\t"
2752 "lea 0x8(%esp),%esp\n\t"
2753 "pop %eax\n\t"
2754 "pop %ebx\n\t"
2755 /* jmp, but don't trust the assembler to choose the right jump */
2756 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2757 ".Lne_fallthru:\n\t"
2758 "lea 0x8(%esp),%esp\n\t"
2759 "pop %eax\n\t"
2760 "pop %ebx");
2761
2762 if (offset_p)
2763 *offset_p = 18;
2764 if (size_p)
2765 *size_p = 4;
2766 }
2767
2768 static void
2769 i386_emit_lt_goto (int *offset_p, int *size_p)
2770 {
2771 EMIT_ASM32 (lt,
2772 "cmpl %ebx,4(%esp)\n\t"
2773 "jl .Llt_jump\n\t"
2774 "jne .Llt_fallthru\n\t"
2775 "cmpl %eax,(%esp)\n\t"
2776 "jnl .Llt_fallthru\n\t"
2777 ".Llt_jump:\n\t"
2778 "lea 0x8(%esp),%esp\n\t"
2779 "pop %eax\n\t"
2780 "pop %ebx\n\t"
2781 /* jmp, but don't trust the assembler to choose the right jump */
2782 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2783 ".Llt_fallthru:\n\t"
2784 "lea 0x8(%esp),%esp\n\t"
2785 "pop %eax\n\t"
2786 "pop %ebx");
2787
2788 if (offset_p)
2789 *offset_p = 20;
2790 if (size_p)
2791 *size_p = 4;
2792 }
2793
2794 static void
2795 i386_emit_le_goto (int *offset_p, int *size_p)
2796 {
2797 EMIT_ASM32 (le,
2798 "cmpl %ebx,4(%esp)\n\t"
2799 "jle .Lle_jump\n\t"
2800 "jne .Lle_fallthru\n\t"
2801 "cmpl %eax,(%esp)\n\t"
2802 "jnle .Lle_fallthru\n\t"
2803 ".Lle_jump:\n\t"
2804 "lea 0x8(%esp),%esp\n\t"
2805 "pop %eax\n\t"
2806 "pop %ebx\n\t"
2807 /* jmp, but don't trust the assembler to choose the right jump */
2808 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2809 ".Lle_fallthru:\n\t"
2810 "lea 0x8(%esp),%esp\n\t"
2811 "pop %eax\n\t"
2812 "pop %ebx");
2813
2814 if (offset_p)
2815 *offset_p = 20;
2816 if (size_p)
2817 *size_p = 4;
2818 }
2819
2820 static void
2821 i386_emit_gt_goto (int *offset_p, int *size_p)
2822 {
2823 EMIT_ASM32 (gt,
2824 "cmpl %ebx,4(%esp)\n\t"
2825 "jg .Lgt_jump\n\t"
2826 "jne .Lgt_fallthru\n\t"
2827 "cmpl %eax,(%esp)\n\t"
2828 "jng .Lgt_fallthru\n\t"
2829 ".Lgt_jump:\n\t"
2830 "lea 0x8(%esp),%esp\n\t"
2831 "pop %eax\n\t"
2832 "pop %ebx\n\t"
2833 /* jmp, but don't trust the assembler to choose the right jump */
2834 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2835 ".Lgt_fallthru:\n\t"
2836 "lea 0x8(%esp),%esp\n\t"
2837 "pop %eax\n\t"
2838 "pop %ebx");
2839
2840 if (offset_p)
2841 *offset_p = 20;
2842 if (size_p)
2843 *size_p = 4;
2844 }
2845
2846 static void
2847 i386_emit_ge_goto (int *offset_p, int *size_p)
2848 {
2849 EMIT_ASM32 (ge,
2850 "cmpl %ebx,4(%esp)\n\t"
2851 "jge .Lge_jump\n\t"
2852 "jne .Lge_fallthru\n\t"
2853 "cmpl %eax,(%esp)\n\t"
2854 "jnge .Lge_fallthru\n\t"
2855 ".Lge_jump:\n\t"
2856 "lea 0x8(%esp),%esp\n\t"
2857 "pop %eax\n\t"
2858 "pop %ebx\n\t"
2859 /* jmp, but don't trust the assembler to choose the right jump */
2860 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2861 ".Lge_fallthru:\n\t"
2862 "lea 0x8(%esp),%esp\n\t"
2863 "pop %eax\n\t"
2864 "pop %ebx");
2865
2866 if (offset_p)
2867 *offset_p = 20;
2868 if (size_p)
2869 *size_p = 4;
2870 }
2871
2872 struct emit_ops i386_emit_ops =
2873 {
2874 i386_emit_prologue,
2875 i386_emit_epilogue,
2876 i386_emit_add,
2877 i386_emit_sub,
2878 i386_emit_mul,
2879 i386_emit_lsh,
2880 i386_emit_rsh_signed,
2881 i386_emit_rsh_unsigned,
2882 i386_emit_ext,
2883 i386_emit_log_not,
2884 i386_emit_bit_and,
2885 i386_emit_bit_or,
2886 i386_emit_bit_xor,
2887 i386_emit_bit_not,
2888 i386_emit_equal,
2889 i386_emit_less_signed,
2890 i386_emit_less_unsigned,
2891 i386_emit_ref,
2892 i386_emit_if_goto,
2893 i386_emit_goto,
2894 i386_write_goto_address,
2895 i386_emit_const,
2896 i386_emit_call,
2897 i386_emit_reg,
2898 i386_emit_pop,
2899 i386_emit_stack_flush,
2900 i386_emit_zero_ext,
2901 i386_emit_swap,
2902 i386_emit_stack_adjust,
2903 i386_emit_int_call_1,
2904 i386_emit_void_call_2,
2905 i386_emit_eq_goto,
2906 i386_emit_ne_goto,
2907 i386_emit_lt_goto,
2908 i386_emit_le_goto,
2909 i386_emit_gt_goto,
2910 i386_emit_ge_goto
2911 };
2912
2913
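/* Return the emit_ops table matching the inferior: amd64_emit_ops for
   64-bit inferiors, i386_emit_ops otherwise.  */
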
2914 static struct emit_ops *
2915 x86_emit_ops (void)
2916 {
2917 #ifdef __x86_64__
2918 if (is_64bit_tdesc ())
2919 return &amd64_emit_ops;
2920 else
2921 #endif
2922 return &i386_emit_ops;
2923 }
2924
2925 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2926
2927 const gdb_byte *
2928 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2929 {
2930 *size = x86_breakpoint_len;
2931 return x86_breakpoint;
2932 }
2933
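/* Implementation of linux_target_ops method "supports_range_stepping".  */
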
2934 static int
2935 x86_supports_range_stepping (void)
2936 {
2937 return 1;
2938 }
2939
2940 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2941 */
2942
2943 static int
2944 x86_supports_hardware_single_step (void)
2945 {
2946 return 1;
2947 }
2948
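/* Implementation of linux_target_ops method "get_ipa_tdesc_idx".  */
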
2949 static int
2950 x86_get_ipa_tdesc_idx (void)
2951 {
2952 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2953 const struct target_desc *tdesc = regcache->tdesc;
2954
2955 #ifdef __x86_64__
2956 return amd64_get_ipa_tdesc_idx (tdesc);
2957 #endif
2958
2959 if (tdesc == tdesc_i386_linux_no_xml)
2960 return X86_TDESC_SSE;
2961
2962 return i386_get_ipa_tdesc_idx (tdesc);
2963 }
2964
2965 /* This is initialized assuming an amd64 target.
2966 low_arch_setup will correct it for i386 or amd64 targets. */
2967
2968 struct linux_target_ops the_low_target =
2969 {
2970 x86_supports_tracepoints,
2971 x86_get_thread_area,
2972 x86_install_fast_tracepoint_jump_pad,
2973 x86_emit_ops,
2974 x86_get_min_fast_tracepoint_insn_len,
2975 x86_supports_range_stepping,
2976 x86_supports_hardware_single_step,
2977 x86_get_syscall_trapinfo,
2978 x86_get_ipa_tdesc_idx,
2979 };
2980
2981 /* The linux target ops object. */
2982
2983 linux_process_target *the_linux_target = &the_x86_target;
2984
2985 void
2986 initialize_low_arch (void)
2987 {
2988 /* Initialize the Linux target descriptions. */
2989 #ifdef __x86_64__
2990 tdesc_amd64_linux_no_xml = allocate_target_description ();
2991 copy_target_description (tdesc_amd64_linux_no_xml,
2992 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2993 false));
2994 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2995 #endif
2996
2997 tdesc_i386_linux_no_xml = allocate_target_description ();
2998 copy_target_description (tdesc_i386_linux_no_xml,
2999 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3000 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3001
3002 initialize_regsets_info (&x86_regsets_info);
3003 }