gdbserver/linux-low: turn 'siginfo_fixup' into a method
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 /* Update the target description of all processes; a new GDB has
104 connected, and it may or may not support xml target descriptions. */
105 void update_xmltarget ();
106
107 const regs_info *get_regs_info () override;
108
109 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
110
111 bool supports_z_point_type (char z_type) override;
112
113 protected:
114
115 void low_arch_setup () override;
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
126
127 int low_decr_pc_after_break () override;
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
130
131 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
132 int size, raw_breakpoint *bp) override;
133
134 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
135 int size, raw_breakpoint *bp) override;
136
137 bool low_stopped_by_watchpoint () override;
138
139 CORE_ADDR low_stopped_data_address () override;
140
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
144
145 /* Need to fix up i386 siginfo if host is amd64. */
146 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
147 int direction) override;
148 };
149
150 /* The singleton target ops object. */
151
152 static x86_target the_x86_target;
153
154 /* Per-process arch-specific data we want to keep. */
155
156 struct arch_process_info
157 {
158 struct x86_debug_reg_state debug_reg_state;
159 };
160
161 #ifdef __x86_64__
162
163 /* Mapping between the general-purpose registers in `struct user'
164 format and GDB's register array layout.
165 Note that the transfer layout uses 64-bit regs. */
166 static /*const*/ int i386_regmap[] =
167 {
168 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
169 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
170 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
171 DS * 8, ES * 8, FS * 8, GS * 8
172 };
173
174 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
175
176 /* So that the code below doesn't have to care whether it's i386 or amd64. */
177 #define ORIG_EAX ORIG_RAX
178 #define REGSIZE 8
179
180 static const int x86_64_regmap[] =
181 {
182 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
183 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
184 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
185 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
186 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
187 DS * 8, ES * 8, FS * 8, GS * 8,
188 -1, -1, -1, -1, -1, -1, -1, -1,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1,
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 ORIG_RAX * 8,
194 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
195 21 * 8, 22 * 8,
196 #else
197 -1, -1,
198 #endif
199 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
200 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
201 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1,
203 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
206 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
207 -1, -1, -1, -1, -1, -1, -1, -1,
208 -1, -1, -1, -1, -1, -1, -1, -1,
209 -1, -1, -1, -1, -1, -1, -1, -1,
210 -1 /* pkru */
211 };
212
213 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
214 #define X86_64_USER_REGS (GS + 1)
215
216 #else /* ! __x86_64__ */
217
218 /* Mapping between the general-purpose registers in `struct user'
219 format and GDB's register array layout. */
220 static /*const*/ int i386_regmap[] =
221 {
222 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
223 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
224 EIP * 4, EFL * 4, CS * 4, SS * 4,
225 DS * 4, ES * 4, FS * 4, GS * 4
226 };
227
228 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
229
230 #define REGSIZE 4
231
232 #endif
233
234 #ifdef __x86_64__
235
236 /* Returns true if the current inferior belongs to an x86-64 process,
237 per the tdesc. */
238
239 static int
240 is_64bit_tdesc (void)
241 {
242 struct regcache *regcache = get_thread_regcache (current_thread, 0);
243
244 return register_size (regcache->tdesc, 0) == 8;
245 }
246
247 #endif
248
249 \f
250 /* Called by libthread_db. */
251
252 ps_err_e
253 ps_get_thread_area (struct ps_prochandle *ph,
254 lwpid_t lwpid, int idx, void **base)
255 {
256 #ifdef __x86_64__
257 int use_64bit = is_64bit_tdesc ();
258
259 if (use_64bit)
260 {
261 switch (idx)
262 {
263 case FS:
264 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
265 return PS_OK;
266 break;
267 case GS:
268 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
269 return PS_OK;
270 break;
271 default:
272 return PS_BADADDR;
273 }
274 return PS_ERR;
275 }
276 #endif
277
278 {
279 unsigned int desc[4];
280
281 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
282 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
283 return PS_ERR;
284
285 /* Ensure we properly extend the value to 64-bits for x86_64. */
286 *base = (void *) (uintptr_t) desc[1];
287 return PS_OK;
288 }
289 }
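
/* A note on the desc[] layout relied on above (and again in
   x86_get_thread_area below): PTRACE_GET_THREAD_AREA fills in a struct
   user_desc from <asm/ldt.h>, whose first two members are entry_number
   and base_addr, so desc[1] is the segment's base address.  */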
290
291 /* Get the thread area address. This is used to recognize which
292 thread is which when tracing with the in-process agent library. We
293 don't read anything from the address, and treat it as opaque; it's
294 the address itself that we assume is unique per-thread. */
295
296 static int
297 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
298 {
299 #ifdef __x86_64__
300 int use_64bit = is_64bit_tdesc ();
301
302 if (use_64bit)
303 {
304 void *base;
305 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
306 {
307 *addr = (CORE_ADDR) (uintptr_t) base;
308 return 0;
309 }
310
311 return -1;
312 }
313 #endif
314
315 {
316 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
317 struct thread_info *thr = get_lwp_thread (lwp);
318 struct regcache *regcache = get_thread_regcache (thr, 1);
319 unsigned int desc[4];
320 ULONGEST gs = 0;
321 const int reg_thread_area = 3; /* bits to scale down register value. */
322 int idx;
323
324 collect_register_by_name (regcache, "gs", &gs);
325
326 idx = gs >> reg_thread_area;
327
328 if (ptrace (PTRACE_GET_THREAD_AREA,
329 lwpid_of (thr),
330 (void *) (long) idx, (unsigned long) &desc) < 0)
331 return -1;
332
333 *addr = desc[1];
334 return 0;
335 }
336 }
337
338
339 \f
340 bool
341 x86_target::low_cannot_store_register (int regno)
342 {
343 #ifdef __x86_64__
344 if (is_64bit_tdesc ())
345 return false;
346 #endif
347
348 return regno >= I386_NUM_REGS;
349 }
350
351 bool
352 x86_target::low_cannot_fetch_register (int regno)
353 {
354 #ifdef __x86_64__
355 if (is_64bit_tdesc ())
356 return false;
357 #endif
358
359 return regno >= I386_NUM_REGS;
360 }
361
362 static void
363 x86_fill_gregset (struct regcache *regcache, void *buf)
364 {
365 int i;
366
367 #ifdef __x86_64__
368 if (register_size (regcache->tdesc, 0) == 8)
369 {
370 for (i = 0; i < X86_64_NUM_REGS; i++)
371 if (x86_64_regmap[i] != -1)
372 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
373
374 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
375 {
376 unsigned long base;
377 int lwpid = lwpid_of (current_thread);
378
379 collect_register_by_name (regcache, "fs_base", &base);
380 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
381
382 collect_register_by_name (regcache, "gs_base", &base);
383 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
384 }
385 #endif
386
387 return;
388 }
389
390 /* 32-bit inferior registers need to be zero-extended.
391 Callers would read uninitialized memory otherwise. */
392 memset (buf, 0x00, X86_64_USER_REGS * 8);
393 #endif
394
395 for (i = 0; i < I386_NUM_REGS; i++)
396 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
397
398 collect_register_by_name (regcache, "orig_eax",
399 ((char *) buf) + ORIG_EAX * REGSIZE);
400
401 #ifdef __x86_64__
402 /* Sign extend EAX value to avoid potential syscall restart
403 problems.
404
405 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
406 for a detailed explanation. */
407 if (register_size (regcache->tdesc, 0) == 4)
408 {
409 void *ptr = ((gdb_byte *) buf
410 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
411
412 *(int64_t *) ptr = *(int32_t *) ptr;
413 }
414 #endif
415 }
416
417 static void
418 x86_store_gregset (struct regcache *regcache, const void *buf)
419 {
420 int i;
421
422 #ifdef __x86_64__
423 if (register_size (regcache->tdesc, 0) == 8)
424 {
425 for (i = 0; i < X86_64_NUM_REGS; i++)
426 if (x86_64_regmap[i] != -1)
427 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
428
429 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
430 {
431 unsigned long base;
432 int lwpid = lwpid_of (current_thread);
433
434 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
435 supply_register_by_name (regcache, "fs_base", &base);
436
437 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
438 supply_register_by_name (regcache, "gs_base", &base);
439 }
440 #endif
441 return;
442 }
443 #endif
444
445 for (i = 0; i < I386_NUM_REGS; i++)
446 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
447
448 supply_register_by_name (regcache, "orig_eax",
449 ((char *) buf) + ORIG_EAX * REGSIZE);
450 }
451
452 static void
453 x86_fill_fpregset (struct regcache *regcache, void *buf)
454 {
455 #ifdef __x86_64__
456 i387_cache_to_fxsave (regcache, buf);
457 #else
458 i387_cache_to_fsave (regcache, buf);
459 #endif
460 }
461
462 static void
463 x86_store_fpregset (struct regcache *regcache, const void *buf)
464 {
465 #ifdef __x86_64__
466 i387_fxsave_to_cache (regcache, buf);
467 #else
468 i387_fsave_to_cache (regcache, buf);
469 #endif
470 }
471
472 #ifndef __x86_64__
473
474 static void
475 x86_fill_fpxregset (struct regcache *regcache, void *buf)
476 {
477 i387_cache_to_fxsave (regcache, buf);
478 }
479
480 static void
481 x86_store_fpxregset (struct regcache *regcache, const void *buf)
482 {
483 i387_fxsave_to_cache (regcache, buf);
484 }
485
486 #endif
487
488 static void
489 x86_fill_xstateregset (struct regcache *regcache, void *buf)
490 {
491 i387_cache_to_xsave (regcache, buf);
492 }
493
494 static void
495 x86_store_xstateregset (struct regcache *regcache, const void *buf)
496 {
497 i387_xsave_to_cache (regcache, buf);
498 }
499
500 /* ??? The non-biarch i386 case stores all the i387 regs twice.
501 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
502 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
503 doesn't work. It would be nice to avoid the duplication in the case
504 where it does work. Maybe the arch_setup routine could check whether
505 it works and update the supported regsets accordingly. */
506
507 static struct regset_info x86_regsets[] =
508 {
509 #ifdef HAVE_PTRACE_GETREGS
510 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
511 GENERAL_REGS,
512 x86_fill_gregset, x86_store_gregset },
513 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
514 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
515 # ifndef __x86_64__
516 # ifdef HAVE_PTRACE_GETFPXREGS
517 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
518 EXTENDED_REGS,
519 x86_fill_fpxregset, x86_store_fpxregset },
520 # endif
521 # endif
522 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
523 FP_REGS,
524 x86_fill_fpregset, x86_store_fpregset },
525 #endif /* HAVE_PTRACE_GETREGS */
526 NULL_REGSET
527 };
528
529 bool
530 x86_target::low_supports_breakpoints ()
531 {
532 return true;
533 }
534
535 CORE_ADDR
536 x86_target::low_get_pc (regcache *regcache)
537 {
538 int use_64bit = register_size (regcache->tdesc, 0) == 8;
539
540 if (use_64bit)
541 {
542 uint64_t pc;
543
544 collect_register_by_name (regcache, "rip", &pc);
545 return (CORE_ADDR) pc;
546 }
547 else
548 {
549 uint32_t pc;
550
551 collect_register_by_name (regcache, "eip", &pc);
552 return (CORE_ADDR) pc;
553 }
554 }
555
556 void
557 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
558 {
559 int use_64bit = register_size (regcache->tdesc, 0) == 8;
560
561 if (use_64bit)
562 {
563 uint64_t newpc = pc;
564
565 supply_register_by_name (regcache, "rip", &newpc);
566 }
567 else
568 {
569 uint32_t newpc = pc;
570
571 supply_register_by_name (regcache, "eip", &newpc);
572 }
573 }
574
575 int
576 x86_target::low_decr_pc_after_break ()
577 {
578 return 1;
579 }
580
581 \f
582 static const gdb_byte x86_breakpoint[] = { 0xCC };
583 #define x86_breakpoint_len 1
584
585 bool
586 x86_target::low_breakpoint_at (CORE_ADDR pc)
587 {
588 unsigned char c;
589
590 read_memory (pc, &c, 1);
591 if (c == 0xCC)
592 return true;
593
594 return false;
595 }
596 \f
597 /* Low-level function vector. */
598 struct x86_dr_low_type x86_dr_low =
599 {
600 x86_linux_dr_set_control,
601 x86_linux_dr_set_addr,
602 x86_linux_dr_get_addr,
603 x86_linux_dr_get_status,
604 x86_linux_dr_get_control,
605 sizeof (void *),
606 };
607 \f
608 /* Breakpoint/Watchpoint support. */
609
610 bool
611 x86_target::supports_z_point_type (char z_type)
612 {
613 switch (z_type)
614 {
615 case Z_PACKET_SW_BP:
616 case Z_PACKET_HW_BP:
617 case Z_PACKET_WRITE_WP:
618 case Z_PACKET_ACCESS_WP:
619 return true;
620 default:
621 return false;
622 }
623 }
624
625 int
626 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
627 int size, raw_breakpoint *bp)
628 {
629 struct process_info *proc = current_process ();
630
631 switch (type)
632 {
633 case raw_bkpt_type_hw:
634 case raw_bkpt_type_write_wp:
635 case raw_bkpt_type_access_wp:
636 {
637 enum target_hw_bp_type hw_type
638 = raw_bkpt_type_to_target_hw_bp_type (type);
639 struct x86_debug_reg_state *state
640 = &proc->priv->arch_private->debug_reg_state;
641
642 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
643 }
644
645 default:
646 /* Unsupported. */
647 return 1;
648 }
649 }
650
651 int
652 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
653 int size, raw_breakpoint *bp)
654 {
655 struct process_info *proc = current_process ();
656
657 switch (type)
658 {
659 case raw_bkpt_type_hw:
660 case raw_bkpt_type_write_wp:
661 case raw_bkpt_type_access_wp:
662 {
663 enum target_hw_bp_type hw_type
664 = raw_bkpt_type_to_target_hw_bp_type (type);
665 struct x86_debug_reg_state *state
666 = &proc->priv->arch_private->debug_reg_state;
667
668 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
669 }
670 default:
671 /* Unsupported. */
672 return 1;
673 }
674 }
675
676 bool
677 x86_target::low_stopped_by_watchpoint ()
678 {
679 struct process_info *proc = current_process ();
680 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
681 }
682
683 CORE_ADDR
684 x86_target::low_stopped_data_address ()
685 {
686 struct process_info *proc = current_process ();
687 CORE_ADDR addr;
688 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
689 &addr))
690 return addr;
691 return 0;
692 }
693 \f
694 /* Called when a new process is created. */
695
696 static struct arch_process_info *
697 x86_linux_new_process (void)
698 {
699 struct arch_process_info *info = XCNEW (struct arch_process_info);
700
701 x86_low_init_dregs (&info->debug_reg_state);
702
703 return info;
704 }
705
706 /* Called when a process is being deleted. */
707
708 static void
709 x86_linux_delete_process (struct arch_process_info *info)
710 {
711 xfree (info);
712 }
713
714 /* Target routine for linux_new_fork. */
715
716 static void
717 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
718 {
719 /* These are allocated by linux_add_process. */
720 gdb_assert (parent->priv != NULL
721 && parent->priv->arch_private != NULL);
722 gdb_assert (child->priv != NULL
723 && child->priv->arch_private != NULL);
724
725 /* Linux kernels before 2.6.33 commit
726 72f674d203cd230426437cdcf7dd6f681dad8b0d
727 inherit hardware debug registers from the parent
728 on fork/vfork/clone. Newer Linux kernels create such tasks with
729 zeroed debug registers.
730
731 GDB core assumes the child inherits the watchpoints/hw
732 breakpoints of the parent, and will remove them all from the
733 forked-off process. Copy the debug register mirrors into the
734 new process so that all breakpoints and watchpoints can be
735 removed together. The debug register mirrors are zeroed out
736 in the end before detaching the forked-off process, thus keeping
737 this compatible with older Linux kernels too. */
738
739 *child->priv->arch_private = *parent->priv->arch_private;
740 }
741
742 /* See nat/x86-dregs.h. */
743
744 struct x86_debug_reg_state *
745 x86_debug_reg_state (pid_t pid)
746 {
747 struct process_info *proc = find_process_pid (pid);
748
749 return &proc->priv->arch_private->debug_reg_state;
750 }
751 \f
752 /* When GDBSERVER is built as a 64-bit application on Linux, the
753 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
754 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
755 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
756 conversion in-place ourselves. */
757
758 /* Convert a ptrace/host siginfo object into/from the siginfo in the
759 layout of the inferior's architecture. Returns true if any
760 conversion was done; false otherwise. If DIRECTION is 1, then copy
761 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
762 INF. */
763
764 bool
765 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
766 {
767 #ifdef __x86_64__
768 unsigned int machine;
769 int tid = lwpid_of (current_thread);
770 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
771
772 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
773 if (!is_64bit_tdesc ())
774 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
775 FIXUP_32);
776 /* No fixup for native x32 GDB. */
777 else if (!is_elf64 && sizeof (void *) == 8)
778 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
779 FIXUP_X32);
780 #endif
781
782 return false;
783 }
784 \f
785 static int use_xml;
786
787 /* Format of XSAVE extended state is:
788 struct
789 {
790 fxsave_bytes[0..463]
791 sw_usable_bytes[464..511]
792 xstate_hdr_bytes[512..575]
793 avx_bytes[576..831]
794 future_state etc
795 };
796
797 Same memory layout will be used for the coredump NT_X86_XSTATE
798 representing the XSAVE extended state registers.
799
800 The first 8 bytes of sw_usable_bytes[464..471] hold the OS-enabled
801 extended state mask, which is the same as the extended control register
802 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
803 together with the mask saved in the xstate_hdr_bytes to determine what
804 states the processor/OS supports and what state, used or initialized,
805 the process/thread is in. */
806 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
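
/* As an illustrative sketch (the XCR0 read in x86_linux_read_description
   below does the equivalent), recovering XCR0 from a buffer filled by
   PTRACE_GETREGSET with NT_X86_XSTATE looks roughly like this:

     uint64_t xstateregs[X86_XSTATE_SSE_SIZE / sizeof (uint64_t)];
     struct iovec iov = { xstateregs, sizeof (xstateregs) };

     if (ptrace (PTRACE_GETREGSET, tid, (unsigned int) NT_X86_XSTATE,
                 (long) &iov) == 0)
       xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   Offset 464 is the start of sw_usable_bytes, i.e. the first byte past
   the fxsave-defined portion of the XSAVE area.  */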
807
808 /* Does the current host support the GETFPXREGS request? The header
809 file may or may not define it, and even if it is defined, the
810 kernel will return EIO if it's running on a pre-SSE processor. */
811 int have_ptrace_getfpxregs =
812 #ifdef HAVE_PTRACE_GETFPXREGS
813 -1
814 #else
815 0
816 #endif
817 ;
818
819 /* Get Linux/x86 target description from running target. */
820
821 static const struct target_desc *
822 x86_linux_read_description (void)
823 {
824 unsigned int machine;
825 int is_elf64;
826 int xcr0_features;
827 int tid;
828 static uint64_t xcr0;
829 struct regset_info *regset;
830
831 tid = lwpid_of (current_thread);
832
833 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
834
835 if (sizeof (void *) == 4)
836 {
837 if (is_elf64 > 0)
838 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
839 #ifndef __x86_64__
840 else if (machine == EM_X86_64)
841 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
842 #endif
843 }
844
845 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
846 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
847 {
848 elf_fpxregset_t fpxregs;
849
850 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
851 {
852 have_ptrace_getfpxregs = 0;
853 have_ptrace_getregset = 0;
854 return i386_linux_read_description (X86_XSTATE_X87);
855 }
856 else
857 have_ptrace_getfpxregs = 1;
858 }
859 #endif
860
861 if (!use_xml)
862 {
863 x86_xcr0 = X86_XSTATE_SSE_MASK;
864
865 /* Don't use XML. */
866 #ifdef __x86_64__
867 if (machine == EM_X86_64)
868 return tdesc_amd64_linux_no_xml;
869 else
870 #endif
871 return tdesc_i386_linux_no_xml;
872 }
873
874 if (have_ptrace_getregset == -1)
875 {
876 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
877 struct iovec iov;
878
879 iov.iov_base = xstateregs;
880 iov.iov_len = sizeof (xstateregs);
881
882 /* Check if PTRACE_GETREGSET works. */
883 if (ptrace (PTRACE_GETREGSET, tid,
884 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
885 have_ptrace_getregset = 0;
886 else
887 {
888 have_ptrace_getregset = 1;
889
890 /* Get XCR0 from XSAVE extended state. */
891 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
892 / sizeof (uint64_t))];
893
894 /* Use PTRACE_GETREGSET if it is available. */
895 for (regset = x86_regsets;
896 regset->fill_function != NULL; regset++)
897 if (regset->get_request == PTRACE_GETREGSET)
898 regset->size = X86_XSTATE_SIZE (xcr0);
899 else if (regset->type != GENERAL_REGS)
900 regset->size = 0;
901 }
902 }
903
904 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
905 xcr0_features = (have_ptrace_getregset
906 && (xcr0 & X86_XSTATE_ALL_MASK));
907
908 if (xcr0_features)
909 x86_xcr0 = xcr0;
910
911 if (machine == EM_X86_64)
912 {
913 #ifdef __x86_64__
914 const target_desc *tdesc = NULL;
915
916 if (xcr0_features)
917 {
918 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
919 !is_elf64);
920 }
921
922 if (tdesc == NULL)
923 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
924 return tdesc;
925 #endif
926 }
927 else
928 {
929 const target_desc *tdesc = NULL;
930
931 if (xcr0_features)
932 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
933
934 if (tdesc == NULL)
935 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
936
937 return tdesc;
938 }
939
940 gdb_assert_not_reached ("failed to return tdesc");
941 }
942
943 /* Update the target description of all processes; a new GDB has
944 connected, and it may or may not support xml target descriptions. */
945
946 void
947 x86_target::update_xmltarget ()
948 {
949 struct thread_info *saved_thread = current_thread;
950
951 /* Before changing the register cache's internal layout, flush the
952 contents of the current valid caches back to the threads, and
953 release the current regcache objects. */
954 regcache_release ();
955
956 for_each_process ([this] (process_info *proc) {
957 int pid = proc->pid;
958
959 /* Look up any thread of this process. */
960 current_thread = find_any_thread_of_pid (pid);
961
962 low_arch_setup ();
963 });
964
965 current_thread = saved_thread;
966 }
967
968 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
969 PTRACE_GETREGSET. */
970
971 static void
972 x86_linux_process_qsupported (char **features, int count)
973 {
974 int i;
975
976 /* Assume gdb doesn't support XML unless it sends "xmlRegisters="
977 with "i386" in its qSupported query; in that case it supports x86
978 XML target descriptions. */
979 use_xml = 0;
980 for (i = 0; i < count; i++)
981 {
982 const char *feature = features[i];
983
984 if (startswith (feature, "xmlRegisters="))
985 {
986 char *copy = xstrdup (feature + 13);
987
988 char *saveptr;
989 for (char *p = strtok_r (copy, ",", &saveptr);
990 p != NULL;
991 p = strtok_r (NULL, ",", &saveptr))
992 {
993 if (strcmp (p, "i386") == 0)
994 {
995 use_xml = 1;
996 break;
997 }
998 }
999
1000 free (copy);
1001 }
1002 }
1003 the_x86_target.update_xmltarget ();
1004 }
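
/* For example, a GDB that understands x86 XML target descriptions is
   expected to send something like "xmlRegisters=i386" (the list may name
   other architectures too) in its qSupported packet; the loop above then
   sets use_xml, and x86_linux_read_description stops falling back to the
   fixed *_no_xml descriptions.  */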
1005
1006 /* Common for x86/x86-64. */
1007
1008 static struct regsets_info x86_regsets_info =
1009 {
1010 x86_regsets, /* regsets */
1011 0, /* num_regsets */
1012 NULL, /* disabled_regsets */
1013 };
1014
1015 #ifdef __x86_64__
1016 static struct regs_info amd64_linux_regs_info =
1017 {
1018 NULL, /* regset_bitmap */
1019 NULL, /* usrregs_info */
1020 &x86_regsets_info
1021 };
1022 #endif
1023 static struct usrregs_info i386_linux_usrregs_info =
1024 {
1025 I386_NUM_REGS,
1026 i386_regmap,
1027 };
1028
1029 static struct regs_info i386_linux_regs_info =
1030 {
1031 NULL, /* regset_bitmap */
1032 &i386_linux_usrregs_info,
1033 &x86_regsets_info
1034 };
1035
1036 const regs_info *
1037 x86_target::get_regs_info ()
1038 {
1039 #ifdef __x86_64__
1040 if (is_64bit_tdesc ())
1041 return &amd64_linux_regs_info;
1042 else
1043 #endif
1044 return &i386_linux_regs_info;
1045 }
1046
1047 /* Initialize the target description for the architecture of the
1048 inferior. */
1049
1050 void
1051 x86_target::low_arch_setup ()
1052 {
1053 current_process ()->tdesc = x86_linux_read_description ();
1054 }
1055
1056 /* Fill *SYSNO with the number of the syscall that was trapped. This
1057 should only be called if LWP got a SYSCALL_SIGTRAP. */
1058
1059 static void
1060 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1061 {
1062 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1063
1064 if (use_64bit)
1065 {
1066 long l_sysno;
1067
1068 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1069 *sysno = (int) l_sysno;
1070 }
1071 else
1072 collect_register_by_name (regcache, "orig_eax", sysno);
1073 }
1074
1075 static int
1076 x86_supports_tracepoints (void)
1077 {
1078 return 1;
1079 }
1080
1081 static void
1082 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1083 {
1084 target_write_memory (*to, buf, len);
1085 *to += len;
1086 }
1087
1088 static int
1089 push_opcode (unsigned char *buf, const char *op)
1090 {
1091 unsigned char *buf_org = buf;
1092
1093 while (1)
1094 {
1095 char *endptr;
1096 unsigned long ul = strtoul (op, &endptr, 16);
1097
1098 if (endptr == op)
1099 break;
1100
1101 *buf++ = ul;
1102 op = endptr;
1103 }
1104
1105 return buf - buf_org;
1106 }
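
/* A minimal usage sketch of push_opcode, using the "sub $0x18,%rsp"
   encoding that the amd64 jump-pad builder below also emits: the hex
   string is consumed byte by byte with strtoul, so

     unsigned char buf[16];
     int len = push_opcode (buf, "48 83 ec 18");

   leaves len == 4 and buf holding { 0x48, 0x83, 0xec, 0x18 }, ready to be
   written out with append_insns.  */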
1107
1108 #ifdef __x86_64__
1109
1110 /* Build a jump pad that saves registers and calls a collection
1111 function. Writes the jump instruction that leads to the jump pad to
1112 JJUMPAD_INSN. The caller is responsible for writing it in at the
1113 tracepoint address. */
1114
1115 static int
1116 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1117 CORE_ADDR collector,
1118 CORE_ADDR lockaddr,
1119 ULONGEST orig_size,
1120 CORE_ADDR *jump_entry,
1121 CORE_ADDR *trampoline,
1122 ULONGEST *trampoline_size,
1123 unsigned char *jjump_pad_insn,
1124 ULONGEST *jjump_pad_insn_size,
1125 CORE_ADDR *adjusted_insn_addr,
1126 CORE_ADDR *adjusted_insn_addr_end,
1127 char *err)
1128 {
1129 unsigned char buf[40];
1130 int i, offset;
1131 int64_t loffset;
1132
1133 CORE_ADDR buildaddr = *jump_entry;
1134
1135 /* Build the jump pad. */
1136
1137 /* First, do tracepoint data collection. Save registers. */
1138 i = 0;
1139 /* Need to ensure stack pointer saved first. */
1140 buf[i++] = 0x54; /* push %rsp */
1141 buf[i++] = 0x55; /* push %rbp */
1142 buf[i++] = 0x57; /* push %rdi */
1143 buf[i++] = 0x56; /* push %rsi */
1144 buf[i++] = 0x52; /* push %rdx */
1145 buf[i++] = 0x51; /* push %rcx */
1146 buf[i++] = 0x53; /* push %rbx */
1147 buf[i++] = 0x50; /* push %rax */
1148 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1149 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1150 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1151 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1152 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1153 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1154 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1155 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1156 buf[i++] = 0x9c; /* pushfq */
1157 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1158 buf[i++] = 0xbf;
1159 memcpy (buf + i, &tpaddr, 8);
1160 i += 8;
1161 buf[i++] = 0x57; /* push %rdi */
1162 append_insns (&buildaddr, i, buf);
1163
1164 /* Stack space for the collecting_t object. */
1165 i = 0;
1166 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1167 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1168 memcpy (buf + i, &tpoint, 8);
1169 i += 8;
1170 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1171 i += push_opcode (&buf[i],
1172 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1173 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1174 append_insns (&buildaddr, i, buf);
1175
1176 /* spin-lock. */
1177 i = 0;
1178 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1179 memcpy (&buf[i], (void *) &lockaddr, 8);
1180 i += 8;
1181 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1182 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1183 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1184 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1185 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1186 append_insns (&buildaddr, i, buf);
1187
1188 /* Set up the gdb_collect call. */
1189 /* At this point, (stack pointer + 0x18) is the base of our saved
1190 register block. */
1191
1192 i = 0;
1193 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1194 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1195
1196 /* tpoint address may be 64-bit wide. */
1197 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1198 memcpy (buf + i, &tpoint, 8);
1199 i += 8;
1200 append_insns (&buildaddr, i, buf);
1201
1202 /* The collector function, being in the shared library, may be more
1203 than 31 bits away from the jump pad. */
1204 i = 0;
1205 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1206 memcpy (buf + i, &collector, 8);
1207 i += 8;
1208 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1209 append_insns (&buildaddr, i, buf);
1210
1211 /* Clear the spin-lock. */
1212 i = 0;
1213 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1214 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1215 memcpy (buf + i, &lockaddr, 8);
1216 i += 8;
1217 append_insns (&buildaddr, i, buf);
1218
1219 /* Remove stack that had been used for the collect_t object. */
1220 i = 0;
1221 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1222 append_insns (&buildaddr, i, buf);
1223
1224 /* Restore register state. */
1225 i = 0;
1226 buf[i++] = 0x48; /* add $0x8,%rsp */
1227 buf[i++] = 0x83;
1228 buf[i++] = 0xc4;
1229 buf[i++] = 0x08;
1230 buf[i++] = 0x9d; /* popfq */
1231 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1232 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1233 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1234 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1235 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1236 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1237 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1238 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1239 buf[i++] = 0x58; /* pop %rax */
1240 buf[i++] = 0x5b; /* pop %rbx */
1241 buf[i++] = 0x59; /* pop %rcx */
1242 buf[i++] = 0x5a; /* pop %rdx */
1243 buf[i++] = 0x5e; /* pop %rsi */
1244 buf[i++] = 0x5f; /* pop %rdi */
1245 buf[i++] = 0x5d; /* pop %rbp */
1246 buf[i++] = 0x5c; /* pop %rsp */
1247 append_insns (&buildaddr, i, buf);
1248
1249 /* Now, adjust the original instruction to execute in the jump
1250 pad. */
1251 *adjusted_insn_addr = buildaddr;
1252 relocate_instruction (&buildaddr, tpaddr);
1253 *adjusted_insn_addr_end = buildaddr;
1254
1255 /* Finally, write a jump back to the program. */
1256
1257 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1258 if (loffset > INT_MAX || loffset < INT_MIN)
1259 {
1260 sprintf (err,
1261 "E.Jump back from jump pad too far from tracepoint "
1262 "(offset 0x%" PRIx64 " > int32).", loffset);
1263 return 1;
1264 }
1265
1266 offset = (int) loffset;
1267 memcpy (buf, jump_insn, sizeof (jump_insn));
1268 memcpy (buf + 1, &offset, 4);
1269 append_insns (&buildaddr, sizeof (jump_insn), buf);
1270
1271 /* The jump pad is now built. Wire in a jump to our jump pad. This
1272 is always done last (by our caller actually), so that we can
1273 install fast tracepoints with threads running. This relies on
1274 the agent's atomic write support. */
1275 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1276 if (loffset > INT_MAX || loffset < INT_MIN)
1277 {
1278 sprintf (err,
1279 "E.Jump pad too far from tracepoint "
1280 "(offset 0x%" PRIx64 " > int32).", loffset);
1281 return 1;
1282 }
1283
1284 offset = (int) loffset;
1285
1286 memcpy (buf, jump_insn, sizeof (jump_insn));
1287 memcpy (buf + 1, &offset, 4);
1288 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1289 *jjump_pad_insn_size = sizeof (jump_insn);
1290
1291 /* Return the end address of our pad. */
1292 *jump_entry = buildaddr;
1293
1294 return 0;
1295 }
1296
1297 #endif /* __x86_64__ */
1298
1299 /* Build a jump pad that saves registers and calls a collection
1300 function. Writes the jump instruction that leads to the jump pad to
1301 JJUMPAD_INSN. The caller is responsible for writing it in at the
1302 tracepoint address. */
1303
1304 static int
1305 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1306 CORE_ADDR collector,
1307 CORE_ADDR lockaddr,
1308 ULONGEST orig_size,
1309 CORE_ADDR *jump_entry,
1310 CORE_ADDR *trampoline,
1311 ULONGEST *trampoline_size,
1312 unsigned char *jjump_pad_insn,
1313 ULONGEST *jjump_pad_insn_size,
1314 CORE_ADDR *adjusted_insn_addr,
1315 CORE_ADDR *adjusted_insn_addr_end,
1316 char *err)
1317 {
1318 unsigned char buf[0x100];
1319 int i, offset;
1320 CORE_ADDR buildaddr = *jump_entry;
1321
1322 /* Build the jump pad. */
1323
1324 /* First, do tracepoint data collection. Save registers. */
1325 i = 0;
1326 buf[i++] = 0x60; /* pushad */
1327 buf[i++] = 0x68; /* push tpaddr aka $pc */
1328 *((int *)(buf + i)) = (int) tpaddr;
1329 i += 4;
1330 buf[i++] = 0x9c; /* pushf */
1331 buf[i++] = 0x1e; /* push %ds */
1332 buf[i++] = 0x06; /* push %es */
1333 buf[i++] = 0x0f; /* push %fs */
1334 buf[i++] = 0xa0;
1335 buf[i++] = 0x0f; /* push %gs */
1336 buf[i++] = 0xa8;
1337 buf[i++] = 0x16; /* push %ss */
1338 buf[i++] = 0x0e; /* push %cs */
1339 append_insns (&buildaddr, i, buf);
1340
1341 /* Stack space for the collecting_t object. */
1342 i = 0;
1343 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1344
1345 /* Build the object. */
1346 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1347 memcpy (buf + i, &tpoint, 4);
1348 i += 4;
1349 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1350
1351 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1352 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1353 append_insns (&buildaddr, i, buf);
1354
1355 /* Spin-lock. Note this uses cmpxchg, which is not available on the
1356 original i386. If we cared about that, this could use xchg instead. */
1357
1358 i = 0;
1359 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1360 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1361 %esp,<lockaddr> */
1362 memcpy (&buf[i], (void *) &lockaddr, 4);
1363 i += 4;
1364 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1365 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1366 append_insns (&buildaddr, i, buf);
1367
1368
1369 /* Set up arguments to the gdb_collect call. */
1370 i = 0;
1371 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1372 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1373 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1374 append_insns (&buildaddr, i, buf);
1375
1376 i = 0;
1377 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1378 append_insns (&buildaddr, i, buf);
1379
1380 i = 0;
1381 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1382 memcpy (&buf[i], (void *) &tpoint, 4);
1383 i += 4;
1384 append_insns (&buildaddr, i, buf);
1385
1386 buf[0] = 0xe8; /* call <reladdr> */
1387 offset = collector - (buildaddr + sizeof (jump_insn));
1388 memcpy (buf + 1, &offset, 4);
1389 append_insns (&buildaddr, 5, buf);
1390 /* Clean up after the call. */
1391 buf[0] = 0x83; /* add $0x8,%esp */
1392 buf[1] = 0xc4;
1393 buf[2] = 0x08;
1394 append_insns (&buildaddr, 3, buf);
1395
1396
1397 /* Clear the spin-lock. This would need the LOCK prefix on older
1398 broken archs. */
1399 i = 0;
1400 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1401 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1402 memcpy (buf + i, &lockaddr, 4);
1403 i += 4;
1404 append_insns (&buildaddr, i, buf);
1405
1406
1407 /* Remove stack that had been used for the collect_t object. */
1408 i = 0;
1409 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1410 append_insns (&buildaddr, i, buf);
1411
1412 i = 0;
1413 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1414 buf[i++] = 0xc4;
1415 buf[i++] = 0x04;
1416 buf[i++] = 0x17; /* pop %ss */
1417 buf[i++] = 0x0f; /* pop %gs */
1418 buf[i++] = 0xa9;
1419 buf[i++] = 0x0f; /* pop %fs */
1420 buf[i++] = 0xa1;
1421 buf[i++] = 0x07; /* pop %es */
1422 buf[i++] = 0x1f; /* pop %ds */
1423 buf[i++] = 0x9d; /* popf */
1424 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1425 buf[i++] = 0xc4;
1426 buf[i++] = 0x04;
1427 buf[i++] = 0x61; /* popad */
1428 append_insns (&buildaddr, i, buf);
1429
1430 /* Now, adjust the original instruction to execute in the jump
1431 pad. */
1432 *adjusted_insn_addr = buildaddr;
1433 relocate_instruction (&buildaddr, tpaddr);
1434 *adjusted_insn_addr_end = buildaddr;
1435
1436 /* Write the jump back to the program. */
1437 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1438 memcpy (buf, jump_insn, sizeof (jump_insn));
1439 memcpy (buf + 1, &offset, 4);
1440 append_insns (&buildaddr, sizeof (jump_insn), buf);
1441
1442 /* The jump pad is now built. Wire in a jump to our jump pad. This
1443 is always done last (by our caller actually), so that we can
1444 install fast tracepoints with threads running. This relies on
1445 the agent's atomic write support. */
1446 if (orig_size == 4)
1447 {
1448 /* Create a trampoline. */
1449 *trampoline_size = sizeof (jump_insn);
1450 if (!claim_trampoline_space (*trampoline_size, trampoline))
1451 {
1452 /* No trampoline space available. */
1453 strcpy (err,
1454 "E.Cannot allocate trampoline space needed for fast "
1455 "tracepoints on 4-byte instructions.");
1456 return 1;
1457 }
1458
1459 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1460 memcpy (buf, jump_insn, sizeof (jump_insn));
1461 memcpy (buf + 1, &offset, 4);
1462 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1463
1464 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1465 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1466 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1467 memcpy (buf + 2, &offset, 2);
1468 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1469 *jjump_pad_insn_size = sizeof (small_jump_insn);
1470 }
1471 else
1472 {
1473 /* Else use a 32-bit relative jump instruction. */
1474 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1475 memcpy (buf, jump_insn, sizeof (jump_insn));
1476 memcpy (buf + 1, &offset, 4);
1477 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1478 *jjump_pad_insn_size = sizeof (jump_insn);
1479 }
1480
1481 /* Return the end address of our pad. */
1482 *jump_entry = buildaddr;
1483
1484 return 0;
1485 }
1486
1487 static int
1488 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1489 CORE_ADDR collector,
1490 CORE_ADDR lockaddr,
1491 ULONGEST orig_size,
1492 CORE_ADDR *jump_entry,
1493 CORE_ADDR *trampoline,
1494 ULONGEST *trampoline_size,
1495 unsigned char *jjump_pad_insn,
1496 ULONGEST *jjump_pad_insn_size,
1497 CORE_ADDR *adjusted_insn_addr,
1498 CORE_ADDR *adjusted_insn_addr_end,
1499 char *err)
1500 {
1501 #ifdef __x86_64__
1502 if (is_64bit_tdesc ())
1503 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1504 collector, lockaddr,
1505 orig_size, jump_entry,
1506 trampoline, trampoline_size,
1507 jjump_pad_insn,
1508 jjump_pad_insn_size,
1509 adjusted_insn_addr,
1510 adjusted_insn_addr_end,
1511 err);
1512 #endif
1513
1514 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1515 collector, lockaddr,
1516 orig_size, jump_entry,
1517 trampoline, trampoline_size,
1518 jjump_pad_insn,
1519 jjump_pad_insn_size,
1520 adjusted_insn_addr,
1521 adjusted_insn_addr_end,
1522 err);
1523 }
1524
1525 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1526 architectures. */
1527
1528 static int
1529 x86_get_min_fast_tracepoint_insn_len (void)
1530 {
1531 static int warned_about_fast_tracepoints = 0;
1532
1533 #ifdef __x86_64__
1534 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1535 used for fast tracepoints. */
1536 if (is_64bit_tdesc ())
1537 return 5;
1538 #endif
1539
1540 if (agent_loaded_p ())
1541 {
1542 char errbuf[IPA_BUFSIZ];
1543
1544 errbuf[0] = '\0';
1545
1546 /* On x86, if trampolines are available, then 4-byte jump instructions
1547 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1548 with a 4-byte offset are used instead. */
1549 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1550 return 4;
1551 else
1552 {
1553 /* GDB has no channel to explain to the user why a shorter fast
1554 tracepoint is not possible, but at least make GDBserver
1555 mention that something has gone awry. */
1556 if (!warned_about_fast_tracepoints)
1557 {
1558 warning ("4-byte fast tracepoints not available; %s", errbuf);
1559 warned_about_fast_tracepoints = 1;
1560 }
1561 return 5;
1562 }
1563 }
1564 else
1565 {
1566 /* Indicate that the minimum length is currently unknown since the IPA
1567 has not loaded yet. */
1568 return 0;
1569 }
1570 }
1571
1572 static void
1573 add_insns (unsigned char *start, int len)
1574 {
1575 CORE_ADDR buildaddr = current_insn_ptr;
1576
1577 if (debug_threads)
1578 debug_printf ("Adding %d bytes of insn at %s\n",
1579 len, paddress (buildaddr));
1580
1581 append_insns (&buildaddr, len, start);
1582 current_insn_ptr = buildaddr;
1583 }
1584
1585 /* Our general strategy for emitting code is to avoid specifying raw
1586 bytes whenever possible, and instead copy a block of inline asm
1587 that is embedded in the function. This is a little messy, because
1588 we need to keep the compiler from discarding what looks like dead
1589 code, plus suppress various warnings. */
1590
1591 #define EMIT_ASM(NAME, INSNS) \
1592 do \
1593 { \
1594 extern unsigned char start_ ## NAME, end_ ## NAME; \
1595 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1596 __asm__ ("jmp end_" #NAME "\n" \
1597 "\t" "start_" #NAME ":" \
1598 "\t" INSNS "\n" \
1599 "\t" "end_" #NAME ":"); \
1600 } while (0)
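
/* A rough sketch of the expansion (simplified): EMIT_ASM (foo, "pop %rax")
   amounts to

     extern unsigned char start_foo, end_foo;
     add_insns (&start_foo, &end_foo - &start_foo);
     __asm__ ("jmp end_foo\n\tstart_foo:\tpop %rax\n\tend_foo:");

   i.e. the bytes for "pop %rax" are assembled between the two labels in
   this function's own text, skipped over at run time by the jmp, and
   copied out to the inferior's code buffer by add_insns.  */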
1601
1602 #ifdef __x86_64__
1603
1604 #define EMIT_ASM32(NAME,INSNS) \
1605 do \
1606 { \
1607 extern unsigned char start_ ## NAME, end_ ## NAME; \
1608 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1609 __asm__ (".code32\n" \
1610 "\t" "jmp end_" #NAME "\n" \
1611 "\t" "start_" #NAME ":\n" \
1612 "\t" INSNS "\n" \
1613 "\t" "end_" #NAME ":\n" \
1614 ".code64\n"); \
1615 } while (0)
1616
1617 #else
1618
1619 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1620
1621 #endif
1622
1623 #ifdef __x86_64__
1624
1625 static void
1626 amd64_emit_prologue (void)
1627 {
1628 EMIT_ASM (amd64_prologue,
1629 "pushq %rbp\n\t"
1630 "movq %rsp,%rbp\n\t"
1631 "sub $0x20,%rsp\n\t"
1632 "movq %rdi,-8(%rbp)\n\t"
1633 "movq %rsi,-16(%rbp)");
1634 }
1635
1636
1637 static void
1638 amd64_emit_epilogue (void)
1639 {
1640 EMIT_ASM (amd64_epilogue,
1641 "movq -16(%rbp),%rdi\n\t"
1642 "movq %rax,(%rdi)\n\t"
1643 "xor %rax,%rax\n\t"
1644 "leave\n\t"
1645 "ret");
1646 }
1647
1648 static void
1649 amd64_emit_add (void)
1650 {
1651 EMIT_ASM (amd64_add,
1652 "add (%rsp),%rax\n\t"
1653 "lea 0x8(%rsp),%rsp");
1654 }
1655
1656 static void
1657 amd64_emit_sub (void)
1658 {
1659 EMIT_ASM (amd64_sub,
1660 "sub %rax,(%rsp)\n\t"
1661 "pop %rax");
1662 }
1663
1664 static void
1665 amd64_emit_mul (void)
1666 {
1667 emit_error = 1;
1668 }
1669
1670 static void
1671 amd64_emit_lsh (void)
1672 {
1673 emit_error = 1;
1674 }
1675
1676 static void
1677 amd64_emit_rsh_signed (void)
1678 {
1679 emit_error = 1;
1680 }
1681
1682 static void
1683 amd64_emit_rsh_unsigned (void)
1684 {
1685 emit_error = 1;
1686 }
1687
1688 static void
1689 amd64_emit_ext (int arg)
1690 {
1691 switch (arg)
1692 {
1693 case 8:
1694 EMIT_ASM (amd64_ext_8,
1695 "cbtw\n\t"
1696 "cwtl\n\t"
1697 "cltq");
1698 break;
1699 case 16:
1700 EMIT_ASM (amd64_ext_16,
1701 "cwtl\n\t"
1702 "cltq");
1703 break;
1704 case 32:
1705 EMIT_ASM (amd64_ext_32,
1706 "cltq");
1707 break;
1708 default:
1709 emit_error = 1;
1710 }
1711 }
1712
1713 static void
1714 amd64_emit_log_not (void)
1715 {
1716 EMIT_ASM (amd64_log_not,
1717 "test %rax,%rax\n\t"
1718 "sete %cl\n\t"
1719 "movzbq %cl,%rax");
1720 }
1721
1722 static void
1723 amd64_emit_bit_and (void)
1724 {
1725 EMIT_ASM (amd64_and,
1726 "and (%rsp),%rax\n\t"
1727 "lea 0x8(%rsp),%rsp");
1728 }
1729
1730 static void
1731 amd64_emit_bit_or (void)
1732 {
1733 EMIT_ASM (amd64_or,
1734 "or (%rsp),%rax\n\t"
1735 "lea 0x8(%rsp),%rsp");
1736 }
1737
1738 static void
1739 amd64_emit_bit_xor (void)
1740 {
1741 EMIT_ASM (amd64_xor,
1742 "xor (%rsp),%rax\n\t"
1743 "lea 0x8(%rsp),%rsp");
1744 }
1745
1746 static void
1747 amd64_emit_bit_not (void)
1748 {
1749 EMIT_ASM (amd64_bit_not,
1750 "xorq $0xffffffffffffffff,%rax");
1751 }
1752
1753 static void
1754 amd64_emit_equal (void)
1755 {
1756 EMIT_ASM (amd64_equal,
1757 "cmp %rax,(%rsp)\n\t"
1758 "je .Lamd64_equal_true\n\t"
1759 "xor %rax,%rax\n\t"
1760 "jmp .Lamd64_equal_end\n\t"
1761 ".Lamd64_equal_true:\n\t"
1762 "mov $0x1,%rax\n\t"
1763 ".Lamd64_equal_end:\n\t"
1764 "lea 0x8(%rsp),%rsp");
1765 }
1766
1767 static void
1768 amd64_emit_less_signed (void)
1769 {
1770 EMIT_ASM (amd64_less_signed,
1771 "cmp %rax,(%rsp)\n\t"
1772 "jl .Lamd64_less_signed_true\n\t"
1773 "xor %rax,%rax\n\t"
1774 "jmp .Lamd64_less_signed_end\n\t"
1775 ".Lamd64_less_signed_true:\n\t"
1776 "mov $1,%rax\n\t"
1777 ".Lamd64_less_signed_end:\n\t"
1778 "lea 0x8(%rsp),%rsp");
1779 }
1780
1781 static void
1782 amd64_emit_less_unsigned (void)
1783 {
1784 EMIT_ASM (amd64_less_unsigned,
1785 "cmp %rax,(%rsp)\n\t"
1786 "jb .Lamd64_less_unsigned_true\n\t"
1787 "xor %rax,%rax\n\t"
1788 "jmp .Lamd64_less_unsigned_end\n\t"
1789 ".Lamd64_less_unsigned_true:\n\t"
1790 "mov $1,%rax\n\t"
1791 ".Lamd64_less_unsigned_end:\n\t"
1792 "lea 0x8(%rsp),%rsp");
1793 }
1794
1795 static void
1796 amd64_emit_ref (int size)
1797 {
1798 switch (size)
1799 {
1800 case 1:
1801 EMIT_ASM (amd64_ref1,
1802 "movb (%rax),%al");
1803 break;
1804 case 2:
1805 EMIT_ASM (amd64_ref2,
1806 "movw (%rax),%ax");
1807 break;
1808 case 4:
1809 EMIT_ASM (amd64_ref4,
1810 "movl (%rax),%eax");
1811 break;
1812 case 8:
1813 EMIT_ASM (amd64_ref8,
1814 "movq (%rax),%rax");
1815 break;
1816 }
1817 }
1818
1819 static void
1820 amd64_emit_if_goto (int *offset_p, int *size_p)
1821 {
1822 EMIT_ASM (amd64_if_goto,
1823 "mov %rax,%rcx\n\t"
1824 "pop %rax\n\t"
1825 "cmp $0,%rcx\n\t"
1826 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1827 if (offset_p)
1828 *offset_p = 10;
1829 if (size_p)
1830 *size_p = 4;
1831 }
1832
1833 static void
1834 amd64_emit_goto (int *offset_p, int *size_p)
1835 {
1836 EMIT_ASM (amd64_goto,
1837 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1838 if (offset_p)
1839 *offset_p = 1;
1840 if (size_p)
1841 *size_p = 4;
1842 }
1843
1844 static void
1845 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1846 {
1847 int diff = (to - (from + size));
1848 unsigned char buf[sizeof (int)];
1849
1850 if (size != 4)
1851 {
1852 emit_error = 1;
1853 return;
1854 }
1855
1856 memcpy (buf, &diff, sizeof (int));
1857 target_write_memory (from, buf, sizeof (int));
1858 }
1859
1860 static void
1861 amd64_emit_const (LONGEST num)
1862 {
1863 unsigned char buf[16];
1864 int i;
1865 CORE_ADDR buildaddr = current_insn_ptr;
1866
1867 i = 0;
1868 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1869 memcpy (&buf[i], &num, sizeof (num));
1870 i += 8;
1871 append_insns (&buildaddr, i, buf);
1872 current_insn_ptr = buildaddr;
1873 }
1874
1875 static void
1876 amd64_emit_call (CORE_ADDR fn)
1877 {
1878 unsigned char buf[16];
1879 int i;
1880 CORE_ADDR buildaddr;
1881 LONGEST offset64;
1882
1883 /* The destination function, being in the shared library, may be more
1884 than 31 bits away from the compiled code pad. */
1885
1886 buildaddr = current_insn_ptr;
1887
1888 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1889
1890 i = 0;
1891
1892 if (offset64 > INT_MAX || offset64 < INT_MIN)
1893 {
1894 /* Offset is too large for a call. Use callq, but that requires
1895 a register, so avoid it if possible. Use r10; since it is
1896 call-clobbered, we don't have to push/pop it. */
1897 buf[i++] = 0x48; /* mov $fn,%r10 */
1898 buf[i++] = 0xba;
1899 memcpy (buf + i, &fn, 8);
1900 i += 8;
1901 buf[i++] = 0xff; /* callq *%r10 */
1902 buf[i++] = 0xd2;
1903 }
1904 else
1905 {
1906 int offset32 = offset64; /* we know we can't overflow here. */
1907
1908 buf[i++] = 0xe8; /* call <reladdr> */
1909 memcpy (buf + i, &offset32, 4);
1910 i += 4;
1911 }
1912
1913 append_insns (&buildaddr, i, buf);
1914 current_insn_ptr = buildaddr;
1915 }
1916
1917 static void
1918 amd64_emit_reg (int reg)
1919 {
1920 unsigned char buf[16];
1921 int i;
1922 CORE_ADDR buildaddr;
1923
1924 /* Assume raw_regs is still in %rdi. */
1925 buildaddr = current_insn_ptr;
1926 i = 0;
1927 buf[i++] = 0xbe; /* mov $<n>,%esi */
1928 memcpy (&buf[i], &reg, sizeof (reg));
1929 i += 4;
1930 append_insns (&buildaddr, i, buf);
1931 current_insn_ptr = buildaddr;
1932 amd64_emit_call (get_raw_reg_func_addr ());
1933 }
1934
1935 static void
1936 amd64_emit_pop (void)
1937 {
1938 EMIT_ASM (amd64_pop,
1939 "pop %rax");
1940 }
1941
1942 static void
1943 amd64_emit_stack_flush (void)
1944 {
1945 EMIT_ASM (amd64_stack_flush,
1946 "push %rax");
1947 }
1948
1949 static void
1950 amd64_emit_zero_ext (int arg)
1951 {
1952 switch (arg)
1953 {
1954 case 8:
1955 EMIT_ASM (amd64_zero_ext_8,
1956 "and $0xff,%rax");
1957 break;
1958 case 16:
1959 EMIT_ASM (amd64_zero_ext_16,
1960 "and $0xffff,%rax");
1961 break;
1962 case 32:
1963 EMIT_ASM (amd64_zero_ext_32,
1964 "mov $0xffffffff,%rcx\n\t"
1965 "and %rcx,%rax");
1966 break;
1967 default:
1968 emit_error = 1;
1969 }
1970 }
1971
1972 static void
1973 amd64_emit_swap (void)
1974 {
1975 EMIT_ASM (amd64_swap,
1976 "mov %rax,%rcx\n\t"
1977 "pop %rax\n\t"
1978 "push %rcx");
1979 }
1980
1981 static void
1982 amd64_emit_stack_adjust (int n)
1983 {
1984 unsigned char buf[16];
1985 int i;
1986 CORE_ADDR buildaddr = current_insn_ptr;
1987
1988 i = 0;
1989 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1990 buf[i++] = 0x8d;
1991 buf[i++] = 0x64;
1992 buf[i++] = 0x24;
1993 /* This only handles adjustments up to 16, but we don't expect any more. */
1994 buf[i++] = n * 8;
1995 append_insns (&buildaddr, i, buf);
1996 current_insn_ptr = buildaddr;
1997 }
1998
1999 /* FN's prototype is `LONGEST(*fn)(int)'. */
2000
2001 static void
2002 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2003 {
2004 unsigned char buf[16];
2005 int i;
2006 CORE_ADDR buildaddr;
2007
2008 buildaddr = current_insn_ptr;
2009 i = 0;
2010 buf[i++] = 0xbf; /* movl $<n>,%edi */
2011 memcpy (&buf[i], &arg1, sizeof (arg1));
2012 i += 4;
2013 append_insns (&buildaddr, i, buf);
2014 current_insn_ptr = buildaddr;
2015 amd64_emit_call (fn);
2016 }
2017
2018 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2019
2020 static void
2021 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2022 {
2023 unsigned char buf[16];
2024 int i;
2025 CORE_ADDR buildaddr;
2026
2027 buildaddr = current_insn_ptr;
2028 i = 0;
2029 buf[i++] = 0xbf; /* movl $<n>,%edi */
2030 memcpy (&buf[i], &arg1, sizeof (arg1));
2031 i += 4;
2032 append_insns (&buildaddr, i, buf);
2033 current_insn_ptr = buildaddr;
2034 EMIT_ASM (amd64_void_call_2_a,
2035 /* Save away a copy of the stack top. */
2036 "push %rax\n\t"
2037 /* Also pass top as the second argument. */
2038 "mov %rax,%rsi");
2039 amd64_emit_call (fn);
2040 EMIT_ASM (amd64_void_call_2_b,
2041 /* Restore the stack top, %rax may have been trashed. */
2042 "pop %rax");
2043 }
2044
2045 static void
2046 amd64_emit_eq_goto (int *offset_p, int *size_p)
2047 {
2048 EMIT_ASM (amd64_eq,
2049 "cmp %rax,(%rsp)\n\t"
2050 "jne .Lamd64_eq_fallthru\n\t"
2051 "lea 0x8(%rsp),%rsp\n\t"
2052 "pop %rax\n\t"
2053 /* jmp, but don't trust the assembler to choose the right jump */
2054 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2055 ".Lamd64_eq_fallthru:\n\t"
2056 "lea 0x8(%rsp),%rsp\n\t"
2057 "pop %rax");
2058
2059 if (offset_p)
2060 *offset_p = 13;
2061 if (size_p)
2062 *size_p = 4;
2063 }
2064
2065 static void
2066 amd64_emit_ne_goto (int *offset_p, int *size_p)
2067 {
2068 EMIT_ASM (amd64_ne,
2069 "cmp %rax,(%rsp)\n\t"
2070 "je .Lamd64_ne_fallthru\n\t"
2071 "lea 0x8(%rsp),%rsp\n\t"
2072 "pop %rax\n\t"
2073 /* jmp, but don't trust the assembler to choose the right jump */
2074 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2075 ".Lamd64_ne_fallthru:\n\t"
2076 "lea 0x8(%rsp),%rsp\n\t"
2077 "pop %rax");
2078
2079 if (offset_p)
2080 *offset_p = 13;
2081 if (size_p)
2082 *size_p = 4;
2083 }
2084
2085 static void
2086 amd64_emit_lt_goto (int *offset_p, int *size_p)
2087 {
2088 EMIT_ASM (amd64_lt,
2089 "cmp %rax,(%rsp)\n\t"
2090 "jnl .Lamd64_lt_fallthru\n\t"
2091 "lea 0x8(%rsp),%rsp\n\t"
2092 "pop %rax\n\t"
2093 /* jmp, but don't trust the assembler to choose the right jump */
2094 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2095 ".Lamd64_lt_fallthru:\n\t"
2096 "lea 0x8(%rsp),%rsp\n\t"
2097 "pop %rax");
2098
2099 if (offset_p)
2100 *offset_p = 13;
2101 if (size_p)
2102 *size_p = 4;
2103 }
2104
2105 static void
2106 amd64_emit_le_goto (int *offset_p, int *size_p)
2107 {
2108 EMIT_ASM (amd64_le,
2109 "cmp %rax,(%rsp)\n\t"
2110 "jnle .Lamd64_le_fallthru\n\t"
2111 "lea 0x8(%rsp),%rsp\n\t"
2112 "pop %rax\n\t"
2113 /* jmp, but don't trust the assembler to choose the right jump */
2114 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2115 ".Lamd64_le_fallthru:\n\t"
2116 "lea 0x8(%rsp),%rsp\n\t"
2117 "pop %rax");
2118
2119 if (offset_p)
2120 *offset_p = 13;
2121 if (size_p)
2122 *size_p = 4;
2123 }
2124
2125 static void
2126 amd64_emit_gt_goto (int *offset_p, int *size_p)
2127 {
2128 EMIT_ASM (amd64_gt,
2129 "cmp %rax,(%rsp)\n\t"
2130 "jng .Lamd64_gt_fallthru\n\t"
2131 "lea 0x8(%rsp),%rsp\n\t"
2132 "pop %rax\n\t"
2133 /* jmp, but don't trust the assembler to choose the right jump */
2134 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2135 ".Lamd64_gt_fallthru:\n\t"
2136 "lea 0x8(%rsp),%rsp\n\t"
2137 "pop %rax");
2138
2139 if (offset_p)
2140 *offset_p = 13;
2141 if (size_p)
2142 *size_p = 4;
2143 }
2144
2145 static void
2146 amd64_emit_ge_goto (int *offset_p, int *size_p)
2147 {
2148 EMIT_ASM (amd64_ge,
2149 "cmp %rax,(%rsp)\n\t"
2150 "jnge .Lamd64_ge_fallthru\n\t"
2151 ".Lamd64_ge_jump:\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2153 "pop %rax\n\t"
2154 /* jmp, but don't trust the assembler to choose the right jump */
2155 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2156 ".Lamd64_ge_fallthru:\n\t"
2157 "lea 0x8(%rsp),%rsp\n\t"
2158 "pop %rax");
2159
2160 if (offset_p)
2161 *offset_p = 13;
2162 if (size_p)
2163 *size_p = 4;
2164 }
2165
2166 struct emit_ops amd64_emit_ops =
2167 {
2168 amd64_emit_prologue,
2169 amd64_emit_epilogue,
2170 amd64_emit_add,
2171 amd64_emit_sub,
2172 amd64_emit_mul,
2173 amd64_emit_lsh,
2174 amd64_emit_rsh_signed,
2175 amd64_emit_rsh_unsigned,
2176 amd64_emit_ext,
2177 amd64_emit_log_not,
2178 amd64_emit_bit_and,
2179 amd64_emit_bit_or,
2180 amd64_emit_bit_xor,
2181 amd64_emit_bit_not,
2182 amd64_emit_equal,
2183 amd64_emit_less_signed,
2184 amd64_emit_less_unsigned,
2185 amd64_emit_ref,
2186 amd64_emit_if_goto,
2187 amd64_emit_goto,
2188 amd64_write_goto_address,
2189 amd64_emit_const,
2190 amd64_emit_call,
2191 amd64_emit_reg,
2192 amd64_emit_pop,
2193 amd64_emit_stack_flush,
2194 amd64_emit_zero_ext,
2195 amd64_emit_swap,
2196 amd64_emit_stack_adjust,
2197 amd64_emit_int_call_1,
2198 amd64_emit_void_call_2,
2199 amd64_emit_eq_goto,
2200 amd64_emit_ne_goto,
2201 amd64_emit_lt_goto,
2202 amd64_emit_le_goto,
2203 amd64_emit_gt_goto,
2204 amd64_emit_ge_goto
2205 };
2206
2207 #endif /* __x86_64__ */
2208
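/* In the i386 emitters below, the top of the value stack is kept in
   %eax (low 32 bits) and %ebx (high 32 bits); each deeper entry
   occupies two 4-byte CPU-stack slots, low word at the lower
   address.  */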
2209 static void
2210 i386_emit_prologue (void)
2211 {
2212 EMIT_ASM32 (i386_prologue,
2213 "push %ebp\n\t"
2214 "mov %esp,%ebp\n\t"
2215 "push %ebx");
2216 /* At this point, the raw regs base address is at 8(%ebp), and the
2217 value pointer is at 12(%ebp). */
2218 }
2219
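/* Store the 64-bit result (%eax low, %ebx high) through the value
   pointer passed at 12(%ebp) and return zero (no error).  */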
2220 static void
2221 i386_emit_epilogue (void)
2222 {
2223 EMIT_ASM32 (i386_epilogue,
2224 "mov 12(%ebp),%ecx\n\t"
2225 "mov %eax,(%ecx)\n\t"
2226 "mov %ebx,0x4(%ecx)\n\t"
2227 "xor %eax,%eax\n\t"
2228 "pop %ebx\n\t"
2229 "pop %ebp\n\t"
2230 "ret");
2231 }
2232
2233 static void
2234 i386_emit_add (void)
2235 {
2236 EMIT_ASM32 (i386_add,
2237 "add (%esp),%eax\n\t"
2238 "adc 0x4(%esp),%ebx\n\t"
2239 "lea 0x8(%esp),%esp");
2240 }
2241
2242 static void
2243 i386_emit_sub (void)
2244 {
2245 EMIT_ASM32 (i386_sub,
2246 "subl %eax,(%esp)\n\t"
2247 "sbbl %ebx,4(%esp)\n\t"
2248 "pop %eax\n\t"
2249 "pop %ebx\n\t");
2250 }
2251
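/* The 64-bit multiply and shift operations have no inline i386
   expansion; they simply flag emit_error so the bytecode compiler
   abandons compilation of the expression rather than emit incorrect
   code.  */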
2252 static void
2253 i386_emit_mul (void)
2254 {
2255 emit_error = 1;
2256 }
2257
2258 static void
2259 i386_emit_lsh (void)
2260 {
2261 emit_error = 1;
2262 }
2263
2264 static void
2265 i386_emit_rsh_signed (void)
2266 {
2267 emit_error = 1;
2268 }
2269
2270 static void
2271 i386_emit_rsh_unsigned (void)
2272 {
2273 emit_error = 1;
2274 }
2275
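/* Sign-extend the value-stack top from ARG bits to the full 64-bit
   %eax/%ebx pair.  */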
2276 static void
2277 i386_emit_ext (int arg)
2278 {
2279 switch (arg)
2280 {
2281 case 8:
2282 EMIT_ASM32 (i386_ext_8,
2283 "cbtw\n\t"
2284 "cwtl\n\t"
2285 "movl %eax,%ebx\n\t"
2286 "sarl $31,%ebx");
2287 break;
2288 case 16:
2289 EMIT_ASM32 (i386_ext_16,
2290 "cwtl\n\t"
2291 "movl %eax,%ebx\n\t"
2292 "sarl $31,%ebx");
2293 break;
2294 case 32:
2295 EMIT_ASM32 (i386_ext_32,
2296 "movl %eax,%ebx\n\t"
2297 "sarl $31,%ebx");
2298 break;
2299 default:
2300 emit_error = 1;
2301 }
2302 }
2303
2304 static void
2305 i386_emit_log_not (void)
2306 {
2307 EMIT_ASM32 (i386_log_not,
2308 "or %ebx,%eax\n\t"
2309 "test %eax,%eax\n\t"
2310 "sete %cl\n\t"
2311 "xor %ebx,%ebx\n\t"
2312 "movzbl %cl,%eax");
2313 }
2314
2315 static void
2316 i386_emit_bit_and (void)
2317 {
2318 EMIT_ASM32 (i386_and,
2319 "and (%esp),%eax\n\t"
2320 "and 0x4(%esp),%ebx\n\t"
2321 "lea 0x8(%esp),%esp");
2322 }
2323
2324 static void
2325 i386_emit_bit_or (void)
2326 {
2327 EMIT_ASM32 (i386_or,
2328 "or (%esp),%eax\n\t"
2329 "or 0x4(%esp),%ebx\n\t"
2330 "lea 0x8(%esp),%esp");
2331 }
2332
2333 static void
2334 i386_emit_bit_xor (void)
2335 {
2336 EMIT_ASM32 (i386_xor,
2337 "xor (%esp),%eax\n\t"
2338 "xor 0x4(%esp),%ebx\n\t"
2339 "lea 0x8(%esp),%esp");
2340 }
2341
2342 static void
2343 i386_emit_bit_not (void)
2344 {
2345 EMIT_ASM32 (i386_bit_not,
2346 "xor $0xffffffff,%eax\n\t"
2347 "xor $0xffffffff,%ebx\n\t");
2348 }
2349
2350 static void
2351 i386_emit_equal (void)
2352 {
2353 EMIT_ASM32 (i386_equal,
2354 "cmpl %ebx,4(%esp)\n\t"
2355 "jne .Li386_equal_false\n\t"
2356 "cmpl %eax,(%esp)\n\t"
2357 "je .Li386_equal_true\n\t"
2358 ".Li386_equal_false:\n\t"
2359 "xor %eax,%eax\n\t"
2360 "jmp .Li386_equal_end\n\t"
2361 ".Li386_equal_true:\n\t"
2362 "mov $1,%eax\n\t"
2363 ".Li386_equal_end:\n\t"
2364 "xor %ebx,%ebx\n\t"
2365 "lea 0x8(%esp),%esp");
2366 }
2367
2368 static void
2369 i386_emit_less_signed (void)
2370 {
2371 EMIT_ASM32 (i386_less_signed,
2372 "cmpl %ebx,4(%esp)\n\t"
2373 "jl .Li386_less_signed_true\n\t"
2374 "jne .Li386_less_signed_false\n\t"
2375 "cmpl %eax,(%esp)\n\t"
2376 "jl .Li386_less_signed_true\n\t"
2377 ".Li386_less_signed_false:\n\t"
2378 "xor %eax,%eax\n\t"
2379 "jmp .Li386_less_signed_end\n\t"
2380 ".Li386_less_signed_true:\n\t"
2381 "mov $1,%eax\n\t"
2382 ".Li386_less_signed_end:\n\t"
2383 "xor %ebx,%ebx\n\t"
2384 "lea 0x8(%esp),%esp");
2385 }
2386
2387 static void
2388 i386_emit_less_unsigned (void)
2389 {
2390 EMIT_ASM32 (i386_less_unsigned,
2391 "cmpl %ebx,4(%esp)\n\t"
2392 "jb .Li386_less_unsigned_true\n\t"
2393 "jne .Li386_less_unsigned_false\n\t"
2394 "cmpl %eax,(%esp)\n\t"
2395 "jb .Li386_less_unsigned_true\n\t"
2396 ".Li386_less_unsigned_false:\n\t"
2397 "xor %eax,%eax\n\t"
2398 "jmp .Li386_less_unsigned_end\n\t"
2399 ".Li386_less_unsigned_true:\n\t"
2400 "mov $1,%eax\n\t"
2401 ".Li386_less_unsigned_end:\n\t"
2402 "xor %ebx,%ebx\n\t"
2403 "lea 0x8(%esp),%esp");
2404 }
2405
2406 static void
2407 i386_emit_ref (int size)
2408 {
2409 switch (size)
2410 {
2411 case 1:
2412 EMIT_ASM32 (i386_ref1,
2413 "movb (%eax),%al");
2414 break;
2415 case 2:
2416 EMIT_ASM32 (i386_ref2,
2417 "movw (%eax),%ax");
2418 break;
2419 case 4:
2420 EMIT_ASM32 (i386_ref4,
2421 "movl (%eax),%eax");
2422 break;
2423 case 8:
2424 EMIT_ASM32 (i386_ref8,
2425 "movl 4(%eax),%ebx\n\t"
2426 "movl (%eax),%eax");
2427 break;
2428 }
2429 }
2430
2431 static void
2432 i386_emit_if_goto (int *offset_p, int *size_p)
2433 {
2434 EMIT_ASM32 (i386_if_goto,
2435 "mov %eax,%ecx\n\t"
2436 "or %ebx,%ecx\n\t"
2437 "pop %eax\n\t"
2438 "pop %ebx\n\t"
2439 "cmpl $0,%ecx\n\t"
2440 /* Don't trust the assembler to choose the right jump */
2441 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2442
2443 if (offset_p)
2444 *offset_p = 11; /* be sure that this matches the sequence above */
2445 if (size_p)
2446 *size_p = 4;
2447 }
2448
2449 static void
2450 i386_emit_goto (int *offset_p, int *size_p)
2451 {
2452 EMIT_ASM32 (i386_goto,
2453 /* Don't trust the assembler to choose the right jump */
2454 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2455 if (offset_p)
2456 *offset_p = 1;
2457 if (size_p)
2458 *size_p = 4;
2459 }
2460
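/* Patch the 4-byte jump displacement at FROM so that the jump lands on
   TO.  The displacement is computed relative to the first byte after
   the patched field, i.e. to the end of the jump instruction.  */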
2461 static void
2462 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2463 {
2464 int diff = (to - (from + size));
2465 unsigned char buf[sizeof (int)];
2466
2467 /* We're only doing 4-byte sizes at the moment. */
2468 if (size != 4)
2469 {
2470 emit_error = 1;
2471 return;
2472 }
2473
2474 memcpy (buf, &diff, sizeof (int));
2475 target_write_memory (from, buf, sizeof (int));
2476 }
2477
2478 static void
2479 i386_emit_const (LONGEST num)
2480 {
2481 unsigned char buf[16];
2482 int i, hi, lo;
2483 CORE_ADDR buildaddr = current_insn_ptr;
2484
2485 i = 0;
2486 buf[i++] = 0xb8; /* mov $<n>,%eax */
2487 lo = num & 0xffffffff;
2488 memcpy (&buf[i], &lo, sizeof (lo));
2489 i += 4;
2490 hi = ((num >> 32) & 0xffffffff);
2491 if (hi)
2492 {
2493 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2494 memcpy (&buf[i], &hi, sizeof (hi));
2495 i += 4;
2496 }
2497 else
2498 {
2499 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2500 }
2501 append_insns (&buildaddr, i, buf);
2502 current_insn_ptr = buildaddr;
2503 }
2504
2505 static void
2506 i386_emit_call (CORE_ADDR fn)
2507 {
2508 unsigned char buf[16];
2509 int i, offset;
2510 CORE_ADDR buildaddr;
2511
2512 buildaddr = current_insn_ptr;
2513 i = 0;
2514 buf[i++] = 0xe8; /* call <reladdr> */
2515 offset = ((int) fn) - (buildaddr + 5);
2516 memcpy (buf + 1, &offset, 4);
2517 append_insns (&buildaddr, 5, buf);
2518 current_insn_ptr = buildaddr;
2519 }
2520
2521 static void
2522 i386_emit_reg (int reg)
2523 {
2524 unsigned char buf[16];
2525 int i;
2526 CORE_ADDR buildaddr;
2527
2528 EMIT_ASM32 (i386_reg_a,
2529 "sub $0x8,%esp");
2530 buildaddr = current_insn_ptr;
2531 i = 0;
2532 buf[i++] = 0xb8; /* mov $<n>,%eax */
2533 memcpy (&buf[i], &reg, sizeof (reg));
2534 i += 4;
2535 append_insns (&buildaddr, i, buf);
2536 current_insn_ptr = buildaddr;
2537 EMIT_ASM32 (i386_reg_b,
2538 "mov %eax,4(%esp)\n\t"
2539 "mov 8(%ebp),%eax\n\t"
2540 "mov %eax,(%esp)");
2541 i386_emit_call (get_raw_reg_func_addr ());
2542 EMIT_ASM32 (i386_reg_c,
2543 "xor %ebx,%ebx\n\t"
2544 "lea 0x8(%esp),%esp");
2545 }
2546
2547 static void
2548 i386_emit_pop (void)
2549 {
2550 EMIT_ASM32 (i386_pop,
2551 "pop %eax\n\t"
2552 "pop %ebx");
2553 }
2554
2555 static void
2556 i386_emit_stack_flush (void)
2557 {
2558 EMIT_ASM32 (i386_stack_flush,
2559 "push %ebx\n\t"
2560 "push %eax");
2561 }
2562
2563 static void
2564 i386_emit_zero_ext (int arg)
2565 {
2566 switch (arg)
2567 {
2568 case 8:
2569 EMIT_ASM32 (i386_zero_ext_8,
2570 "and $0xff,%eax\n\t"
2571 "xor %ebx,%ebx");
2572 break;
2573 case 16:
2574 EMIT_ASM32 (i386_zero_ext_16,
2575 "and $0xffff,%eax\n\t"
2576 "xor %ebx,%ebx");
2577 break;
2578 case 32:
2579 EMIT_ASM32 (i386_zero_ext_32,
2580 "xor %ebx,%ebx");
2581 break;
2582 default:
2583 emit_error = 1;
2584 }
2585 }
2586
2587 static void
2588 i386_emit_swap (void)
2589 {
2590 EMIT_ASM32 (i386_swap,
2591 "mov %eax,%ecx\n\t"
2592 "mov %ebx,%edx\n\t"
2593 "pop %eax\n\t"
2594 "pop %ebx\n\t"
2595 "push %edx\n\t"
2596 "push %ecx");
2597 }
2598
2599 static void
2600 i386_emit_stack_adjust (int n)
2601 {
2602 unsigned char buf[16];
2603 int i;
2604 CORE_ADDR buildaddr = current_insn_ptr;
2605
2606 i = 0;
2607 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2608 buf[i++] = 0x64;
2609 buf[i++] = 0x24;
2610 buf[i++] = n * 8;
2611 append_insns (&buildaddr, i, buf);
2612 current_insn_ptr = buildaddr;
2613 }
2614
2615 /* FN's prototype is `LONGEST(*fn)(int)'. */
2616
2617 static void
2618 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2619 {
2620 unsigned char buf[16];
2621 int i;
2622 CORE_ADDR buildaddr;
2623
2624 EMIT_ASM32 (i386_int_call_1_a,
2625 /* Reserve a bit of stack space. */
2626 "sub $0x8,%esp");
2627 /* Put the one argument on the stack. */
2628 buildaddr = current_insn_ptr;
2629 i = 0;
2630 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2631 buf[i++] = 0x04;
2632 buf[i++] = 0x24;
2633 memcpy (&buf[i], &arg1, sizeof (arg1));
2634 i += 4;
2635 append_insns (&buildaddr, i, buf);
2636 current_insn_ptr = buildaddr;
2637 i386_emit_call (fn);
2638 EMIT_ASM32 (i386_int_call_1_c,
2639 "mov %edx,%ebx\n\t"
2640 "lea 0x8(%esp),%esp");
2641 }
2642
2643 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2644
2645 static void
2646 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2647 {
2648 unsigned char buf[16];
2649 int i;
2650 CORE_ADDR buildaddr;
2651
2652 EMIT_ASM32 (i386_void_call_2_a,
2653 /* Preserve %eax only; we don't have to worry about %ebx. */
2654 "push %eax\n\t"
2655 /* Reserve a bit of stack space for arguments. */
2656 "sub $0x10,%esp\n\t"
2657 /* Copy "top" to the second argument position. (Note that
2658 		   we can't assume the function won't scribble on its
2659 arguments, so don't try to restore from this.) */
2660 "mov %eax,4(%esp)\n\t"
2661 "mov %ebx,8(%esp)");
2662 /* Put the first argument on the stack. */
2663 buildaddr = current_insn_ptr;
2664 i = 0;
2665 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2666 buf[i++] = 0x04;
2667 buf[i++] = 0x24;
2668 memcpy (&buf[i], &arg1, sizeof (arg1));
2669 i += 4;
2670 append_insns (&buildaddr, i, buf);
2671 current_insn_ptr = buildaddr;
2672 i386_emit_call (fn);
2673 EMIT_ASM32 (i386_void_call_2_b,
2674 "lea 0x10(%esp),%esp\n\t"
2675 /* Restore original stack top. */
2676 "pop %eax");
2677 }
2678
2679
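/* As with the amd64 versions above, the i386 conditional-goto emitters
   compare the two topmost 64-bit value-stack entries, drop them both,
   and end with a hand-encoded "jmp rel32" whose 4-byte displacement
   slot is reported through *OFFSET_P and *SIZE_P so that
   i386_write_goto_address can patch it later.  */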
2680 static void
2681 i386_emit_eq_goto (int *offset_p, int *size_p)
2682 {
2683 EMIT_ASM32 (eq,
2684 	    /* Check low half first, more likely to be the decider */
2685 "cmpl %eax,(%esp)\n\t"
2686 "jne .Leq_fallthru\n\t"
2687 "cmpl %ebx,4(%esp)\n\t"
2688 "jne .Leq_fallthru\n\t"
2689 "lea 0x8(%esp),%esp\n\t"
2690 "pop %eax\n\t"
2691 "pop %ebx\n\t"
2692 /* jmp, but don't trust the assembler to choose the right jump */
2693 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2694 ".Leq_fallthru:\n\t"
2695 "lea 0x8(%esp),%esp\n\t"
2696 "pop %eax\n\t"
2697 "pop %ebx");
2698
2699 if (offset_p)
2700 *offset_p = 18;
2701 if (size_p)
2702 *size_p = 4;
2703 }
2704
2705 static void
2706 i386_emit_ne_goto (int *offset_p, int *size_p)
2707 {
2708 EMIT_ASM32 (ne,
2709 	    /* Check low half first, more likely to be the decider */
2710 "cmpl %eax,(%esp)\n\t"
2711 "jne .Lne_jump\n\t"
2712 "cmpl %ebx,4(%esp)\n\t"
2713 "je .Lne_fallthru\n\t"
2714 ".Lne_jump:\n\t"
2715 "lea 0x8(%esp),%esp\n\t"
2716 "pop %eax\n\t"
2717 "pop %ebx\n\t"
2718 /* jmp, but don't trust the assembler to choose the right jump */
2719 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2720 ".Lne_fallthru:\n\t"
2721 "lea 0x8(%esp),%esp\n\t"
2722 "pop %eax\n\t"
2723 "pop %ebx");
2724
2725 if (offset_p)
2726 *offset_p = 18;
2727 if (size_p)
2728 *size_p = 4;
2729 }
2730
2731 static void
2732 i386_emit_lt_goto (int *offset_p, int *size_p)
2733 {
2734 EMIT_ASM32 (lt,
2735 "cmpl %ebx,4(%esp)\n\t"
2736 "jl .Llt_jump\n\t"
2737 "jne .Llt_fallthru\n\t"
2738 "cmpl %eax,(%esp)\n\t"
2739 "jnl .Llt_fallthru\n\t"
2740 ".Llt_jump:\n\t"
2741 "lea 0x8(%esp),%esp\n\t"
2742 "pop %eax\n\t"
2743 "pop %ebx\n\t"
2744 /* jmp, but don't trust the assembler to choose the right jump */
2745 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2746 ".Llt_fallthru:\n\t"
2747 "lea 0x8(%esp),%esp\n\t"
2748 "pop %eax\n\t"
2749 "pop %ebx");
2750
2751 if (offset_p)
2752 *offset_p = 20;
2753 if (size_p)
2754 *size_p = 4;
2755 }
2756
2757 static void
2758 i386_emit_le_goto (int *offset_p, int *size_p)
2759 {
2760 EMIT_ASM32 (le,
2761 "cmpl %ebx,4(%esp)\n\t"
2762 "jle .Lle_jump\n\t"
2763 "jne .Lle_fallthru\n\t"
2764 "cmpl %eax,(%esp)\n\t"
2765 "jnle .Lle_fallthru\n\t"
2766 ".Lle_jump:\n\t"
2767 "lea 0x8(%esp),%esp\n\t"
2768 "pop %eax\n\t"
2769 "pop %ebx\n\t"
2770 /* jmp, but don't trust the assembler to choose the right jump */
2771 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2772 ".Lle_fallthru:\n\t"
2773 "lea 0x8(%esp),%esp\n\t"
2774 "pop %eax\n\t"
2775 "pop %ebx");
2776
2777 if (offset_p)
2778 *offset_p = 20;
2779 if (size_p)
2780 *size_p = 4;
2781 }
2782
2783 static void
2784 i386_emit_gt_goto (int *offset_p, int *size_p)
2785 {
2786 EMIT_ASM32 (gt,
2787 "cmpl %ebx,4(%esp)\n\t"
2788 "jg .Lgt_jump\n\t"
2789 "jne .Lgt_fallthru\n\t"
2790 "cmpl %eax,(%esp)\n\t"
2791 "jng .Lgt_fallthru\n\t"
2792 ".Lgt_jump:\n\t"
2793 "lea 0x8(%esp),%esp\n\t"
2794 "pop %eax\n\t"
2795 "pop %ebx\n\t"
2796 /* jmp, but don't trust the assembler to choose the right jump */
2797 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2798 ".Lgt_fallthru:\n\t"
2799 "lea 0x8(%esp),%esp\n\t"
2800 "pop %eax\n\t"
2801 "pop %ebx");
2802
2803 if (offset_p)
2804 *offset_p = 20;
2805 if (size_p)
2806 *size_p = 4;
2807 }
2808
2809 static void
2810 i386_emit_ge_goto (int *offset_p, int *size_p)
2811 {
2812 EMIT_ASM32 (ge,
2813 "cmpl %ebx,4(%esp)\n\t"
2814 "jge .Lge_jump\n\t"
2815 "jne .Lge_fallthru\n\t"
2816 "cmpl %eax,(%esp)\n\t"
2817 "jnge .Lge_fallthru\n\t"
2818 ".Lge_jump:\n\t"
2819 "lea 0x8(%esp),%esp\n\t"
2820 "pop %eax\n\t"
2821 "pop %ebx\n\t"
2822 /* jmp, but don't trust the assembler to choose the right jump */
2823 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2824 ".Lge_fallthru:\n\t"
2825 "lea 0x8(%esp),%esp\n\t"
2826 "pop %eax\n\t"
2827 "pop %ebx");
2828
2829 if (offset_p)
2830 *offset_p = 20;
2831 if (size_p)
2832 *size_p = 4;
2833 }
2834
2835 struct emit_ops i386_emit_ops =
2836 {
2837 i386_emit_prologue,
2838 i386_emit_epilogue,
2839 i386_emit_add,
2840 i386_emit_sub,
2841 i386_emit_mul,
2842 i386_emit_lsh,
2843 i386_emit_rsh_signed,
2844 i386_emit_rsh_unsigned,
2845 i386_emit_ext,
2846 i386_emit_log_not,
2847 i386_emit_bit_and,
2848 i386_emit_bit_or,
2849 i386_emit_bit_xor,
2850 i386_emit_bit_not,
2851 i386_emit_equal,
2852 i386_emit_less_signed,
2853 i386_emit_less_unsigned,
2854 i386_emit_ref,
2855 i386_emit_if_goto,
2856 i386_emit_goto,
2857 i386_write_goto_address,
2858 i386_emit_const,
2859 i386_emit_call,
2860 i386_emit_reg,
2861 i386_emit_pop,
2862 i386_emit_stack_flush,
2863 i386_emit_zero_ext,
2864 i386_emit_swap,
2865 i386_emit_stack_adjust,
2866 i386_emit_int_call_1,
2867 i386_emit_void_call_2,
2868 i386_emit_eq_goto,
2869 i386_emit_ne_goto,
2870 i386_emit_lt_goto,
2871 i386_emit_le_goto,
2872 i386_emit_gt_goto,
2873 i386_emit_ge_goto
2874 };
2875
2876
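/* Return the emit_ops appropriate for the inferior: the amd64
   functions when a 64-bit target description is in use, the i386
   functions otherwise.  */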
2877 static struct emit_ops *
2878 x86_emit_ops (void)
2879 {
2880 #ifdef __x86_64__
2881 if (is_64bit_tdesc ())
2882 return &amd64_emit_ops;
2883 else
2884 #endif
2885 return &i386_emit_ops;
2886 }
2887
2888 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2889
2890 const gdb_byte *
2891 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2892 {
2893 *size = x86_breakpoint_len;
2894 return x86_breakpoint;
2895 }
2896
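/* Implementation of linux_target_ops method "supports_range_stepping".  */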
2897 static int
2898 x86_supports_range_stepping (void)
2899 {
2900 return 1;
2901 }
2902
2903 /* Implementation of linux_target_ops method
2904    "supports_hardware_single_step".  */
2905
2906 static int
2907 x86_supports_hardware_single_step (void)
2908 {
2909 return 1;
2910 }
2911
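/* Return the index of the target description the in-process agent
   should use for the current inferior.  */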
2912 static int
2913 x86_get_ipa_tdesc_idx (void)
2914 {
2915 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2916 const struct target_desc *tdesc = regcache->tdesc;
2917
2918 #ifdef __x86_64__
2919 return amd64_get_ipa_tdesc_idx (tdesc);
2920 #endif
2921
2922 if (tdesc == tdesc_i386_linux_no_xml)
2923 return X86_TDESC_SSE;
2924
2925 return i386_get_ipa_tdesc_idx (tdesc);
2926 }
2927
2928 /* This is initialized assuming an amd64 target.
2929    'low_arch_setup' will correct it for i386 or amd64 targets.  */
2930
2931 struct linux_target_ops the_low_target =
2932 {
2933 x86_linux_new_process,
2934 x86_linux_delete_process,
2935 x86_linux_new_thread,
2936 x86_linux_delete_thread,
2937 x86_linux_new_fork,
2938 x86_linux_prepare_to_resume,
2939 x86_linux_process_qsupported,
2940 x86_supports_tracepoints,
2941 x86_get_thread_area,
2942 x86_install_fast_tracepoint_jump_pad,
2943 x86_emit_ops,
2944 x86_get_min_fast_tracepoint_insn_len,
2945 x86_supports_range_stepping,
2946 x86_supports_hardware_single_step,
2947 x86_get_syscall_trapinfo,
2948 x86_get_ipa_tdesc_idx,
2949 };
2950
2951 /* The linux target ops object. */
2952
2953 linux_process_target *the_linux_target = &the_x86_target;
2954
2955 void
2956 initialize_low_arch (void)
2957 {
2958 /* Initialize the Linux target descriptions. */
2959 #ifdef __x86_64__
2960 tdesc_amd64_linux_no_xml = allocate_target_description ();
2961 copy_target_description (tdesc_amd64_linux_no_xml,
2962 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2963 false));
2964 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2965 #endif
2966
2967 tdesc_i386_linux_no_xml = allocate_target_description ();
2968 copy_target_description (tdesc_i386_linux_no_xml,
2969 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2970 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2971
2972 initialize_regsets_info (&x86_regsets_info);
2973 }