1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
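/* JUMP_INSN is a 32-bit relative jump ("jmp rel32": opcode 0xe9 followed
   by a 4-byte displacement, 5 bytes in total).  SMALL_JUMP_INSN is the
   same jump with an operand-size prefix ("jmp rel16": 0x66 0xe9 followed
   by a 2-byte displacement, 4 bytes in total).  The zeroed displacement
   bytes are patched in before the instruction is written out; roughly:

     memcpy (buf, jump_insn, sizeof (jump_insn));
     memcpy (buf + 1, &offset, 4);

   which is what the jump-pad builders below do.  */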
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
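/* These are the arch_prctl operation codes.  With PTRACE_ARCH_PRCTL the
   ptrace DATA argument selects the operation and ADDR points at the
   value, so, as a rough sketch, reading a thread's FS base looks like:

     unsigned long base;
     ptrace (PTRACE_ARCH_PRCTL, tid, &base, ARCH_GET_FS);

   which is how the fs_base/gs_base code below uses them.  */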
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (char **features, int count) override;
110
111 bool supports_tracepoints () override;
112
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
125 protected:
126
127 void low_arch_setup () override;
128
129 bool low_cannot_fetch_register (int regno) override;
130
131 bool low_cannot_store_register (int regno) override;
132
133 bool low_supports_breakpoints () override;
134
135 CORE_ADDR low_get_pc (regcache *regcache) override;
136
137 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
138
139 int low_decr_pc_after_break () override;
140
141 bool low_breakpoint_at (CORE_ADDR pc) override;
142
143 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
144 int size, raw_breakpoint *bp) override;
145
146 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
147 int size, raw_breakpoint *bp) override;
148
149 bool low_stopped_by_watchpoint () override;
150
151 CORE_ADDR low_stopped_data_address () override;
152
153 /* collect_ptrace_register/supply_ptrace_register are not needed in the
154 native i386 case (no registers smaller than an xfer unit), and are not
155 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
156
157 /* Need to fix up i386 siginfo if host is amd64. */
158 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
159 int direction) override;
160
161 arch_process_info *low_new_process () override;
162
163 void low_delete_process (arch_process_info *info) override;
164
165 void low_new_thread (lwp_info *) override;
166
167 void low_delete_thread (arch_lwp_info *) override;
168
169 void low_new_fork (process_info *parent, process_info *child) override;
170
171 void low_prepare_to_resume (lwp_info *lwp) override;
172
173 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
174
175 private:
176
 177   /* Update the target description of all processes; a new GDB
 178      connected, and it may or may not support xml target descriptions.  */
179 void update_xmltarget ();
180 };
181
182 /* The singleton target ops object. */
183
184 static x86_target the_x86_target;
185
186 /* Per-process arch-specific data we want to keep. */
187
188 struct arch_process_info
189 {
190 struct x86_debug_reg_state debug_reg_state;
191 };
192
193 #ifdef __x86_64__
194
195 /* Mapping between the general-purpose registers in `struct user'
196 format and GDB's register array layout.
197 Note that the transfer layout uses 64-bit regs. */
198 static /*const*/ int i386_regmap[] =
199 {
200 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
201 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
202 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
203 DS * 8, ES * 8, FS * 8, GS * 8
204 };
205
206 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
207
 208 /* So that the code below doesn't have to care whether it is i386 or amd64.  */
209 #define ORIG_EAX ORIG_RAX
210 #define REGSIZE 8
211
212 static const int x86_64_regmap[] =
213 {
214 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
215 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
216 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
217 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
218 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
219 DS * 8, ES * 8, FS * 8, GS * 8,
220 -1, -1, -1, -1, -1, -1, -1, -1,
221 -1, -1, -1, -1, -1, -1, -1, -1,
222 -1, -1, -1, -1, -1, -1, -1, -1,
223 -1,
224 -1, -1, -1, -1, -1, -1, -1, -1,
225 ORIG_RAX * 8,
226 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
227 21 * 8, 22 * 8,
228 #else
229 -1, -1,
230 #endif
231 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
232 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
233 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
234 -1, -1, -1, -1, -1, -1, -1, -1,
235 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
236 -1, -1, -1, -1, -1, -1, -1, -1,
237 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
238 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
239 -1, -1, -1, -1, -1, -1, -1, -1,
240 -1, -1, -1, -1, -1, -1, -1, -1,
241 -1, -1, -1, -1, -1, -1, -1, -1,
242 -1 /* pkru */
243 };
244
245 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
246 #define X86_64_USER_REGS (GS + 1)
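/* Each regmap entry is the byte offset of the corresponding GDB register
   within the ptrace general-purpose register block, or -1 if the register
   is not transferred that way.  A sketch of how the fill/store routines
   below consume the table:

     if (x86_64_regmap[regno] != -1)
       collect_register (regcache, regno,
                         (char *) buf + x86_64_regmap[regno]);
 */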
247
248 #else /* ! __x86_64__ */
249
250 /* Mapping between the general-purpose registers in `struct user'
251 format and GDB's register array layout. */
252 static /*const*/ int i386_regmap[] =
253 {
254 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
255 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
256 EIP * 4, EFL * 4, CS * 4, SS * 4,
257 DS * 4, ES * 4, FS * 4, GS * 4
258 };
259
260 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
261
262 #define REGSIZE 4
263
264 #endif
265
266 #ifdef __x86_64__
267
 268 /* Returns true if the current inferior belongs to an x86-64 process,
269 per the tdesc. */
270
271 static int
272 is_64bit_tdesc (void)
273 {
274 struct regcache *regcache = get_thread_regcache (current_thread, 0);
275
276 return register_size (regcache->tdesc, 0) == 8;
277 }
278
279 #endif
280
281 \f
282 /* Called by libthread_db. */
283
284 ps_err_e
285 ps_get_thread_area (struct ps_prochandle *ph,
286 lwpid_t lwpid, int idx, void **base)
287 {
288 #ifdef __x86_64__
289 int use_64bit = is_64bit_tdesc ();
290
291 if (use_64bit)
292 {
293 switch (idx)
294 {
295 case FS:
296 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
297 return PS_OK;
298 break;
299 case GS:
300 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
301 return PS_OK;
302 break;
303 default:
304 return PS_BADADDR;
305 }
306 return PS_ERR;
307 }
308 #endif
309
310 {
311 unsigned int desc[4];
312
313 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
314 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
315 return PS_ERR;
316
317 /* Ensure we properly extend the value to 64-bits for x86_64. */
318 *base = (void *) (uintptr_t) desc[1];
319 return PS_OK;
320 }
321 }
322
323 /* Get the thread area address. This is used to recognize which
324 thread is which when tracing with the in-process agent library. We
325 don't read anything from the address, and treat it as opaque; it's
326 the address itself that we assume is unique per-thread. */
327
328 int
329 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
330 {
331 #ifdef __x86_64__
332 int use_64bit = is_64bit_tdesc ();
333
334 if (use_64bit)
335 {
336 void *base;
337 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
338 {
339 *addr = (CORE_ADDR) (uintptr_t) base;
340 return 0;
341 }
342
343 return -1;
344 }
345 #endif
346
347 {
348 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
349 struct thread_info *thr = get_lwp_thread (lwp);
350 struct regcache *regcache = get_thread_regcache (thr, 1);
351 unsigned int desc[4];
352 ULONGEST gs = 0;
 353     const int reg_thread_area = 3; /* Number of selector bits to shift off to get the GDT index.  */
354 int idx;
355
356 collect_register_by_name (regcache, "gs", &gs);
357
358 idx = gs >> reg_thread_area;
359
360 if (ptrace (PTRACE_GET_THREAD_AREA,
361 lwpid_of (thr),
362 (void *) (long) idx, (unsigned long) &desc) < 0)
363 return -1;
364
365 *addr = desc[1];
366 return 0;
367 }
368 }
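/* Illustration of the 32-bit path above: a GS selector value such as
   0x63 keeps the GDT index in its upper bits (the low three bits are
   the table indicator and the privilege level), so 0x63 >> 3 selects
   GDT entry 12.  PTRACE_GET_THREAD_AREA then fills DESC with that
   entry, whose second word (desc[1]) is the segment base address that
   is returned here.  */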
369
370
371 \f
372 bool
373 x86_target::low_cannot_store_register (int regno)
374 {
375 #ifdef __x86_64__
376 if (is_64bit_tdesc ())
377 return false;
378 #endif
379
380 return regno >= I386_NUM_REGS;
381 }
382
383 bool
384 x86_target::low_cannot_fetch_register (int regno)
385 {
386 #ifdef __x86_64__
387 if (is_64bit_tdesc ())
388 return false;
389 #endif
390
391 return regno >= I386_NUM_REGS;
392 }
393
394 static void
395 x86_fill_gregset (struct regcache *regcache, void *buf)
396 {
397 int i;
398
399 #ifdef __x86_64__
400 if (register_size (regcache->tdesc, 0) == 8)
401 {
402 for (i = 0; i < X86_64_NUM_REGS; i++)
403 if (x86_64_regmap[i] != -1)
404 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
405
406 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
407 {
408 unsigned long base;
409 int lwpid = lwpid_of (current_thread);
410
411 collect_register_by_name (regcache, "fs_base", &base);
412 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
413
414 collect_register_by_name (regcache, "gs_base", &base);
415 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
416 }
417 #endif
418
419 return;
420 }
421
422 /* 32-bit inferior registers need to be zero-extended.
423 Callers would read uninitialized memory otherwise. */
424 memset (buf, 0x00, X86_64_USER_REGS * 8);
425 #endif
426
427 for (i = 0; i < I386_NUM_REGS; i++)
428 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
429
430 collect_register_by_name (regcache, "orig_eax",
431 ((char *) buf) + ORIG_EAX * REGSIZE);
432
433 #ifdef __x86_64__
434 /* Sign extend EAX value to avoid potential syscall restart
435 problems.
436
437 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
438 for a detailed explanation. */
439 if (register_size (regcache->tdesc, 0) == 4)
440 {
441 void *ptr = ((gdb_byte *) buf
442 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
443
444 *(int64_t *) ptr = *(int32_t *) ptr;
445 }
446 #endif
447 }
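/* A worked example of why the sign extension above matters (a sketch,
   not tied to a particular kernel version): a 32-bit inferior stopped
   inside a restartable syscall has %eax equal to -ERESTARTSYS, e.g.
   0xfffffe00.  The 64-bit kernel's restart check looks at the full
   64-bit rax, so a zero-extended 0x00000000fffffe00 would not be
   recognized, while the sign-extended 0xfffffffffffffe00 is.  */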
448
449 static void
450 x86_store_gregset (struct regcache *regcache, const void *buf)
451 {
452 int i;
453
454 #ifdef __x86_64__
455 if (register_size (regcache->tdesc, 0) == 8)
456 {
457 for (i = 0; i < X86_64_NUM_REGS; i++)
458 if (x86_64_regmap[i] != -1)
459 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
460
461 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
462 {
463 unsigned long base;
464 int lwpid = lwpid_of (current_thread);
465
466 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
467 supply_register_by_name (regcache, "fs_base", &base);
468
469 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
470 supply_register_by_name (regcache, "gs_base", &base);
471 }
472 #endif
473 return;
474 }
475 #endif
476
477 for (i = 0; i < I386_NUM_REGS; i++)
478 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
479
480 supply_register_by_name (regcache, "orig_eax",
481 ((char *) buf) + ORIG_EAX * REGSIZE);
482 }
483
484 static void
485 x86_fill_fpregset (struct regcache *regcache, void *buf)
486 {
487 #ifdef __x86_64__
488 i387_cache_to_fxsave (regcache, buf);
489 #else
490 i387_cache_to_fsave (regcache, buf);
491 #endif
492 }
493
494 static void
495 x86_store_fpregset (struct regcache *regcache, const void *buf)
496 {
497 #ifdef __x86_64__
498 i387_fxsave_to_cache (regcache, buf);
499 #else
500 i387_fsave_to_cache (regcache, buf);
501 #endif
502 }
503
504 #ifndef __x86_64__
505
506 static void
507 x86_fill_fpxregset (struct regcache *regcache, void *buf)
508 {
509 i387_cache_to_fxsave (regcache, buf);
510 }
511
512 static void
513 x86_store_fpxregset (struct regcache *regcache, const void *buf)
514 {
515 i387_fxsave_to_cache (regcache, buf);
516 }
517
518 #endif
519
520 static void
521 x86_fill_xstateregset (struct regcache *regcache, void *buf)
522 {
523 i387_cache_to_xsave (regcache, buf);
524 }
525
526 static void
527 x86_store_xstateregset (struct regcache *regcache, const void *buf)
528 {
529 i387_xsave_to_cache (regcache, buf);
530 }
531
532 /* ??? The non-biarch i386 case stores all the i387 regs twice.
533 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
534 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
 535    doesn't work.  It would be nice to avoid the duplication in the case where it
536 does work. Maybe the arch_setup routine could check whether it works
537 and update the supported regsets accordingly. */
538
539 static struct regset_info x86_regsets[] =
540 {
541 #ifdef HAVE_PTRACE_GETREGS
542 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
543 GENERAL_REGS,
544 x86_fill_gregset, x86_store_gregset },
545 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
546 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
547 # ifndef __x86_64__
548 # ifdef HAVE_PTRACE_GETFPXREGS
549 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
550 EXTENDED_REGS,
551 x86_fill_fpxregset, x86_store_fpxregset },
552 # endif
553 # endif
554 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
555 FP_REGS,
556 x86_fill_fpregset, x86_store_fpregset },
557 #endif /* HAVE_PTRACE_GETREGS */
558 NULL_REGSET
559 };
560
561 bool
562 x86_target::low_supports_breakpoints ()
563 {
564 return true;
565 }
566
567 CORE_ADDR
568 x86_target::low_get_pc (regcache *regcache)
569 {
570 int use_64bit = register_size (regcache->tdesc, 0) == 8;
571
572 if (use_64bit)
573 {
574 uint64_t pc;
575
576 collect_register_by_name (regcache, "rip", &pc);
577 return (CORE_ADDR) pc;
578 }
579 else
580 {
581 uint32_t pc;
582
583 collect_register_by_name (regcache, "eip", &pc);
584 return (CORE_ADDR) pc;
585 }
586 }
587
588 void
589 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
590 {
591 int use_64bit = register_size (regcache->tdesc, 0) == 8;
592
593 if (use_64bit)
594 {
595 uint64_t newpc = pc;
596
597 supply_register_by_name (regcache, "rip", &newpc);
598 }
599 else
600 {
601 uint32_t newpc = pc;
602
603 supply_register_by_name (regcache, "eip", &newpc);
604 }
605 }
606
607 int
608 x86_target::low_decr_pc_after_break ()
609 {
610 return 1;
611 }
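/* The software breakpoint used below is the one-byte "int3" (0xCC).
   When it traps, the reported PC points just past that byte, so the
   PC must be moved back by exactly one byte to land on the breakpoint
   address again; hence the value 1 above.  */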
612
613 \f
614 static const gdb_byte x86_breakpoint[] = { 0xCC };
615 #define x86_breakpoint_len 1
616
617 bool
618 x86_target::low_breakpoint_at (CORE_ADDR pc)
619 {
620 unsigned char c;
621
622 read_memory (pc, &c, 1);
623 if (c == 0xCC)
624 return true;
625
626 return false;
627 }
628 \f
629 /* Low-level function vector. */
630 struct x86_dr_low_type x86_dr_low =
631 {
632 x86_linux_dr_set_control,
633 x86_linux_dr_set_addr,
634 x86_linux_dr_get_addr,
635 x86_linux_dr_get_status,
636 x86_linux_dr_get_control,
637 sizeof (void *),
638 };
639 \f
640 /* Breakpoint/Watchpoint support. */
641
642 bool
643 x86_target::supports_z_point_type (char z_type)
644 {
645 switch (z_type)
646 {
647 case Z_PACKET_SW_BP:
648 case Z_PACKET_HW_BP:
649 case Z_PACKET_WRITE_WP:
650 case Z_PACKET_ACCESS_WP:
651 return true;
652 default:
653 return false;
654 }
655 }
656
657 int
658 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
659 int size, raw_breakpoint *bp)
660 {
661 struct process_info *proc = current_process ();
662
663 switch (type)
664 {
665 case raw_bkpt_type_hw:
666 case raw_bkpt_type_write_wp:
667 case raw_bkpt_type_access_wp:
668 {
669 enum target_hw_bp_type hw_type
670 = raw_bkpt_type_to_target_hw_bp_type (type);
671 struct x86_debug_reg_state *state
672 = &proc->priv->arch_private->debug_reg_state;
673
674 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
675 }
676
677 default:
678 /* Unsupported. */
679 return 1;
680 }
681 }
682
683 int
684 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
685 int size, raw_breakpoint *bp)
686 {
687 struct process_info *proc = current_process ();
688
689 switch (type)
690 {
691 case raw_bkpt_type_hw:
692 case raw_bkpt_type_write_wp:
693 case raw_bkpt_type_access_wp:
694 {
695 enum target_hw_bp_type hw_type
696 = raw_bkpt_type_to_target_hw_bp_type (type);
697 struct x86_debug_reg_state *state
698 = &proc->priv->arch_private->debug_reg_state;
699
700 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
701 }
702 default:
703 /* Unsupported. */
704 return 1;
705 }
706 }
707
708 bool
709 x86_target::low_stopped_by_watchpoint ()
710 {
711 struct process_info *proc = current_process ();
712 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
713 }
714
715 CORE_ADDR
716 x86_target::low_stopped_data_address ()
717 {
718 struct process_info *proc = current_process ();
719 CORE_ADDR addr;
720 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
721 &addr))
722 return addr;
723 return 0;
724 }
725 \f
726 /* Called when a new process is created. */
727
728 arch_process_info *
729 x86_target::low_new_process ()
730 {
731 struct arch_process_info *info = XCNEW (struct arch_process_info);
732
733 x86_low_init_dregs (&info->debug_reg_state);
734
735 return info;
736 }
737
738 /* Called when a process is being deleted. */
739
740 void
741 x86_target::low_delete_process (arch_process_info *info)
742 {
743 xfree (info);
744 }
745
746 void
747 x86_target::low_new_thread (lwp_info *lwp)
748 {
749 /* This comes from nat/. */
750 x86_linux_new_thread (lwp);
751 }
752
753 void
754 x86_target::low_delete_thread (arch_lwp_info *alwp)
755 {
756 /* This comes from nat/. */
757 x86_linux_delete_thread (alwp);
758 }
759
760 /* Target routine for new_fork. */
761
762 void
763 x86_target::low_new_fork (process_info *parent, process_info *child)
764 {
765 /* These are allocated by linux_add_process. */
766 gdb_assert (parent->priv != NULL
767 && parent->priv->arch_private != NULL);
768 gdb_assert (child->priv != NULL
769 && child->priv->arch_private != NULL);
770
771 /* Linux kernel before 2.6.33 commit
772 72f674d203cd230426437cdcf7dd6f681dad8b0d
773 will inherit hardware debug registers from parent
774 on fork/vfork/clone. Newer Linux kernels create such tasks with
775 zeroed debug registers.
776
777 GDB core assumes the child inherits the watchpoints/hw
778 breakpoints of the parent, and will remove them all from the
 779      forked off process.  Copy the debug register mirrors into the
 780      new process so that all breakpoints and watchpoints can be
 781      removed together.  The debug register mirrors will be zeroed in
 782      the end before detaching the forked off process, thus making
783 this compatible with older Linux kernels too. */
784
785 *child->priv->arch_private = *parent->priv->arch_private;
786 }
787
788 void
789 x86_target::low_prepare_to_resume (lwp_info *lwp)
790 {
791 /* This comes from nat/. */
792 x86_linux_prepare_to_resume (lwp);
793 }
794
795 /* See nat/x86-dregs.h. */
796
797 struct x86_debug_reg_state *
798 x86_debug_reg_state (pid_t pid)
799 {
800 struct process_info *proc = find_process_pid (pid);
801
802 return &proc->priv->arch_private->debug_reg_state;
803 }
804 \f
805 /* When GDBSERVER is built as a 64-bit application on linux, the
806 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
807 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
808 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
809 conversion in-place ourselves. */
810
 811 /* Convert a ptrace/host siginfo object into/from the siginfo in the
 812    layout of the inferior's architecture.  Returns true if any
813 conversion was done; false otherwise. If DIRECTION is 1, then copy
814 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
815 INF. */
816
817 bool
818 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
819 {
820 #ifdef __x86_64__
821 unsigned int machine;
822 int tid = lwpid_of (current_thread);
823 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
824
825 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
826 if (!is_64bit_tdesc ())
827 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
828 FIXUP_32);
829 /* No fixup for native x32 GDB. */
830 else if (!is_elf64 && sizeof (void *) == 8)
831 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
832 FIXUP_X32);
833 #endif
834
835 return false;
836 }
837 \f
838 static int use_xml;
839
840 /* Format of XSAVE extended state is:
841 struct
842 {
843 fxsave_bytes[0..463]
844 sw_usable_bytes[464..511]
845 xstate_hdr_bytes[512..575]
846 avx_bytes[576..831]
847 future_state etc
848 };
849
850 Same memory layout will be used for the coredump NT_X86_XSTATE
851 representing the XSAVE extended state registers.
852
 853    The first 8 bytes of the sw_usable_bytes area [464..471] are the OS enabled
854 extended state mask, which is the same as the extended control register
855 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
856 together with the mask saved in the xstate_hdr_bytes to determine what
857 states the processor/OS supports and what state, used or initialized,
858 the process/thread is in. */
859 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
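/* Given a raw XSAVE buffer, XCR0 can therefore be recovered roughly as

     xcr0 = *(uint64_t *) (xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET);

   which is what x86_linux_read_description below does (it indexes an
   array of uint64_t rather than casting).  */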
860
861 /* Does the current host support the GETFPXREGS request? The header
862 file may or may not define it, and even if it is defined, the
863 kernel will return EIO if it's running on a pre-SSE processor. */
864 int have_ptrace_getfpxregs =
865 #ifdef HAVE_PTRACE_GETFPXREGS
866 -1
867 #else
868 0
869 #endif
870 ;
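/* The value is a tri-state: -1 means the request has not been probed
   yet, 0 means it is known not to work, and 1 means it is known to
   work.  The probe itself is done in x86_linux_read_description
   below.  */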
871
872 /* Get Linux/x86 target description from running target. */
873
874 static const struct target_desc *
875 x86_linux_read_description (void)
876 {
877 unsigned int machine;
878 int is_elf64;
879 int xcr0_features;
880 int tid;
881 static uint64_t xcr0;
882 struct regset_info *regset;
883
884 tid = lwpid_of (current_thread);
885
886 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
887
888 if (sizeof (void *) == 4)
889 {
890 if (is_elf64 > 0)
891 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
892 #ifndef __x86_64__
893 else if (machine == EM_X86_64)
894 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
895 #endif
896 }
897
898 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
899 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
900 {
901 elf_fpxregset_t fpxregs;
902
903 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
904 {
905 have_ptrace_getfpxregs = 0;
906 have_ptrace_getregset = 0;
907 return i386_linux_read_description (X86_XSTATE_X87);
908 }
909 else
910 have_ptrace_getfpxregs = 1;
911 }
912 #endif
913
914 if (!use_xml)
915 {
916 x86_xcr0 = X86_XSTATE_SSE_MASK;
917
918 /* Don't use XML. */
919 #ifdef __x86_64__
920 if (machine == EM_X86_64)
921 return tdesc_amd64_linux_no_xml;
922 else
923 #endif
924 return tdesc_i386_linux_no_xml;
925 }
926
927 if (have_ptrace_getregset == -1)
928 {
929 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
930 struct iovec iov;
931
932 iov.iov_base = xstateregs;
933 iov.iov_len = sizeof (xstateregs);
934
935 /* Check if PTRACE_GETREGSET works. */
936 if (ptrace (PTRACE_GETREGSET, tid,
937 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
938 have_ptrace_getregset = 0;
939 else
940 {
941 have_ptrace_getregset = 1;
942
943 /* Get XCR0 from XSAVE extended state. */
944 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
945 / sizeof (uint64_t))];
946
947 /* Use PTRACE_GETREGSET if it is available. */
948 for (regset = x86_regsets;
949 regset->fill_function != NULL; regset++)
950 if (regset->get_request == PTRACE_GETREGSET)
951 regset->size = X86_XSTATE_SIZE (xcr0);
952 else if (regset->type != GENERAL_REGS)
953 regset->size = 0;
954 }
955 }
956
957 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
958 xcr0_features = (have_ptrace_getregset
959 && (xcr0 & X86_XSTATE_ALL_MASK));
960
961 if (xcr0_features)
962 x86_xcr0 = xcr0;
963
964 if (machine == EM_X86_64)
965 {
966 #ifdef __x86_64__
967 const target_desc *tdesc = NULL;
968
969 if (xcr0_features)
970 {
971 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
972 !is_elf64);
973 }
974
975 if (tdesc == NULL)
976 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
977 return tdesc;
978 #endif
979 }
980 else
981 {
982 const target_desc *tdesc = NULL;
983
984 if (xcr0_features)
985 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
986
987 if (tdesc == NULL)
988 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
989
990 return tdesc;
991 }
992
993 gdb_assert_not_reached ("failed to return tdesc");
994 }
995
 996 /* Update the target description of all processes; a new GDB
 997    connected, and it may or may not support xml target descriptions.  */
998
999 void
1000 x86_target::update_xmltarget ()
1001 {
1002 struct thread_info *saved_thread = current_thread;
1003
1004 /* Before changing the register cache's internal layout, flush the
1005 contents of the current valid caches back to the threads, and
1006 release the current regcache objects. */
1007 regcache_release ();
1008
1009 for_each_process ([this] (process_info *proc) {
1010 int pid = proc->pid;
1011
1012 /* Look up any thread of this process. */
1013 current_thread = find_any_thread_of_pid (pid);
1014
1015 low_arch_setup ();
1016 });
1017
1018 current_thread = saved_thread;
1019 }
1020
1021 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1022 PTRACE_GETREGSET. */
1023
1024 void
1025 x86_target::process_qsupported (char **features, int count)
1026 {
1027 int i;
1028
 1029   /* Assume gdb doesn't support XML unless it tells us otherwise.  If gdb
 1030      sends "xmlRegisters=" with "i386" in the qSupported query, it supports
 1031      x86 XML target descriptions.  */
1032 use_xml = 0;
1033 for (i = 0; i < count; i++)
1034 {
1035 const char *feature = features[i];
1036
1037 if (startswith (feature, "xmlRegisters="))
1038 {
1039 char *copy = xstrdup (feature + 13);
1040
1041 char *saveptr;
1042 for (char *p = strtok_r (copy, ",", &saveptr);
1043 p != NULL;
1044 p = strtok_r (NULL, ",", &saveptr))
1045 {
1046 if (strcmp (p, "i386") == 0)
1047 {
1048 use_xml = 1;
1049 break;
1050 }
1051 }
1052
1053 free (copy);
1054 }
1055 }
1056 update_xmltarget ();
1057 }
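/* For example, if GDB's qSupported packet carried the feature
   "xmlRegisters=i386,arm" (an illustrative value), the loop above finds
   the "i386" entry and sets use_xml to 1, so the subsequent
   low_arch_setup calls hand out real XML target descriptions instead of
   the *_no_xml fallbacks.  */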
1058
1059 /* Common for x86/x86-64. */
1060
1061 static struct regsets_info x86_regsets_info =
1062 {
1063 x86_regsets, /* regsets */
1064 0, /* num_regsets */
1065 NULL, /* disabled_regsets */
1066 };
1067
1068 #ifdef __x86_64__
1069 static struct regs_info amd64_linux_regs_info =
1070 {
1071 NULL, /* regset_bitmap */
1072 NULL, /* usrregs_info */
1073 &x86_regsets_info
1074 };
1075 #endif
1076 static struct usrregs_info i386_linux_usrregs_info =
1077 {
1078 I386_NUM_REGS,
1079 i386_regmap,
1080 };
1081
1082 static struct regs_info i386_linux_regs_info =
1083 {
1084 NULL, /* regset_bitmap */
1085 &i386_linux_usrregs_info,
1086 &x86_regsets_info
1087 };
1088
1089 const regs_info *
1090 x86_target::get_regs_info ()
1091 {
1092 #ifdef __x86_64__
1093 if (is_64bit_tdesc ())
1094 return &amd64_linux_regs_info;
1095 else
1096 #endif
1097 return &i386_linux_regs_info;
1098 }
1099
1100 /* Initialize the target description for the architecture of the
1101 inferior. */
1102
1103 void
1104 x86_target::low_arch_setup ()
1105 {
1106 current_process ()->tdesc = x86_linux_read_description ();
1107 }
1108
 1109 /* Fill *SYSNO with the number of the syscall that was trapped.  This
 1110    should only be called if LWP got a SYSCALL_SIGTRAP.  */
1111
1112 static void
1113 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1114 {
1115 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1116
1117 if (use_64bit)
1118 {
1119 long l_sysno;
1120
1121 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1122 *sysno = (int) l_sysno;
1123 }
1124 else
1125 collect_register_by_name (regcache, "orig_eax", sysno);
1126 }
1127
1128 bool
1129 x86_target::supports_tracepoints ()
1130 {
1131 return true;
1132 }
1133
1134 static void
1135 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1136 {
1137 target_write_memory (*to, buf, len);
1138 *to += len;
1139 }
1140
1141 static int
1142 push_opcode (unsigned char *buf, const char *op)
1143 {
1144 unsigned char *buf_org = buf;
1145
1146 while (1)
1147 {
1148 char *endptr;
1149 unsigned long ul = strtoul (op, &endptr, 16);
1150
1151 if (endptr == op)
1152 break;
1153
1154 *buf++ = ul;
1155 op = endptr;
1156 }
1157
1158 return buf - buf_org;
1159 }
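/* Usage sketch: push_opcode (buf, "48 89 e6") parses the string as
   space-separated hex bytes, writes 0x48 0x89 0xe6 ("mov %rsp,%rsi")
   into BUF, and returns 3.  */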
1160
1161 #ifdef __x86_64__
1162
1163 /* Build a jump pad that saves registers and calls a collection
 1164    function.  Writes the jump instruction that targets the jump pad
 1165    to JJUMPAD_INSN.  The caller is responsible for writing it in at
 1166    the tracepoint address.  */
1167
1168 static int
1169 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1170 CORE_ADDR collector,
1171 CORE_ADDR lockaddr,
1172 ULONGEST orig_size,
1173 CORE_ADDR *jump_entry,
1174 CORE_ADDR *trampoline,
1175 ULONGEST *trampoline_size,
1176 unsigned char *jjump_pad_insn,
1177 ULONGEST *jjump_pad_insn_size,
1178 CORE_ADDR *adjusted_insn_addr,
1179 CORE_ADDR *adjusted_insn_addr_end,
1180 char *err)
1181 {
1182 unsigned char buf[40];
1183 int i, offset;
1184 int64_t loffset;
1185
1186 CORE_ADDR buildaddr = *jump_entry;
1187
1188 /* Build the jump pad. */
1189
1190 /* First, do tracepoint data collection. Save registers. */
1191 i = 0;
1192 /* Need to ensure stack pointer saved first. */
1193 buf[i++] = 0x54; /* push %rsp */
1194 buf[i++] = 0x55; /* push %rbp */
1195 buf[i++] = 0x57; /* push %rdi */
1196 buf[i++] = 0x56; /* push %rsi */
1197 buf[i++] = 0x52; /* push %rdx */
1198 buf[i++] = 0x51; /* push %rcx */
1199 buf[i++] = 0x53; /* push %rbx */
1200 buf[i++] = 0x50; /* push %rax */
1201 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1202 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1203 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1204 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1205 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1206 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1207 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1208 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1209 buf[i++] = 0x9c; /* pushfq */
1210 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1211 buf[i++] = 0xbf;
1212 memcpy (buf + i, &tpaddr, 8);
1213 i += 8;
1214 buf[i++] = 0x57; /* push %rdi */
1215 append_insns (&buildaddr, i, buf);
1216
1217 /* Stack space for the collecting_t object. */
1218 i = 0;
1219 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1220 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1221 memcpy (buf + i, &tpoint, 8);
1222 i += 8;
1223 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1224 i += push_opcode (&buf[i],
1225 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1226 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1227 append_insns (&buildaddr, i, buf);
1228
1229 /* spin-lock. */
1230 i = 0;
 1231   i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1232 memcpy (&buf[i], (void *) &lockaddr, 8);
1233 i += 8;
1234 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1235 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1236 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1237 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1238 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1239 append_insns (&buildaddr, i, buf);
1240
1241 /* Set up the gdb_collect call. */
1242 /* At this point, (stack pointer + 0x18) is the base of our saved
1243 register block. */
1244
1245 i = 0;
1246 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1247 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1248
1249 /* tpoint address may be 64-bit wide. */
 1250   i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1251 memcpy (buf + i, &tpoint, 8);
1252 i += 8;
1253 append_insns (&buildaddr, i, buf);
1254
 1255   /* The collector function, being in the shared library, may be
 1256      more than 31 bits away from the jump pad.  */
1257 i = 0;
1258 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1259 memcpy (buf + i, &collector, 8);
1260 i += 8;
1261 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1262 append_insns (&buildaddr, i, buf);
1263
1264 /* Clear the spin-lock. */
1265 i = 0;
1266 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1267 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1268 memcpy (buf + i, &lockaddr, 8);
1269 i += 8;
1270 append_insns (&buildaddr, i, buf);
1271
1272 /* Remove stack that had been used for the collect_t object. */
1273 i = 0;
1274 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1275 append_insns (&buildaddr, i, buf);
1276
1277 /* Restore register state. */
1278 i = 0;
1279 buf[i++] = 0x48; /* add $0x8,%rsp */
1280 buf[i++] = 0x83;
1281 buf[i++] = 0xc4;
1282 buf[i++] = 0x08;
1283 buf[i++] = 0x9d; /* popfq */
1284 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1285 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1286 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1287 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1288 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1289 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1290 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1291 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1292 buf[i++] = 0x58; /* pop %rax */
1293 buf[i++] = 0x5b; /* pop %rbx */
1294 buf[i++] = 0x59; /* pop %rcx */
1295 buf[i++] = 0x5a; /* pop %rdx */
1296 buf[i++] = 0x5e; /* pop %rsi */
1297 buf[i++] = 0x5f; /* pop %rdi */
1298 buf[i++] = 0x5d; /* pop %rbp */
1299 buf[i++] = 0x5c; /* pop %rsp */
1300 append_insns (&buildaddr, i, buf);
1301
1302 /* Now, adjust the original instruction to execute in the jump
1303 pad. */
1304 *adjusted_insn_addr = buildaddr;
1305 relocate_instruction (&buildaddr, tpaddr);
1306 *adjusted_insn_addr_end = buildaddr;
1307
1308 /* Finally, write a jump back to the program. */
1309
1310 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1311 if (loffset > INT_MAX || loffset < INT_MIN)
1312 {
1313 sprintf (err,
1314 "E.Jump back from jump pad too far from tracepoint "
1315 "(offset 0x%" PRIx64 " > int32).", loffset);
1316 return 1;
1317 }
1318
1319 offset = (int) loffset;
1320 memcpy (buf, jump_insn, sizeof (jump_insn));
1321 memcpy (buf + 1, &offset, 4);
1322 append_insns (&buildaddr, sizeof (jump_insn), buf);
1323
1324 /* The jump pad is now built. Wire in a jump to our jump pad. This
1325 is always done last (by our caller actually), so that we can
1326 install fast tracepoints with threads running. This relies on
1327 the agent's atomic write support. */
1328 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1329 if (loffset > INT_MAX || loffset < INT_MIN)
1330 {
1331 sprintf (err,
1332 "E.Jump pad too far from tracepoint "
1333 "(offset 0x%" PRIx64 " > int32).", loffset);
1334 return 1;
1335 }
1336
1337 offset = (int) loffset;
1338
1339 memcpy (buf, jump_insn, sizeof (jump_insn));
1340 memcpy (buf + 1, &offset, 4);
1341 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1342 *jjump_pad_insn_size = sizeof (jump_insn);
1343
1344 /* Return the end address of our pad. */
1345 *jump_entry = buildaddr;
1346
1347 return 0;
1348 }
1349
1350 #endif /* __x86_64__ */
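/* To summarize, the amd64 jump pad built above contains, in order:
   pushes of the general-purpose registers, the flags and the tracepoint
   address; a collecting_t object and the spin-lock guarding it; an
   indirect call to COLLECTOR (the in-process agent's collection
   routine); the lock release and register restores; the relocated
   original instruction; and a final jump back to TPADDR + ORIG_SIZE in
   the inferior.  */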
1351
1352 /* Build a jump pad that saves registers and calls a collection
 1353    function.  Writes the jump instruction that targets the jump pad
 1354    to JJUMPAD_INSN.  The caller is responsible for writing it in at
 1355    the tracepoint address.  */
1356
1357 static int
1358 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1359 CORE_ADDR collector,
1360 CORE_ADDR lockaddr,
1361 ULONGEST orig_size,
1362 CORE_ADDR *jump_entry,
1363 CORE_ADDR *trampoline,
1364 ULONGEST *trampoline_size,
1365 unsigned char *jjump_pad_insn,
1366 ULONGEST *jjump_pad_insn_size,
1367 CORE_ADDR *adjusted_insn_addr,
1368 CORE_ADDR *adjusted_insn_addr_end,
1369 char *err)
1370 {
1371 unsigned char buf[0x100];
1372 int i, offset;
1373 CORE_ADDR buildaddr = *jump_entry;
1374
1375 /* Build the jump pad. */
1376
1377 /* First, do tracepoint data collection. Save registers. */
1378 i = 0;
1379 buf[i++] = 0x60; /* pushad */
1380 buf[i++] = 0x68; /* push tpaddr aka $pc */
1381 *((int *)(buf + i)) = (int) tpaddr;
1382 i += 4;
1383 buf[i++] = 0x9c; /* pushf */
1384 buf[i++] = 0x1e; /* push %ds */
1385 buf[i++] = 0x06; /* push %es */
1386 buf[i++] = 0x0f; /* push %fs */
1387 buf[i++] = 0xa0;
1388 buf[i++] = 0x0f; /* push %gs */
1389 buf[i++] = 0xa8;
1390 buf[i++] = 0x16; /* push %ss */
1391 buf[i++] = 0x0e; /* push %cs */
1392 append_insns (&buildaddr, i, buf);
1393
1394 /* Stack space for the collecting_t object. */
1395 i = 0;
1396 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1397
1398 /* Build the object. */
1399 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1400 memcpy (buf + i, &tpoint, 4);
1401 i += 4;
1402 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1403
1404 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1405 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1406 append_insns (&buildaddr, i, buf);
1407
 1408   /* spin-lock.  Note this uses cmpxchg, which is not available on the
 1409      original i386.  If we cared about that, this could use xchg instead.  */
1410
1411 i = 0;
1412 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1413 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1414 %esp,<lockaddr> */
1415 memcpy (&buf[i], (void *) &lockaddr, 4);
1416 i += 4;
1417 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1418 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1419 append_insns (&buildaddr, i, buf);
1420
1421
1422 /* Set up arguments to the gdb_collect call. */
1423 i = 0;
1424 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1425 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1426 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1427 append_insns (&buildaddr, i, buf);
1428
1429 i = 0;
1430 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1431 append_insns (&buildaddr, i, buf);
1432
1433 i = 0;
1434 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1435 memcpy (&buf[i], (void *) &tpoint, 4);
1436 i += 4;
1437 append_insns (&buildaddr, i, buf);
1438
1439 buf[0] = 0xe8; /* call <reladdr> */
1440 offset = collector - (buildaddr + sizeof (jump_insn));
1441 memcpy (buf + 1, &offset, 4);
1442 append_insns (&buildaddr, 5, buf);
1443 /* Clean up after the call. */
1444 buf[0] = 0x83; /* add $0x8,%esp */
1445 buf[1] = 0xc4;
1446 buf[2] = 0x08;
1447 append_insns (&buildaddr, 3, buf);
1448
1449
1450 /* Clear the spin-lock. This would need the LOCK prefix on older
1451 broken archs. */
1452 i = 0;
1453 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1454 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1455 memcpy (buf + i, &lockaddr, 4);
1456 i += 4;
1457 append_insns (&buildaddr, i, buf);
1458
1459
1460 /* Remove stack that had been used for the collect_t object. */
1461 i = 0;
1462 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1463 append_insns (&buildaddr, i, buf);
1464
1465 i = 0;
1466 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1467 buf[i++] = 0xc4;
1468 buf[i++] = 0x04;
1469 buf[i++] = 0x17; /* pop %ss */
1470 buf[i++] = 0x0f; /* pop %gs */
1471 buf[i++] = 0xa9;
1472 buf[i++] = 0x0f; /* pop %fs */
1473 buf[i++] = 0xa1;
1474 buf[i++] = 0x07; /* pop %es */
1475 buf[i++] = 0x1f; /* pop %ds */
1476 buf[i++] = 0x9d; /* popf */
1477 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1478 buf[i++] = 0xc4;
1479 buf[i++] = 0x04;
1480 buf[i++] = 0x61; /* popad */
1481 append_insns (&buildaddr, i, buf);
1482
1483 /* Now, adjust the original instruction to execute in the jump
1484 pad. */
1485 *adjusted_insn_addr = buildaddr;
1486 relocate_instruction (&buildaddr, tpaddr);
1487 *adjusted_insn_addr_end = buildaddr;
1488
1489 /* Write the jump back to the program. */
1490 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1491 memcpy (buf, jump_insn, sizeof (jump_insn));
1492 memcpy (buf + 1, &offset, 4);
1493 append_insns (&buildaddr, sizeof (jump_insn), buf);
1494
1495 /* The jump pad is now built. Wire in a jump to our jump pad. This
1496 is always done last (by our caller actually), so that we can
1497 install fast tracepoints with threads running. This relies on
1498 the agent's atomic write support. */
1499 if (orig_size == 4)
1500 {
1501 /* Create a trampoline. */
1502 *trampoline_size = sizeof (jump_insn);
1503 if (!claim_trampoline_space (*trampoline_size, trampoline))
1504 {
1505 /* No trampoline space available. */
1506 strcpy (err,
1507 "E.Cannot allocate trampoline space needed for fast "
1508 "tracepoints on 4-byte instructions.");
1509 return 1;
1510 }
1511
1512 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1513 memcpy (buf, jump_insn, sizeof (jump_insn));
1514 memcpy (buf + 1, &offset, 4);
1515 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1516
1517 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1518 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1519 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1520 memcpy (buf + 2, &offset, 2);
1521 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1522 *jjump_pad_insn_size = sizeof (small_jump_insn);
1523 }
1524 else
1525 {
1526 /* Else use a 32-bit relative jump instruction. */
1527 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1528 memcpy (buf, jump_insn, sizeof (jump_insn));
1529 memcpy (buf + 1, &offset, 4);
1530 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1531 *jjump_pad_insn_size = sizeof (jump_insn);
1532 }
1533
1534 /* Return the end address of our pad. */
1535 *jump_entry = buildaddr;
1536
1537 return 0;
1538 }
1539
1540 bool
1541 x86_target::supports_fast_tracepoints ()
1542 {
1543 return true;
1544 }
1545
1546 int
1547 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1548 CORE_ADDR tpaddr,
1549 CORE_ADDR collector,
1550 CORE_ADDR lockaddr,
1551 ULONGEST orig_size,
1552 CORE_ADDR *jump_entry,
1553 CORE_ADDR *trampoline,
1554 ULONGEST *trampoline_size,
1555 unsigned char *jjump_pad_insn,
1556 ULONGEST *jjump_pad_insn_size,
1557 CORE_ADDR *adjusted_insn_addr,
1558 CORE_ADDR *adjusted_insn_addr_end,
1559 char *err)
1560 {
1561 #ifdef __x86_64__
1562 if (is_64bit_tdesc ())
1563 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1564 collector, lockaddr,
1565 orig_size, jump_entry,
1566 trampoline, trampoline_size,
1567 jjump_pad_insn,
1568 jjump_pad_insn_size,
1569 adjusted_insn_addr,
1570 adjusted_insn_addr_end,
1571 err);
1572 #endif
1573
1574 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1575 collector, lockaddr,
1576 orig_size, jump_entry,
1577 trampoline, trampoline_size,
1578 jjump_pad_insn,
1579 jjump_pad_insn_size,
1580 adjusted_insn_addr,
1581 adjusted_insn_addr_end,
1582 err);
1583 }
1584
1585 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1586 architectures. */
1587
1588 int
1589 x86_target::get_min_fast_tracepoint_insn_len ()
1590 {
1591 static int warned_about_fast_tracepoints = 0;
1592
1593 #ifdef __x86_64__
1594 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1595 used for fast tracepoints. */
1596 if (is_64bit_tdesc ())
1597 return 5;
1598 #endif
1599
1600 if (agent_loaded_p ())
1601 {
1602 char errbuf[IPA_BUFSIZ];
1603
1604 errbuf[0] = '\0';
1605
1606 /* On x86, if trampolines are available, then 4-byte jump instructions
1607 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1608 with a 4-byte offset are used instead. */
1609 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1610 return 4;
1611 else
1612 {
 1613     /* GDB has no channel to explain to the user why a shorter fast
1614 tracepoint is not possible, but at least make GDBserver
1615 mention that something has gone awry. */
1616 if (!warned_about_fast_tracepoints)
1617 {
1618 warning ("4-byte fast tracepoints not available; %s", errbuf);
1619 warned_about_fast_tracepoints = 1;
1620 }
1621 return 5;
1622 }
1623 }
1624 else
1625 {
1626 /* Indicate that the minimum length is currently unknown since the IPA
1627 has not loaded yet. */
1628 return 0;
1629 }
1630 }
1631
1632 static void
1633 add_insns (unsigned char *start, int len)
1634 {
1635 CORE_ADDR buildaddr = current_insn_ptr;
1636
1637 if (debug_threads)
1638 debug_printf ("Adding %d bytes of insn at %s\n",
1639 len, paddress (buildaddr));
1640
1641 append_insns (&buildaddr, len, start);
1642 current_insn_ptr = buildaddr;
1643 }
1644
1645 /* Our general strategy for emitting code is to avoid specifying raw
1646 bytes whenever possible, and instead copy a block of inline asm
1647 that is embedded in the function. This is a little messy, because
1648 we need to keep the compiler from discarding what looks like dead
1649 code, plus suppress various warnings. */
1650
1651 #define EMIT_ASM(NAME, INSNS) \
1652 do \
1653 { \
1654 extern unsigned char start_ ## NAME, end_ ## NAME; \
1655 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1656 __asm__ ("jmp end_" #NAME "\n" \
1657 "\t" "start_" #NAME ":" \
1658 "\t" INSNS "\n" \
1659 "\t" "end_" #NAME ":"); \
1660 } while (0)
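/* Usage sketch (with a hypothetical name):

     EMIT_ASM (my_pop, "pop %rax");

   expands to an asm block bracketed by the labels start_my_pop and
   end_my_pop, and add_insns copies the bytes between those labels into
   the buffer at current_insn_ptr.  The leading "jmp end_..." ensures
   the host never executes the template itself.  */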
1661
1662 #ifdef __x86_64__
1663
1664 #define EMIT_ASM32(NAME,INSNS) \
1665 do \
1666 { \
1667 extern unsigned char start_ ## NAME, end_ ## NAME; \
1668 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1669 __asm__ (".code32\n" \
1670 "\t" "jmp end_" #NAME "\n" \
1671 "\t" "start_" #NAME ":\n" \
1672 "\t" INSNS "\n" \
1673 "\t" "end_" #NAME ":\n" \
1674 ".code64\n"); \
1675 } while (0)
1676
1677 #else
1678
1679 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1680
1681 #endif
1682
1683 #ifdef __x86_64__
1684
1685 static void
1686 amd64_emit_prologue (void)
1687 {
1688 EMIT_ASM (amd64_prologue,
1689 "pushq %rbp\n\t"
1690 "movq %rsp,%rbp\n\t"
1691 "sub $0x20,%rsp\n\t"
1692 "movq %rdi,-8(%rbp)\n\t"
1693 "movq %rsi,-16(%rbp)");
1694 }
1695
1696
1697 static void
1698 amd64_emit_epilogue (void)
1699 {
1700 EMIT_ASM (amd64_epilogue,
1701 "movq -16(%rbp),%rdi\n\t"
1702 "movq %rax,(%rdi)\n\t"
1703 "xor %rax,%rax\n\t"
1704 "leave\n\t"
1705 "ret");
1706 }
1707
1708 static void
1709 amd64_emit_add (void)
1710 {
1711 EMIT_ASM (amd64_add,
1712 "add (%rsp),%rax\n\t"
1713 "lea 0x8(%rsp),%rsp");
1714 }
1715
1716 static void
1717 amd64_emit_sub (void)
1718 {
1719 EMIT_ASM (amd64_sub,
1720 "sub %rax,(%rsp)\n\t"
1721 "pop %rax");
1722 }
1723
1724 static void
1725 amd64_emit_mul (void)
1726 {
1727 emit_error = 1;
1728 }
1729
1730 static void
1731 amd64_emit_lsh (void)
1732 {
1733 emit_error = 1;
1734 }
1735
1736 static void
1737 amd64_emit_rsh_signed (void)
1738 {
1739 emit_error = 1;
1740 }
1741
1742 static void
1743 amd64_emit_rsh_unsigned (void)
1744 {
1745 emit_error = 1;
1746 }
1747
1748 static void
1749 amd64_emit_ext (int arg)
1750 {
1751 switch (arg)
1752 {
1753 case 8:
1754 EMIT_ASM (amd64_ext_8,
1755 "cbtw\n\t"
1756 "cwtl\n\t"
1757 "cltq");
1758 break;
1759 case 16:
1760 EMIT_ASM (amd64_ext_16,
1761 "cwtl\n\t"
1762 "cltq");
1763 break;
1764 case 32:
1765 EMIT_ASM (amd64_ext_32,
1766 "cltq");
1767 break;
1768 default:
1769 emit_error = 1;
1770 }
1771 }
1772
1773 static void
1774 amd64_emit_log_not (void)
1775 {
1776 EMIT_ASM (amd64_log_not,
1777 "test %rax,%rax\n\t"
1778 "sete %cl\n\t"
1779 "movzbq %cl,%rax");
1780 }
1781
1782 static void
1783 amd64_emit_bit_and (void)
1784 {
1785 EMIT_ASM (amd64_and,
1786 "and (%rsp),%rax\n\t"
1787 "lea 0x8(%rsp),%rsp");
1788 }
1789
1790 static void
1791 amd64_emit_bit_or (void)
1792 {
1793 EMIT_ASM (amd64_or,
1794 "or (%rsp),%rax\n\t"
1795 "lea 0x8(%rsp),%rsp");
1796 }
1797
1798 static void
1799 amd64_emit_bit_xor (void)
1800 {
1801 EMIT_ASM (amd64_xor,
1802 "xor (%rsp),%rax\n\t"
1803 "lea 0x8(%rsp),%rsp");
1804 }
1805
1806 static void
1807 amd64_emit_bit_not (void)
1808 {
1809 EMIT_ASM (amd64_bit_not,
1810 "xorq $0xffffffffffffffff,%rax");
1811 }
1812
1813 static void
1814 amd64_emit_equal (void)
1815 {
1816 EMIT_ASM (amd64_equal,
1817 "cmp %rax,(%rsp)\n\t"
1818 "je .Lamd64_equal_true\n\t"
1819 "xor %rax,%rax\n\t"
1820 "jmp .Lamd64_equal_end\n\t"
1821 ".Lamd64_equal_true:\n\t"
1822 "mov $0x1,%rax\n\t"
1823 ".Lamd64_equal_end:\n\t"
1824 "lea 0x8(%rsp),%rsp");
1825 }
1826
1827 static void
1828 amd64_emit_less_signed (void)
1829 {
1830 EMIT_ASM (amd64_less_signed,
1831 "cmp %rax,(%rsp)\n\t"
1832 "jl .Lamd64_less_signed_true\n\t"
1833 "xor %rax,%rax\n\t"
1834 "jmp .Lamd64_less_signed_end\n\t"
1835 ".Lamd64_less_signed_true:\n\t"
1836 "mov $1,%rax\n\t"
1837 ".Lamd64_less_signed_end:\n\t"
1838 "lea 0x8(%rsp),%rsp");
1839 }
1840
1841 static void
1842 amd64_emit_less_unsigned (void)
1843 {
1844 EMIT_ASM (amd64_less_unsigned,
1845 "cmp %rax,(%rsp)\n\t"
1846 "jb .Lamd64_less_unsigned_true\n\t"
1847 "xor %rax,%rax\n\t"
1848 "jmp .Lamd64_less_unsigned_end\n\t"
1849 ".Lamd64_less_unsigned_true:\n\t"
1850 "mov $1,%rax\n\t"
1851 ".Lamd64_less_unsigned_end:\n\t"
1852 "lea 0x8(%rsp),%rsp");
1853 }
1854
1855 static void
1856 amd64_emit_ref (int size)
1857 {
1858 switch (size)
1859 {
1860 case 1:
1861 EMIT_ASM (amd64_ref1,
1862 "movb (%rax),%al");
1863 break;
1864 case 2:
1865 EMIT_ASM (amd64_ref2,
1866 "movw (%rax),%ax");
1867 break;
1868 case 4:
1869 EMIT_ASM (amd64_ref4,
1870 "movl (%rax),%eax");
1871 break;
1872 case 8:
1873 EMIT_ASM (amd64_ref8,
1874 "movq (%rax),%rax");
1875 break;
1876 }
1877 }
1878
1879 static void
1880 amd64_emit_if_goto (int *offset_p, int *size_p)
1881 {
1882 EMIT_ASM (amd64_if_goto,
1883 "mov %rax,%rcx\n\t"
1884 "pop %rax\n\t"
1885 "cmp $0,%rcx\n\t"
1886 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1887 if (offset_p)
1888 *offset_p = 10;
1889 if (size_p)
1890 *size_p = 4;
1891 }
1892
1893 static void
1894 amd64_emit_goto (int *offset_p, int *size_p)
1895 {
1896 EMIT_ASM (amd64_goto,
1897 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1898 if (offset_p)
1899 *offset_p = 1;
1900 if (size_p)
1901 *size_p = 4;
1902 }
1903
1904 static void
1905 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1906 {
1907 int diff = (to - (from + size));
1908 unsigned char buf[sizeof (int)];
1909
1910 if (size != 4)
1911 {
1912 emit_error = 1;
1913 return;
1914 }
1915
1916 memcpy (buf, &diff, sizeof (int));
1917 target_write_memory (from, buf, sizeof (int));
1918 }
1919
1920 static void
1921 amd64_emit_const (LONGEST num)
1922 {
1923 unsigned char buf[16];
1924 int i;
1925 CORE_ADDR buildaddr = current_insn_ptr;
1926
1927 i = 0;
1928 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1929 memcpy (&buf[i], &num, sizeof (num));
1930 i += 8;
1931 append_insns (&buildaddr, i, buf);
1932 current_insn_ptr = buildaddr;
1933 }
1934
1935 static void
1936 amd64_emit_call (CORE_ADDR fn)
1937 {
1938 unsigned char buf[16];
1939 int i;
1940 CORE_ADDR buildaddr;
1941 LONGEST offset64;
1942
 1943   /* The destination function, being in the shared library, may be
 1944      more than 31 bits away from the compiled code pad.  */
1945
1946 buildaddr = current_insn_ptr;
1947
1948 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1949
1950 i = 0;
1951
1952 if (offset64 > INT_MAX || offset64 < INT_MIN)
1953 {
 1954       /* Offset is too large for a direct call, so call through a register
 1955          instead.  Use %r10: since it is call-clobbered, we don't have to
 1956          push/pop it.  */
1957 buf[i++] = 0x48; /* mov $fn,%r10 */
1958 buf[i++] = 0xba;
1959 memcpy (buf + i, &fn, 8);
1960 i += 8;
1961 buf[i++] = 0xff; /* callq *%r10 */
1962 buf[i++] = 0xd2;
1963 }
1964 else
1965 {
1966 int offset32 = offset64; /* we know we can't overflow here. */
1967
1968 buf[i++] = 0xe8; /* call <reladdr> */
1969 memcpy (buf + i, &offset32, 4);
1970 i += 4;
1971 }
1972
1973 append_insns (&buildaddr, i, buf);
1974 current_insn_ptr = buildaddr;
1975 }
1976
1977 static void
1978 amd64_emit_reg (int reg)
1979 {
1980 unsigned char buf[16];
1981 int i;
1982 CORE_ADDR buildaddr;
1983
1984 /* Assume raw_regs is still in %rdi. */
1985 buildaddr = current_insn_ptr;
1986 i = 0;
1987 buf[i++] = 0xbe; /* mov $<n>,%esi */
1988 memcpy (&buf[i], &reg, sizeof (reg));
1989 i += 4;
1990 append_insns (&buildaddr, i, buf);
1991 current_insn_ptr = buildaddr;
1992 amd64_emit_call (get_raw_reg_func_addr ());
1993 }
1994
1995 static void
1996 amd64_emit_pop (void)
1997 {
1998 EMIT_ASM (amd64_pop,
1999 "pop %rax");
2000 }
2001
2002 static void
2003 amd64_emit_stack_flush (void)
2004 {
2005 EMIT_ASM (amd64_stack_flush,
2006 "push %rax");
2007 }
2008
2009 static void
2010 amd64_emit_zero_ext (int arg)
2011 {
2012 switch (arg)
2013 {
2014 case 8:
2015 EMIT_ASM (amd64_zero_ext_8,
2016 "and $0xff,%rax");
2017 break;
2018 case 16:
2019 EMIT_ASM (amd64_zero_ext_16,
2020 "and $0xffff,%rax");
2021 break;
2022 case 32:
2023 EMIT_ASM (amd64_zero_ext_32,
2024 "mov $0xffffffff,%rcx\n\t"
2025 "and %rcx,%rax");
2026 break;
2027 default:
2028 emit_error = 1;
2029 }
2030 }
2031
2032 static void
2033 amd64_emit_swap (void)
2034 {
2035 EMIT_ASM (amd64_swap,
2036 "mov %rax,%rcx\n\t"
2037 "pop %rax\n\t"
2038 "push %rcx");
2039 }
2040
2041 static void
2042 amd64_emit_stack_adjust (int n)
2043 {
2044 unsigned char buf[16];
2045 int i;
2046 CORE_ADDR buildaddr = current_insn_ptr;
2047
2048 i = 0;
2049 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2050 buf[i++] = 0x8d;
2051 buf[i++] = 0x64;
2052 buf[i++] = 0x24;
2053 /* This only handles adjustments up to 16, but we don't expect any more. */
2054 buf[i++] = n * 8;
2055 append_insns (&buildaddr, i, buf);
2056 current_insn_ptr = buildaddr;
2057 }
2058
2059 /* FN's prototype is `LONGEST(*fn)(int)'. */
2060
2061 static void
2062 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2063 {
2064 unsigned char buf[16];
2065 int i;
2066 CORE_ADDR buildaddr;
2067
2068 buildaddr = current_insn_ptr;
2069 i = 0;
2070 buf[i++] = 0xbf; /* movl $<n>,%edi */
2071 memcpy (&buf[i], &arg1, sizeof (arg1));
2072 i += 4;
2073 append_insns (&buildaddr, i, buf);
2074 current_insn_ptr = buildaddr;
2075 amd64_emit_call (fn);
2076 }
2077
2078 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2079
2080 static void
2081 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2082 {
2083 unsigned char buf[16];
2084 int i;
2085 CORE_ADDR buildaddr;
2086
2087 buildaddr = current_insn_ptr;
2088 i = 0;
2089 buf[i++] = 0xbf; /* movl $<n>,%edi */
2090 memcpy (&buf[i], &arg1, sizeof (arg1));
2091 i += 4;
2092 append_insns (&buildaddr, i, buf);
2093 current_insn_ptr = buildaddr;
2094 EMIT_ASM (amd64_void_call_2_a,
2095 /* Save away a copy of the stack top. */
2096 "push %rax\n\t"
2097 /* Also pass top as the second argument. */
2098 "mov %rax,%rsi");
2099 amd64_emit_call (fn);
2100 EMIT_ASM (amd64_void_call_2_b,
2101 /* Restore the stack top; %rax may have been trashed. */
2102 "pop %rax");
2103 }
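/* A sketch of the calling convention used by the two call helpers
   above, per the SysV AMD64 ABI: ARG1 is loaded into %edi (the first
   integer argument); for the void call the current expression top in
   %rax is also copied to %rsi as the second argument, and for the int
   call the LONGEST result simply comes back in %rax, which is already
   the top-of-stack register of the emitted code.  */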
2104
2105 static void
2106 amd64_emit_eq_goto (int *offset_p, int *size_p)
2107 {
2108 EMIT_ASM (amd64_eq,
2109 "cmp %rax,(%rsp)\n\t"
2110 "jne .Lamd64_eq_fallthru\n\t"
2111 "lea 0x8(%rsp),%rsp\n\t"
2112 "pop %rax\n\t"
2113 /* jmp, but don't trust the assembler to choose the right jump */
2114 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2115 ".Lamd64_eq_fallthru:\n\t"
2116 "lea 0x8(%rsp),%rsp\n\t"
2117 "pop %rax");
2118
2119 if (offset_p)
2120 *offset_p = 13;
2121 if (size_p)
2122 *size_p = 4;
2123 }
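/* In this and the following conditional-goto emitters, *OFFSET_P is
   the byte offset of the 32-bit displacement inside the hand-assembled
   `jmp' (the 0xe9 opcode plus four zero bytes) and *SIZE_P is its
   width; amd64_write_goto_address patches that field once the
   destination is known.  Here the instructions before the jmp occupy
   4 + 2 + 5 + 1 = 12 bytes, so the displacement starts at offset 13.  */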
2124
2125 static void
2126 amd64_emit_ne_goto (int *offset_p, int *size_p)
2127 {
2128 EMIT_ASM (amd64_ne,
2129 "cmp %rax,(%rsp)\n\t"
2130 "je .Lamd64_ne_fallthru\n\t"
2131 "lea 0x8(%rsp),%rsp\n\t"
2132 "pop %rax\n\t"
2133 /* jmp, but don't trust the assembler to choose the right jump */
2134 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2135 ".Lamd64_ne_fallthru:\n\t"
2136 "lea 0x8(%rsp),%rsp\n\t"
2137 "pop %rax");
2138
2139 if (offset_p)
2140 *offset_p = 13;
2141 if (size_p)
2142 *size_p = 4;
2143 }
2144
2145 static void
2146 amd64_emit_lt_goto (int *offset_p, int *size_p)
2147 {
2148 EMIT_ASM (amd64_lt,
2149 "cmp %rax,(%rsp)\n\t"
2150 "jnl .Lamd64_lt_fallthru\n\t"
2151 "lea 0x8(%rsp),%rsp\n\t"
2152 "pop %rax\n\t"
2153 /* jmp, but don't trust the assembler to choose the right jump */
2154 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2155 ".Lamd64_lt_fallthru:\n\t"
2156 "lea 0x8(%rsp),%rsp\n\t"
2157 "pop %rax");
2158
2159 if (offset_p)
2160 *offset_p = 13;
2161 if (size_p)
2162 *size_p = 4;
2163 }
2164
2165 static void
2166 amd64_emit_le_goto (int *offset_p, int *size_p)
2167 {
2168 EMIT_ASM (amd64_le,
2169 "cmp %rax,(%rsp)\n\t"
2170 "jnle .Lamd64_le_fallthru\n\t"
2171 "lea 0x8(%rsp),%rsp\n\t"
2172 "pop %rax\n\t"
2173 /* jmp, but don't trust the assembler to choose the right jump */
2174 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2175 ".Lamd64_le_fallthru:\n\t"
2176 "lea 0x8(%rsp),%rsp\n\t"
2177 "pop %rax");
2178
2179 if (offset_p)
2180 *offset_p = 13;
2181 if (size_p)
2182 *size_p = 4;
2183 }
2184
2185 static void
2186 amd64_emit_gt_goto (int *offset_p, int *size_p)
2187 {
2188 EMIT_ASM (amd64_gt,
2189 "cmp %rax,(%rsp)\n\t"
2190 "jng .Lamd64_gt_fallthru\n\t"
2191 "lea 0x8(%rsp),%rsp\n\t"
2192 "pop %rax\n\t"
2193 /* jmp, but don't trust the assembler to choose the right jump */
2194 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2195 ".Lamd64_gt_fallthru:\n\t"
2196 "lea 0x8(%rsp),%rsp\n\t"
2197 "pop %rax");
2198
2199 if (offset_p)
2200 *offset_p = 13;
2201 if (size_p)
2202 *size_p = 4;
2203 }
2204
2205 static void
2206 amd64_emit_ge_goto (int *offset_p, int *size_p)
2207 {
2208 EMIT_ASM (amd64_ge,
2209 "cmp %rax,(%rsp)\n\t"
2210 "jnge .Lamd64_ge_fallthru\n\t"
2211 ".Lamd64_ge_jump:\n\t"
2212 "lea 0x8(%rsp),%rsp\n\t"
2213 "pop %rax\n\t"
2214 /* jmp, but don't trust the assembler to choose the right jump */
2215 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2216 ".Lamd64_ge_fallthru:\n\t"
2217 "lea 0x8(%rsp),%rsp\n\t"
2218 "pop %rax");
2219
2220 if (offset_p)
2221 *offset_p = 13;
2222 if (size_p)
2223 *size_p = 4;
2224 }
2225
2226 struct emit_ops amd64_emit_ops =
2227 {
2228 amd64_emit_prologue,
2229 amd64_emit_epilogue,
2230 amd64_emit_add,
2231 amd64_emit_sub,
2232 amd64_emit_mul,
2233 amd64_emit_lsh,
2234 amd64_emit_rsh_signed,
2235 amd64_emit_rsh_unsigned,
2236 amd64_emit_ext,
2237 amd64_emit_log_not,
2238 amd64_emit_bit_and,
2239 amd64_emit_bit_or,
2240 amd64_emit_bit_xor,
2241 amd64_emit_bit_not,
2242 amd64_emit_equal,
2243 amd64_emit_less_signed,
2244 amd64_emit_less_unsigned,
2245 amd64_emit_ref,
2246 amd64_emit_if_goto,
2247 amd64_emit_goto,
2248 amd64_write_goto_address,
2249 amd64_emit_const,
2250 amd64_emit_call,
2251 amd64_emit_reg,
2252 amd64_emit_pop,
2253 amd64_emit_stack_flush,
2254 amd64_emit_zero_ext,
2255 amd64_emit_swap,
2256 amd64_emit_stack_adjust,
2257 amd64_emit_int_call_1,
2258 amd64_emit_void_call_2,
2259 amd64_emit_eq_goto,
2260 amd64_emit_ne_goto,
2261 amd64_emit_lt_goto,
2262 amd64_emit_le_goto,
2263 amd64_emit_gt_goto,
2264 amd64_emit_ge_goto
2265 };
2266
2267 #endif /* __x86_64__ */
2268
2269 static void
2270 i386_emit_prologue (void)
2271 {
2272 EMIT_ASM32 (i386_prologue,
2273 "push %ebp\n\t"
2274 "mov %esp,%ebp\n\t"
2275 "push %ebx");
2276 /* At this point, the raw regs base address is at 8(%ebp), and the
2277 value pointer is at 12(%ebp). */
2278 }
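/* In the 32-bit emitters that follow, a 64-bit expression value is
   kept in the %eax (low word) / %ebx (high word) register pair, and
   spilled values use the same layout on the stack (low word at the
   lower address).  That is why, for instance, i386_emit_add pairs an
   `add' on the low words with an `adc' on the high words.  */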
2279
2280 static void
2281 i386_emit_epilogue (void)
2282 {
2283 EMIT_ASM32 (i386_epilogue,
2284 "mov 12(%ebp),%ecx\n\t"
2285 "mov %eax,(%ecx)\n\t"
2286 "mov %ebx,0x4(%ecx)\n\t"
2287 "xor %eax,%eax\n\t"
2288 "pop %ebx\n\t"
2289 "pop %ebp\n\t"
2290 "ret");
2291 }
2292
2293 static void
2294 i386_emit_add (void)
2295 {
2296 EMIT_ASM32 (i386_add,
2297 "add (%esp),%eax\n\t"
2298 "adc 0x4(%esp),%ebx\n\t"
2299 "lea 0x8(%esp),%esp");
2300 }
2301
2302 static void
2303 i386_emit_sub (void)
2304 {
2305 EMIT_ASM32 (i386_sub,
2306 "subl %eax,(%esp)\n\t"
2307 "sbbl %ebx,4(%esp)\n\t"
2308 "pop %eax\n\t"
2309 "pop %ebx\n\t");
2310 }
2311
2312 static void
2313 i386_emit_mul (void)
2314 {
2315 emit_error = 1;
2316 }
2317
2318 static void
2319 i386_emit_lsh (void)
2320 {
2321 emit_error = 1;
2322 }
2323
2324 static void
2325 i386_emit_rsh_signed (void)
2326 {
2327 emit_error = 1;
2328 }
2329
2330 static void
2331 i386_emit_rsh_unsigned (void)
2332 {
2333 emit_error = 1;
2334 }
2335
2336 static void
2337 i386_emit_ext (int arg)
2338 {
2339 switch (arg)
2340 {
2341 case 8:
2342 EMIT_ASM32 (i386_ext_8,
2343 "cbtw\n\t"
2344 "cwtl\n\t"
2345 "movl %eax,%ebx\n\t"
2346 "sarl $31,%ebx");
2347 break;
2348 case 16:
2349 EMIT_ASM32 (i386_ext_16,
2350 "cwtl\n\t"
2351 "movl %eax,%ebx\n\t"
2352 "sarl $31,%ebx");
2353 break;
2354 case 32:
2355 EMIT_ASM32 (i386_ext_32,
2356 "movl %eax,%ebx\n\t"
2357 "sarl $31,%ebx");
2358 break;
2359 default:
2360 emit_error = 1;
2361 }
2362 }
2363
2364 static void
2365 i386_emit_log_not (void)
2366 {
2367 EMIT_ASM32 (i386_log_not,
2368 "or %ebx,%eax\n\t"
2369 "test %eax,%eax\n\t"
2370 "sete %cl\n\t"
2371 "xor %ebx,%ebx\n\t"
2372 "movzbl %cl,%eax");
2373 }
2374
2375 static void
2376 i386_emit_bit_and (void)
2377 {
2378 EMIT_ASM32 (i386_and,
2379 "and (%esp),%eax\n\t"
2380 "and 0x4(%esp),%ebx\n\t"
2381 "lea 0x8(%esp),%esp");
2382 }
2383
2384 static void
2385 i386_emit_bit_or (void)
2386 {
2387 EMIT_ASM32 (i386_or,
2388 "or (%esp),%eax\n\t"
2389 "or 0x4(%esp),%ebx\n\t"
2390 "lea 0x8(%esp),%esp");
2391 }
2392
2393 static void
2394 i386_emit_bit_xor (void)
2395 {
2396 EMIT_ASM32 (i386_xor,
2397 "xor (%esp),%eax\n\t"
2398 "xor 0x4(%esp),%ebx\n\t"
2399 "lea 0x8(%esp),%esp");
2400 }
2401
2402 static void
2403 i386_emit_bit_not (void)
2404 {
2405 EMIT_ASM32 (i386_bit_not,
2406 "xor $0xffffffff,%eax\n\t"
2407 "xor $0xffffffff,%ebx\n\t");
2408 }
2409
2410 static void
2411 i386_emit_equal (void)
2412 {
2413 EMIT_ASM32 (i386_equal,
2414 "cmpl %ebx,4(%esp)\n\t"
2415 "jne .Li386_equal_false\n\t"
2416 "cmpl %eax,(%esp)\n\t"
2417 "je .Li386_equal_true\n\t"
2418 ".Li386_equal_false:\n\t"
2419 "xor %eax,%eax\n\t"
2420 "jmp .Li386_equal_end\n\t"
2421 ".Li386_equal_true:\n\t"
2422 "mov $1,%eax\n\t"
2423 ".Li386_equal_end:\n\t"
2424 "xor %ebx,%ebx\n\t"
2425 "lea 0x8(%esp),%esp");
2426 }
2427
2428 static void
2429 i386_emit_less_signed (void)
2430 {
2431 EMIT_ASM32 (i386_less_signed,
2432 "cmpl %ebx,4(%esp)\n\t"
2433 "jl .Li386_less_signed_true\n\t"
2434 "jne .Li386_less_signed_false\n\t"
2435 "cmpl %eax,(%esp)\n\t"
2436 "jl .Li386_less_signed_true\n\t"
2437 ".Li386_less_signed_false:\n\t"
2438 "xor %eax,%eax\n\t"
2439 "jmp .Li386_less_signed_end\n\t"
2440 ".Li386_less_signed_true:\n\t"
2441 "mov $1,%eax\n\t"
2442 ".Li386_less_signed_end:\n\t"
2443 "xor %ebx,%ebx\n\t"
2444 "lea 0x8(%esp),%esp");
2445 }
2446
2447 static void
2448 i386_emit_less_unsigned (void)
2449 {
2450 EMIT_ASM32 (i386_less_unsigned,
2451 "cmpl %ebx,4(%esp)\n\t"
2452 "jb .Li386_less_unsigned_true\n\t"
2453 "jne .Li386_less_unsigned_false\n\t"
2454 "cmpl %eax,(%esp)\n\t"
2455 "jb .Li386_less_unsigned_true\n\t"
2456 ".Li386_less_unsigned_false:\n\t"
2457 "xor %eax,%eax\n\t"
2458 "jmp .Li386_less_unsigned_end\n\t"
2459 ".Li386_less_unsigned_true:\n\t"
2460 "mov $1,%eax\n\t"
2461 ".Li386_less_unsigned_end:\n\t"
2462 "xor %ebx,%ebx\n\t"
2463 "lea 0x8(%esp),%esp");
2464 }
2465
2466 static void
2467 i386_emit_ref (int size)
2468 {
2469 switch (size)
2470 {
2471 case 1:
2472 EMIT_ASM32 (i386_ref1,
2473 "movb (%eax),%al");
2474 break;
2475 case 2:
2476 EMIT_ASM32 (i386_ref2,
2477 "movw (%eax),%ax");
2478 break;
2479 case 4:
2480 EMIT_ASM32 (i386_ref4,
2481 "movl (%eax),%eax");
2482 break;
2483 case 8:
2484 EMIT_ASM32 (i386_ref8,
2485 "movl 4(%eax),%ebx\n\t"
2486 "movl (%eax),%eax");
2487 break;
2488 }
2489 }
2490
2491 static void
2492 i386_emit_if_goto (int *offset_p, int *size_p)
2493 {
2494 EMIT_ASM32 (i386_if_goto,
2495 "mov %eax,%ecx\n\t"
2496 "or %ebx,%ecx\n\t"
2497 "pop %eax\n\t"
2498 "pop %ebx\n\t"
2499 "cmpl $0,%ecx\n\t"
2500 /* Don't trust the assembler to choose the right jump */
2501 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2502
2503 if (offset_p)
2504 *offset_p = 11; /* Be sure that this matches the sequence above. */
2505 if (size_p)
2506 *size_p = 4;
2507 }
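/* The 11 above follows from the bytes just emitted: mov (2) + or (2)
   + pop (1) + pop (1) + cmpl $0,%ecx (3) = 9, plus the two-byte
   0x0f 0x85 `jne' opcode, puts the 32-bit displacement at offset 11.  */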
2508
2509 static void
2510 i386_emit_goto (int *offset_p, int *size_p)
2511 {
2512 EMIT_ASM32 (i386_goto,
2513 /* Don't trust the assembler to choose the right jump */
2514 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2515 if (offset_p)
2516 *offset_p = 1;
2517 if (size_p)
2518 *size_p = 4;
2519 }
2520
2521 static void
2522 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2523 {
2524 int diff = (to - (from + size));
2525 unsigned char buf[sizeof (int)];
2526
2527 /* We're only doing 4-byte sizes at the moment. */
2528 if (size != 4)
2529 {
2530 emit_error = 1;
2531 return;
2532 }
2533
2534 memcpy (buf, &diff, sizeof (int));
2535 target_write_memory (from, buf, sizeof (int));
2536 }
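/* FROM points at the 4-byte displacement field that the goto emitters
   left zeroed; the displacement is computed against FROM + SIZE
   because x86 relative jumps are taken from the address of the
   following instruction.  */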
2537
2538 static void
2539 i386_emit_const (LONGEST num)
2540 {
2541 unsigned char buf[16];
2542 int i, hi, lo;
2543 CORE_ADDR buildaddr = current_insn_ptr;
2544
2545 i = 0;
2546 buf[i++] = 0xb8; /* mov $<n>,%eax */
2547 lo = num & 0xffffffff;
2548 memcpy (&buf[i], &lo, sizeof (lo));
2549 i += 4;
2550 hi = ((num >> 32) & 0xffffffff);
2551 if (hi)
2552 {
2553 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2554 memcpy (&buf[i], &hi, sizeof (hi));
2555 i += 4;
2556 }
2557 else
2558 {
2559 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2560 }
2561 append_insns (&buildaddr, i, buf);
2562 current_insn_ptr = buildaddr;
2563 }
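/* A constant is thus materialised as `mov $lo,%eax' followed by either
   `mov $hi,%ebx' or `xor %ebx,%ebx', matching the %eax/%ebx pair
   convention described above.  */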
2564
2565 static void
2566 i386_emit_call (CORE_ADDR fn)
2567 {
2568 unsigned char buf[16];
2569 int i, offset;
2570 CORE_ADDR buildaddr;
2571
2572 buildaddr = current_insn_ptr;
2573 i = 0;
2574 buf[i++] = 0xe8; /* call <reladdr> */
2575 offset = ((int) fn) - (buildaddr + 5);
2576 memcpy (buf + 1, &offset, 4);
2577 append_insns (&buildaddr, 5, buf);
2578 current_insn_ptr = buildaddr;
2579 }
2580
2581 static void
2582 i386_emit_reg (int reg)
2583 {
2584 unsigned char buf[16];
2585 int i;
2586 CORE_ADDR buildaddr;
2587
2588 EMIT_ASM32 (i386_reg_a,
2589 "sub $0x8,%esp");
2590 buildaddr = current_insn_ptr;
2591 i = 0;
2592 buf[i++] = 0xb8; /* mov $<n>,%eax */
2593 memcpy (&buf[i], &reg, sizeof (reg));
2594 i += 4;
2595 append_insns (&buildaddr, i, buf);
2596 current_insn_ptr = buildaddr;
2597 EMIT_ASM32 (i386_reg_b,
2598 "mov %eax,4(%esp)\n\t"
2599 "mov 8(%ebp),%eax\n\t"
2600 "mov %eax,(%esp)");
2601 i386_emit_call (get_raw_reg_func_addr ());
2602 EMIT_ASM32 (i386_reg_c,
2603 "xor %ebx,%ebx\n\t"
2604 "lea 0x8(%esp),%esp");
2605 }
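/* A rough picture of the sequence above: the register-block pointer
   that the prologue saved at 8(%ebp) is passed in the first stack
   slot and the register number in the second, cdecl style, and %ebx is
   cleared afterwards so the fetched value in %eax reads as a
   zero-extended 64-bit quantity.  */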
2606
2607 static void
2608 i386_emit_pop (void)
2609 {
2610 EMIT_ASM32 (i386_pop,
2611 "pop %eax\n\t"
2612 "pop %ebx");
2613 }
2614
2615 static void
2616 i386_emit_stack_flush (void)
2617 {
2618 EMIT_ASM32 (i386_stack_flush,
2619 "push %ebx\n\t"
2620 "push %eax");
2621 }
2622
2623 static void
2624 i386_emit_zero_ext (int arg)
2625 {
2626 switch (arg)
2627 {
2628 case 8:
2629 EMIT_ASM32 (i386_zero_ext_8,
2630 "and $0xff,%eax\n\t"
2631 "xor %ebx,%ebx");
2632 break;
2633 case 16:
2634 EMIT_ASM32 (i386_zero_ext_16,
2635 "and $0xffff,%eax\n\t"
2636 "xor %ebx,%ebx");
2637 break;
2638 case 32:
2639 EMIT_ASM32 (i386_zero_ext_32,
2640 "xor %ebx,%ebx");
2641 break;
2642 default:
2643 emit_error = 1;
2644 }
2645 }
2646
2647 static void
2648 i386_emit_swap (void)
2649 {
2650 EMIT_ASM32 (i386_swap,
2651 "mov %eax,%ecx\n\t"
2652 "mov %ebx,%edx\n\t"
2653 "pop %eax\n\t"
2654 "pop %ebx\n\t"
2655 "push %edx\n\t"
2656 "push %ecx");
2657 }
2658
2659 static void
2660 i386_emit_stack_adjust (int n)
2661 {
2662 unsigned char buf[16];
2663 int i;
2664 CORE_ADDR buildaddr = current_insn_ptr;
2665
2666 i = 0;
2667 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2668 buf[i++] = 0x64;
2669 buf[i++] = 0x24;
2670 buf[i++] = n * 8;
2671 append_insns (&buildaddr, i, buf);
2672 current_insn_ptr = buildaddr;
2673 }
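/* As in the amd64 variant, the displacement is a single signed byte,
   so only small adjustments can be encoded; for n == 2 the bytes are

     8d 64 24 10           lea 0x10(%esp),%esp  */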
2674
2675 /* FN's prototype is `LONGEST(*fn)(int)'. */
2676
2677 static void
2678 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2679 {
2680 unsigned char buf[16];
2681 int i;
2682 CORE_ADDR buildaddr;
2683
2684 EMIT_ASM32 (i386_int_call_1_a,
2685 /* Reserve a bit of stack space. */
2686 "sub $0x8,%esp");
2687 /* Put the one argument on the stack. */
2688 buildaddr = current_insn_ptr;
2689 i = 0;
2690 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2691 buf[i++] = 0x04;
2692 buf[i++] = 0x24;
2693 memcpy (&buf[i], &arg1, sizeof (arg1));
2694 i += 4;
2695 append_insns (&buildaddr, i, buf);
2696 current_insn_ptr = buildaddr;
2697 i386_emit_call (fn);
2698 EMIT_ASM32 (i386_int_call_1_c,
2699 "mov %edx,%ebx\n\t"
2700 "lea 0x8(%esp),%esp");
2701 }
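/* FN is called with the usual 32-bit C convention: the int argument
   sits at (%esp), and the LONGEST result comes back in %edx:%eax; the
   trailing `mov %edx,%ebx' converts that into the %eax/%ebx pair used
   by the emitted code.  */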
2702
2703 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2704
2705 static void
2706 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2707 {
2708 unsigned char buf[16];
2709 int i;
2710 CORE_ADDR buildaddr;
2711
2712 EMIT_ASM32 (i386_void_call_2_a,
2713 /* Preserve %eax only; we don't have to worry about %ebx. */
2714 "push %eax\n\t"
2715 /* Reserve a bit of stack space for arguments. */
2716 "sub $0x10,%esp\n\t"
2717 /* Copy "top" to the second argument position. (Note that
2718 we can't assume the function won't scribble on its
2719 arguments, so don't try to restore from this.) */
2720 "mov %eax,4(%esp)\n\t"
2721 "mov %ebx,8(%esp)");
2722 /* Put the first argument on the stack. */
2723 buildaddr = current_insn_ptr;
2724 i = 0;
2725 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2726 buf[i++] = 0x04;
2727 buf[i++] = 0x24;
2728 memcpy (&buf[i], &arg1, sizeof (arg1));
2729 i += 4;
2730 append_insns (&buildaddr, i, buf);
2731 current_insn_ptr = buildaddr;
2732 i386_emit_call (fn);
2733 EMIT_ASM32 (i386_void_call_2_b,
2734 "lea 0x10(%esp),%esp\n\t"
2735 /* Restore original stack top. */
2736 "pop %eax");
2737 }
2738
2739
2740 static void
2741 i386_emit_eq_goto (int *offset_p, int *size_p)
2742 {
2743 EMIT_ASM32 (eq,
2744 /* Check the low half first; it is more likely to be the decider. */
2745 "cmpl %eax,(%esp)\n\t"
2746 "jne .Leq_fallthru\n\t"
2747 "cmpl %ebx,4(%esp)\n\t"
2748 "jne .Leq_fallthru\n\t"
2749 "lea 0x8(%esp),%esp\n\t"
2750 "pop %eax\n\t"
2751 "pop %ebx\n\t"
2752 /* jmp, but don't trust the assembler to choose the right jump */
2753 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2754 ".Leq_fallthru:\n\t"
2755 "lea 0x8(%esp),%esp\n\t"
2756 "pop %eax\n\t"
2757 "pop %ebx");
2758
2759 if (offset_p)
2760 *offset_p = 18;
2761 if (size_p)
2762 *size_p = 4;
2763 }
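/* The 18 above is the size of the instructions preceding the jmp
   (3 + 2 + 4 + 2 + 4 + 1 + 1 = 17 bytes) plus one for the 0xe9 opcode;
   the 4-byte displacement that i386_write_goto_address later patches
   starts there.  */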
2764
2765 static void
2766 i386_emit_ne_goto (int *offset_p, int *size_p)
2767 {
2768 EMIT_ASM32 (ne,
2769 /* Check the low half first; it is more likely to be the decider. */
2770 "cmpl %eax,(%esp)\n\t"
2771 "jne .Lne_jump\n\t"
2772 "cmpl %ebx,4(%esp)\n\t"
2773 "je .Lne_fallthru\n\t"
2774 ".Lne_jump:\n\t"
2775 "lea 0x8(%esp),%esp\n\t"
2776 "pop %eax\n\t"
2777 "pop %ebx\n\t"
2778 /* jmp, but don't trust the assembler to choose the right jump */
2779 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2780 ".Lne_fallthru:\n\t"
2781 "lea 0x8(%esp),%esp\n\t"
2782 "pop %eax\n\t"
2783 "pop %ebx");
2784
2785 if (offset_p)
2786 *offset_p = 18;
2787 if (size_p)
2788 *size_p = 4;
2789 }
2790
2791 static void
2792 i386_emit_lt_goto (int *offset_p, int *size_p)
2793 {
2794 EMIT_ASM32 (lt,
2795 "cmpl %ebx,4(%esp)\n\t"
2796 "jl .Llt_jump\n\t"
2797 "jne .Llt_fallthru\n\t"
2798 "cmpl %eax,(%esp)\n\t"
2799 "jnl .Llt_fallthru\n\t"
2800 ".Llt_jump:\n\t"
2801 "lea 0x8(%esp),%esp\n\t"
2802 "pop %eax\n\t"
2803 "pop %ebx\n\t"
2804 /* jmp, but don't trust the assembler to choose the right jump */
2805 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2806 ".Llt_fallthru:\n\t"
2807 "lea 0x8(%esp),%esp\n\t"
2808 "pop %eax\n\t"
2809 "pop %ebx");
2810
2811 if (offset_p)
2812 *offset_p = 20;
2813 if (size_p)
2814 *size_p = 4;
2815 }
2816
2817 static void
2818 i386_emit_le_goto (int *offset_p, int *size_p)
2819 {
2820 EMIT_ASM32 (le,
2821 "cmpl %ebx,4(%esp)\n\t"
2822 "jle .Lle_jump\n\t"
2823 "jne .Lle_fallthru\n\t"
2824 "cmpl %eax,(%esp)\n\t"
2825 "jnle .Lle_fallthru\n\t"
2826 ".Lle_jump:\n\t"
2827 "lea 0x8(%esp),%esp\n\t"
2828 "pop %eax\n\t"
2829 "pop %ebx\n\t"
2830 /* jmp, but don't trust the assembler to choose the right jump */
2831 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2832 ".Lle_fallthru:\n\t"
2833 "lea 0x8(%esp),%esp\n\t"
2834 "pop %eax\n\t"
2835 "pop %ebx");
2836
2837 if (offset_p)
2838 *offset_p = 20;
2839 if (size_p)
2840 *size_p = 4;
2841 }
2842
2843 static void
2844 i386_emit_gt_goto (int *offset_p, int *size_p)
2845 {
2846 EMIT_ASM32 (gt,
2847 "cmpl %ebx,4(%esp)\n\t"
2848 "jg .Lgt_jump\n\t"
2849 "jne .Lgt_fallthru\n\t"
2850 "cmpl %eax,(%esp)\n\t"
2851 "jng .Lgt_fallthru\n\t"
2852 ".Lgt_jump:\n\t"
2853 "lea 0x8(%esp),%esp\n\t"
2854 "pop %eax\n\t"
2855 "pop %ebx\n\t"
2856 /* jmp, but don't trust the assembler to choose the right jump */
2857 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2858 ".Lgt_fallthru:\n\t"
2859 "lea 0x8(%esp),%esp\n\t"
2860 "pop %eax\n\t"
2861 "pop %ebx");
2862
2863 if (offset_p)
2864 *offset_p = 20;
2865 if (size_p)
2866 *size_p = 4;
2867 }
2868
2869 static void
2870 i386_emit_ge_goto (int *offset_p, int *size_p)
2871 {
2872 EMIT_ASM32 (ge,
2873 "cmpl %ebx,4(%esp)\n\t"
2874 "jge .Lge_jump\n\t"
2875 "jne .Lge_fallthru\n\t"
2876 "cmpl %eax,(%esp)\n\t"
2877 "jnge .Lge_fallthru\n\t"
2878 ".Lge_jump:\n\t"
2879 "lea 0x8(%esp),%esp\n\t"
2880 "pop %eax\n\t"
2881 "pop %ebx\n\t"
2882 /* jmp, but don't trust the assembler to choose the right jump */
2883 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2884 ".Lge_fallthru:\n\t"
2885 "lea 0x8(%esp),%esp\n\t"
2886 "pop %eax\n\t"
2887 "pop %ebx");
2888
2889 if (offset_p)
2890 *offset_p = 20;
2891 if (size_p)
2892 *size_p = 4;
2893 }
2894
2895 struct emit_ops i386_emit_ops =
2896 {
2897 i386_emit_prologue,
2898 i386_emit_epilogue,
2899 i386_emit_add,
2900 i386_emit_sub,
2901 i386_emit_mul,
2902 i386_emit_lsh,
2903 i386_emit_rsh_signed,
2904 i386_emit_rsh_unsigned,
2905 i386_emit_ext,
2906 i386_emit_log_not,
2907 i386_emit_bit_and,
2908 i386_emit_bit_or,
2909 i386_emit_bit_xor,
2910 i386_emit_bit_not,
2911 i386_emit_equal,
2912 i386_emit_less_signed,
2913 i386_emit_less_unsigned,
2914 i386_emit_ref,
2915 i386_emit_if_goto,
2916 i386_emit_goto,
2917 i386_write_goto_address,
2918 i386_emit_const,
2919 i386_emit_call,
2920 i386_emit_reg,
2921 i386_emit_pop,
2922 i386_emit_stack_flush,
2923 i386_emit_zero_ext,
2924 i386_emit_swap,
2925 i386_emit_stack_adjust,
2926 i386_emit_int_call_1,
2927 i386_emit_void_call_2,
2928 i386_emit_eq_goto,
2929 i386_emit_ne_goto,
2930 i386_emit_lt_goto,
2931 i386_emit_le_goto,
2932 i386_emit_gt_goto,
2933 i386_emit_ge_goto
2934 };
2935
2936
2937 static struct emit_ops *
2938 x86_emit_ops (void)
2939 {
2940 #ifdef __x86_64__
2941 if (is_64bit_tdesc ())
2942 return &amd64_emit_ops;
2943 else
2944 #endif
2945 return &i386_emit_ops;
2946 }
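/* Agent-expression compilation thus follows the current target
   description: on an __x86_64__ build a 64-bit inferior gets the amd64
   emitters and a 32-bit inferior the i386 emitters; a 32-bit build
   always uses the latter.  */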
2947
2948 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2949
2950 const gdb_byte *
2951 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2952 {
2953 *size = x86_breakpoint_len;
2954 return x86_breakpoint;
2955 }
2956
2957 static int
2958 x86_supports_range_stepping (void)
2959 {
2960 return 1;
2961 }
2962
2963 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2964 */
2965
2966 static int
2967 x86_supports_hardware_single_step (void)
2968 {
2969 return 1;
2970 }
2971
2972 static int
2973 x86_get_ipa_tdesc_idx (void)
2974 {
2975 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2976 const struct target_desc *tdesc = regcache->tdesc;
2977
2978 #ifdef __x86_64__
2979 return amd64_get_ipa_tdesc_idx (tdesc);
2980 #endif
2981
2982 if (tdesc == tdesc_i386_linux_no_xml)
2983 return X86_TDESC_SSE;
2984
2985 return i386_get_ipa_tdesc_idx (tdesc);
2986 }
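/* Note that in __x86_64__ builds the function returns above, so the
   i386 cases are only reached in 32-bit builds of gdbserver.  */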
2987
2988 /* This is initialized assuming an amd64 target.
2989 x86_arch_setup will correct it for i386 or amd64 targets. */
2990
2991 struct linux_target_ops the_low_target =
2992 {
2993 x86_emit_ops,
2994 x86_supports_range_stepping,
2995 x86_supports_hardware_single_step,
2996 x86_get_syscall_trapinfo,
2997 x86_get_ipa_tdesc_idx,
2998 };
2999
3000 /* The linux target ops object. */
3001
3002 linux_process_target *the_linux_target = &the_x86_target;
3003
3004 void
3005 initialize_low_arch (void)
3006 {
3007 /* Initialize the Linux target descriptions. */
3008 #ifdef __x86_64__
3009 tdesc_amd64_linux_no_xml = allocate_target_description ();
3010 copy_target_description (tdesc_amd64_linux_no_xml,
3011 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
3012 false));
3013 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3014 #endif
3015
3016 tdesc_i386_linux_no_xml = allocate_target_description ();
3017 copy_target_description (tdesc_i386_linux_no_xml,
3018 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3019 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3020
3021 initialize_regsets_info (&x86_regsets_info);
3022 }