gdbserver/linux-low: turn 'supports_tracepoints' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 	static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };	/* jmp rel32 (5 bytes).  */
57 	static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };	/* jmp rel16 (4 bytes, via operand-size prefix).  */
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (char **features, int count) override;
110
111 bool supports_tracepoints () override;
112
113 protected:
114
115 void low_arch_setup () override;
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
126
127 int low_decr_pc_after_break () override;
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
130
131 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
132 int size, raw_breakpoint *bp) override;
133
134 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
135 int size, raw_breakpoint *bp) override;
136
137 bool low_stopped_by_watchpoint () override;
138
139 CORE_ADDR low_stopped_data_address () override;
140
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
144
145 /* Need to fix up i386 siginfo if host is amd64. */
146 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
147 int direction) override;
148
149 arch_process_info *low_new_process () override;
150
151 void low_delete_process (arch_process_info *info) override;
152
153 void low_new_thread (lwp_info *) override;
154
155 void low_delete_thread (arch_lwp_info *) override;
156
157 void low_new_fork (process_info *parent, process_info *child) override;
158
159 void low_prepare_to_resume (lwp_info *lwp) override;
160
161 private:
162
163 	  /* Update the target description of all processes; a new GDB has
164 	     connected, and it may or may not support xml target descriptions.  */
165 void update_xmltarget ();
166 };
167
168 /* The singleton target ops object. */
169
170 static x86_target the_x86_target;
171
172 /* Per-process arch-specific data we want to keep. */
173
174 struct arch_process_info
175 {
176 struct x86_debug_reg_state debug_reg_state;
177 };
178
179 #ifdef __x86_64__
180
181 /* Mapping between the general-purpose registers in `struct user'
182 format and GDB's register array layout.
183 Note that the transfer layout uses 64-bit regs. */
184 static /*const*/ int i386_regmap[] =
185 {
186 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
187 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
188 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
189 DS * 8, ES * 8, FS * 8, GS * 8
190 };
191
192 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
193
194 	/* So the code below doesn't have to care whether it's i386 or amd64.  */
195 #define ORIG_EAX ORIG_RAX
196 #define REGSIZE 8
197
198 static const int x86_64_regmap[] =
199 {
200 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
201 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
202 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
203 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
204 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
205 DS * 8, ES * 8, FS * 8, GS * 8,
206 -1, -1, -1, -1, -1, -1, -1, -1,
207 -1, -1, -1, -1, -1, -1, -1, -1,
208 -1, -1, -1, -1, -1, -1, -1, -1,
209 -1,
210 -1, -1, -1, -1, -1, -1, -1, -1,
211 ORIG_RAX * 8,
212 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
213 21 * 8, 22 * 8,
214 #else
215 -1, -1,
216 #endif
217 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
218 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
219 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
220 -1, -1, -1, -1, -1, -1, -1, -1,
221 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
222 -1, -1, -1, -1, -1, -1, -1, -1,
223 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
224 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
225 -1, -1, -1, -1, -1, -1, -1, -1,
226 -1, -1, -1, -1, -1, -1, -1, -1,
227 -1, -1, -1, -1, -1, -1, -1, -1,
228 -1 /* pkru */
229 };
230
231 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
232 #define X86_64_USER_REGS (GS + 1)
233
234 #else /* ! __x86_64__ */
235
236 /* Mapping between the general-purpose registers in `struct user'
237 format and GDB's register array layout. */
238 static /*const*/ int i386_regmap[] =
239 {
240 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
241 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
242 EIP * 4, EFL * 4, CS * 4, SS * 4,
243 DS * 4, ES * 4, FS * 4, GS * 4
244 };
245
246 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
247
248 #define REGSIZE 4
249
250 #endif
251
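#if 0
/* For illustration only -- a minimal sketch (not from the original file)
   of how a regmap like the above is meant to be used: in the native i386
   case the generic usrregs code reads each register with PTRACE_PEEKUSER
   at the byte offset the map gives for that GDB register number.  This
   sketch glosses over registers smaller than a ptrace word.  */

static void
example_fetch_one_reg (struct regcache *regcache, int lwpid, int regno)
{
  /* i386_regmap[regno] is the register's offset within the inferior's
     `struct user' register block.  */
  long val = ptrace (PTRACE_PEEKUSER, lwpid,
		     (void *) (uintptr_t) i386_regmap[regno], 0);

  supply_register (regcache, regno, &val);
}
#endif
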
252 #ifdef __x86_64__
253
254 	/* Returns true if the current inferior belongs to an x86-64 process,
255 per the tdesc. */
256
257 static int
258 is_64bit_tdesc (void)
259 {
260 struct regcache *regcache = get_thread_regcache (current_thread, 0);
261
262 return register_size (regcache->tdesc, 0) == 8;
263 }
264
265 #endif
266
267 \f
268 /* Called by libthread_db. */
269
270 ps_err_e
271 ps_get_thread_area (struct ps_prochandle *ph,
272 lwpid_t lwpid, int idx, void **base)
273 {
274 #ifdef __x86_64__
275 int use_64bit = is_64bit_tdesc ();
276
277 if (use_64bit)
278 {
279 switch (idx)
280 {
281 case FS:
282 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
283 return PS_OK;
284 break;
285 case GS:
286 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
287 return PS_OK;
288 break;
289 default:
290 return PS_BADADDR;
291 }
292 return PS_ERR;
293 }
294 #endif
295
296 {
297 unsigned int desc[4];
298
299 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
300 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
301 return PS_ERR;
302
303 /* Ensure we properly extend the value to 64-bits for x86_64. */
304 *base = (void *) (uintptr_t) desc[1];
305 return PS_OK;
306 }
307 }
308
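#if 0
/* For illustration only -- a minimal sketch (not from the original file).
   The four-word descriptor filled in by PTRACE_GET_THREAD_AREA mirrors
   the kernel's struct user_desc, whose second field is the segment base
   address; that is why desc[1] is used above.  Assuming <asm/ldt.h> is
   available, the same lookup could be written as:  */

#include <asm/ldt.h>

static int
example_thread_area_base (int lwpid, int idx, void **base)
{
  struct user_desc desc;

  if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
	      (void *) (intptr_t) idx, &desc) < 0)
    return -1;

  *base = (void *) (uintptr_t) desc.base_addr;
  return 0;
}
#endif
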
309 /* Get the thread area address. This is used to recognize which
310 thread is which when tracing with the in-process agent library. We
311 don't read anything from the address, and treat it as opaque; it's
312 the address itself that we assume is unique per-thread. */
313
314 static int
315 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
316 {
317 #ifdef __x86_64__
318 int use_64bit = is_64bit_tdesc ();
319
320 if (use_64bit)
321 {
322 void *base;
323 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
324 {
325 *addr = (CORE_ADDR) (uintptr_t) base;
326 return 0;
327 }
328
329 return -1;
330 }
331 #endif
332
333 {
334 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
335 struct thread_info *thr = get_lwp_thread (lwp);
336 struct regcache *regcache = get_thread_regcache (thr, 1);
337 unsigned int desc[4];
338 ULONGEST gs = 0;
339 	    const int reg_thread_area = 3; /* Shift off the selector's RPL/TI bits to get the GDT index.  */
340 int idx;
341
342 collect_register_by_name (regcache, "gs", &gs);
343
344 idx = gs >> reg_thread_area;
345
346 if (ptrace (PTRACE_GET_THREAD_AREA,
347 lwpid_of (thr),
348 (void *) (long) idx, (unsigned long) &desc) < 0)
349 return -1;
350
351 *addr = desc[1];
352 return 0;
353 }
354 }
355
356
357 \f
358 bool
359 x86_target::low_cannot_store_register (int regno)
360 {
361 #ifdef __x86_64__
362 if (is_64bit_tdesc ())
363 return false;
364 #endif
365
366 return regno >= I386_NUM_REGS;
367 }
368
369 bool
370 x86_target::low_cannot_fetch_register (int regno)
371 {
372 #ifdef __x86_64__
373 if (is_64bit_tdesc ())
374 return false;
375 #endif
376
377 return regno >= I386_NUM_REGS;
378 }
379
380 static void
381 x86_fill_gregset (struct regcache *regcache, void *buf)
382 {
383 int i;
384
385 #ifdef __x86_64__
386 if (register_size (regcache->tdesc, 0) == 8)
387 {
388 for (i = 0; i < X86_64_NUM_REGS; i++)
389 if (x86_64_regmap[i] != -1)
390 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
391
392 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
393 {
394 unsigned long base;
395 int lwpid = lwpid_of (current_thread);
396
397 collect_register_by_name (regcache, "fs_base", &base);
398 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
399
400 collect_register_by_name (regcache, "gs_base", &base);
401 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
402 }
403 #endif
404
405 return;
406 }
407
408 /* 32-bit inferior registers need to be zero-extended.
409 Callers would read uninitialized memory otherwise. */
410 memset (buf, 0x00, X86_64_USER_REGS * 8);
411 #endif
412
413 for (i = 0; i < I386_NUM_REGS; i++)
414 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
415
416 collect_register_by_name (regcache, "orig_eax",
417 ((char *) buf) + ORIG_EAX * REGSIZE);
418
419 #ifdef __x86_64__
420 /* Sign extend EAX value to avoid potential syscall restart
421 problems.
422
423 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
424 for a detailed explanation. */
425 if (register_size (regcache->tdesc, 0) == 4)
426 {
427 void *ptr = ((gdb_byte *) buf
428 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
429
430 *(int64_t *) ptr = *(int32_t *) ptr;
431 }
432 #endif
433 }
434
435 static void
436 x86_store_gregset (struct regcache *regcache, const void *buf)
437 {
438 int i;
439
440 #ifdef __x86_64__
441 if (register_size (regcache->tdesc, 0) == 8)
442 {
443 for (i = 0; i < X86_64_NUM_REGS; i++)
444 if (x86_64_regmap[i] != -1)
445 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
446
447 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
448 {
449 unsigned long base;
450 int lwpid = lwpid_of (current_thread);
451
452 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
453 supply_register_by_name (regcache, "fs_base", &base);
454
455 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
456 supply_register_by_name (regcache, "gs_base", &base);
457 }
458 #endif
459 return;
460 }
461 #endif
462
463 for (i = 0; i < I386_NUM_REGS; i++)
464 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
465
466 supply_register_by_name (regcache, "orig_eax",
467 ((char *) buf) + ORIG_EAX * REGSIZE);
468 }
469
470 static void
471 x86_fill_fpregset (struct regcache *regcache, void *buf)
472 {
473 #ifdef __x86_64__
474 i387_cache_to_fxsave (regcache, buf);
475 #else
476 i387_cache_to_fsave (regcache, buf);
477 #endif
478 }
479
480 static void
481 x86_store_fpregset (struct regcache *regcache, const void *buf)
482 {
483 #ifdef __x86_64__
484 i387_fxsave_to_cache (regcache, buf);
485 #else
486 i387_fsave_to_cache (regcache, buf);
487 #endif
488 }
489
490 #ifndef __x86_64__
491
492 static void
493 x86_fill_fpxregset (struct regcache *regcache, void *buf)
494 {
495 i387_cache_to_fxsave (regcache, buf);
496 }
497
498 static void
499 x86_store_fpxregset (struct regcache *regcache, const void *buf)
500 {
501 i387_fxsave_to_cache (regcache, buf);
502 }
503
504 #endif
505
506 static void
507 x86_fill_xstateregset (struct regcache *regcache, void *buf)
508 {
509 i387_cache_to_xsave (regcache, buf);
510 }
511
512 static void
513 x86_store_xstateregset (struct regcache *regcache, const void *buf)
514 {
515 i387_xsave_to_cache (regcache, buf);
516 }
517
518 /* ??? The non-biarch i386 case stores all the i387 regs twice.
519 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
520 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
521 	   doesn't work.  It would be nice to avoid the duplication in the case
522 	   where it does work.  Maybe the arch_setup routine could check whether
523 	   it works and update the supported regsets accordingly.  */
524
525 static struct regset_info x86_regsets[] =
526 {
527 #ifdef HAVE_PTRACE_GETREGS
528 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
529 GENERAL_REGS,
530 x86_fill_gregset, x86_store_gregset },
531 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
532 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
533 # ifndef __x86_64__
534 # ifdef HAVE_PTRACE_GETFPXREGS
535 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
536 EXTENDED_REGS,
537 x86_fill_fpxregset, x86_store_fpxregset },
538 # endif
539 # endif
540 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
541 FP_REGS,
542 x86_fill_fpregset, x86_store_fpregset },
543 #endif /* HAVE_PTRACE_GETREGS */
544 NULL_REGSET
545 };
546
547 bool
548 x86_target::low_supports_breakpoints ()
549 {
550 return true;
551 }
552
553 CORE_ADDR
554 x86_target::low_get_pc (regcache *regcache)
555 {
556 int use_64bit = register_size (regcache->tdesc, 0) == 8;
557
558 if (use_64bit)
559 {
560 uint64_t pc;
561
562 collect_register_by_name (regcache, "rip", &pc);
563 return (CORE_ADDR) pc;
564 }
565 else
566 {
567 uint32_t pc;
568
569 collect_register_by_name (regcache, "eip", &pc);
570 return (CORE_ADDR) pc;
571 }
572 }
573
574 void
575 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
576 {
577 int use_64bit = register_size (regcache->tdesc, 0) == 8;
578
579 if (use_64bit)
580 {
581 uint64_t newpc = pc;
582
583 supply_register_by_name (regcache, "rip", &newpc);
584 }
585 else
586 {
587 uint32_t newpc = pc;
588
589 supply_register_by_name (regcache, "eip", &newpc);
590 }
591 }
592
593 int
594 x86_target::low_decr_pc_after_break ()
595 {
596 return 1;
597 }
598
599 \f
600 static const gdb_byte x86_breakpoint[] = { 0xCC };
601 #define x86_breakpoint_len 1
602
603 bool
604 x86_target::low_breakpoint_at (CORE_ADDR pc)
605 {
606 unsigned char c;
607
608 read_memory (pc, &c, 1);
609 if (c == 0xCC)
610 return true;
611
612 return false;
613 }
614 \f
615 /* Low-level function vector. */
616 struct x86_dr_low_type x86_dr_low =
617 {
618 x86_linux_dr_set_control,
619 x86_linux_dr_set_addr,
620 x86_linux_dr_get_addr,
621 x86_linux_dr_get_status,
622 x86_linux_dr_get_control,
623 sizeof (void *),
624 };
625 \f
626 /* Breakpoint/Watchpoint support. */
627
628 bool
629 x86_target::supports_z_point_type (char z_type)
630 {
631 switch (z_type)
632 {
633 case Z_PACKET_SW_BP:
634 case Z_PACKET_HW_BP:
635 case Z_PACKET_WRITE_WP:
636 case Z_PACKET_ACCESS_WP:
637 return true;
638 default:
639 return false;
640 }
641 }
642
643 int
644 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
645 int size, raw_breakpoint *bp)
646 {
647 struct process_info *proc = current_process ();
648
649 switch (type)
650 {
651 case raw_bkpt_type_hw:
652 case raw_bkpt_type_write_wp:
653 case raw_bkpt_type_access_wp:
654 {
655 enum target_hw_bp_type hw_type
656 = raw_bkpt_type_to_target_hw_bp_type (type);
657 struct x86_debug_reg_state *state
658 = &proc->priv->arch_private->debug_reg_state;
659
660 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
661 }
662
663 default:
664 /* Unsupported. */
665 return 1;
666 }
667 }
668
669 int
670 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
671 int size, raw_breakpoint *bp)
672 {
673 struct process_info *proc = current_process ();
674
675 switch (type)
676 {
677 case raw_bkpt_type_hw:
678 case raw_bkpt_type_write_wp:
679 case raw_bkpt_type_access_wp:
680 {
681 enum target_hw_bp_type hw_type
682 = raw_bkpt_type_to_target_hw_bp_type (type);
683 struct x86_debug_reg_state *state
684 = &proc->priv->arch_private->debug_reg_state;
685
686 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
687 }
688 default:
689 /* Unsupported. */
690 return 1;
691 }
692 }
693
694 bool
695 x86_target::low_stopped_by_watchpoint ()
696 {
697 struct process_info *proc = current_process ();
698 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
699 }
700
701 CORE_ADDR
702 x86_target::low_stopped_data_address ()
703 {
704 struct process_info *proc = current_process ();
705 CORE_ADDR addr;
706 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
707 &addr))
708 return addr;
709 return 0;
710 }
711 \f
712 /* Called when a new process is created. */
713
714 arch_process_info *
715 x86_target::low_new_process ()
716 {
717 struct arch_process_info *info = XCNEW (struct arch_process_info);
718
719 x86_low_init_dregs (&info->debug_reg_state);
720
721 return info;
722 }
723
724 /* Called when a process is being deleted. */
725
726 void
727 x86_target::low_delete_process (arch_process_info *info)
728 {
729 xfree (info);
730 }
731
732 void
733 x86_target::low_new_thread (lwp_info *lwp)
734 {
735 /* This comes from nat/. */
736 x86_linux_new_thread (lwp);
737 }
738
739 void
740 x86_target::low_delete_thread (arch_lwp_info *alwp)
741 {
742 /* This comes from nat/. */
743 x86_linux_delete_thread (alwp);
744 }
745
746 /* Target routine for new_fork. */
747
748 void
749 x86_target::low_new_fork (process_info *parent, process_info *child)
750 {
751 /* These are allocated by linux_add_process. */
752 gdb_assert (parent->priv != NULL
753 && parent->priv->arch_private != NULL);
754 gdb_assert (child->priv != NULL
755 && child->priv->arch_private != NULL);
756
757 /* Linux kernel before 2.6.33 commit
758 72f674d203cd230426437cdcf7dd6f681dad8b0d
759 will inherit hardware debug registers from parent
760 on fork/vfork/clone. Newer Linux kernels create such tasks with
761 zeroed debug registers.
762
763 GDB core assumes the child inherits the watchpoints/hw
764 breakpoints of the parent, and will remove them all from the
765 	     forked off process.  Copy the debug register mirrors into the
766 	     new process so that all breakpoints and watchpoints can be
767 	     removed together.  The debug register mirrors will be zeroed
768 	     in the end before detaching the forked off process, thus making
769 	     this compatible with older Linux kernels too.  */
770
771 *child->priv->arch_private = *parent->priv->arch_private;
772 }
773
774 void
775 x86_target::low_prepare_to_resume (lwp_info *lwp)
776 {
777 /* This comes from nat/. */
778 x86_linux_prepare_to_resume (lwp);
779 }
780
781 /* See nat/x86-dregs.h. */
782
783 struct x86_debug_reg_state *
784 x86_debug_reg_state (pid_t pid)
785 {
786 struct process_info *proc = find_process_pid (pid);
787
788 return &proc->priv->arch_private->debug_reg_state;
789 }
790 \f
791 /* When GDBSERVER is built as a 64-bit application on linux, the
792 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
793 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
794 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
795 conversion in-place ourselves. */
796
797 	/* Convert a ptrace/host siginfo object into/from the siginfo in the
798 	   layout of the inferior's architecture.  Returns true if any
799 conversion was done; false otherwise. If DIRECTION is 1, then copy
800 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
801 INF. */
802
803 bool
804 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
805 {
806 #ifdef __x86_64__
807 unsigned int machine;
808 int tid = lwpid_of (current_thread);
809 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
810
811 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
812 if (!is_64bit_tdesc ())
813 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
814 FIXUP_32);
815 /* No fixup for native x32 GDB. */
816 else if (!is_elf64 && sizeof (void *) == 8)
817 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
818 FIXUP_X32);
819 #endif
820
821 return false;
822 }
823 \f
824 static int use_xml;
825
826 /* Format of XSAVE extended state is:
827 struct
828 {
829 fxsave_bytes[0..463]
830 sw_usable_bytes[464..511]
831 xstate_hdr_bytes[512..575]
832 avx_bytes[576..831]
833 future_state etc
834 };
835
836 Same memory layout will be used for the coredump NT_X86_XSTATE
837 representing the XSAVE extended state registers.
838
839 	   The first 8 bytes of sw_usable_bytes, i.e. bytes [464..471], are the OS enabled
840 extended state mask, which is the same as the extended control register
841 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
842 together with the mask saved in the xstate_hdr_bytes to determine what
843 states the processor/OS supports and what state, used or initialized,
844 the process/thread is in. */
845 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
846
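#if 0
/* For illustration only -- a minimal sketch (not from the original file)
   of pulling the XCR0 mask out of a raw XSAVE buffer using the offset
   above, the same way x86_linux_read_description does further down.
   TID is assumed to be a stopped tracee.  */

static uint64_t
example_read_xcr0 (int tid)
{
  uint64_t xstateregs[X86_XSTATE_SSE_SIZE / sizeof (uint64_t)];
  struct iovec iov;

  iov.iov_base = xstateregs;
  iov.iov_len = sizeof (xstateregs);

  if (ptrace (PTRACE_GETREGSET, tid,
	      (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
    return 0;

  /* XCR0 sits in the software-usable area of the XSAVE block.  */
  return xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
}
#endif
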
847 /* Does the current host support the GETFPXREGS request? The header
848 file may or may not define it, and even if it is defined, the
849 kernel will return EIO if it's running on a pre-SSE processor. */
850 int have_ptrace_getfpxregs =
851 #ifdef HAVE_PTRACE_GETFPXREGS
852 -1
853 #else
854 0
855 #endif
856 ;
857
858 /* Get Linux/x86 target description from running target. */
859
860 static const struct target_desc *
861 x86_linux_read_description (void)
862 {
863 unsigned int machine;
864 int is_elf64;
865 int xcr0_features;
866 int tid;
867 static uint64_t xcr0;
868 struct regset_info *regset;
869
870 tid = lwpid_of (current_thread);
871
872 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
873
874 if (sizeof (void *) == 4)
875 {
876 if (is_elf64 > 0)
877 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
878 #ifndef __x86_64__
879 else if (machine == EM_X86_64)
880 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
881 #endif
882 }
883
884 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
885 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
886 {
887 elf_fpxregset_t fpxregs;
888
889 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
890 {
891 have_ptrace_getfpxregs = 0;
892 have_ptrace_getregset = 0;
893 return i386_linux_read_description (X86_XSTATE_X87);
894 }
895 else
896 have_ptrace_getfpxregs = 1;
897 }
898 #endif
899
900 if (!use_xml)
901 {
902 x86_xcr0 = X86_XSTATE_SSE_MASK;
903
904 /* Don't use XML. */
905 #ifdef __x86_64__
906 if (machine == EM_X86_64)
907 return tdesc_amd64_linux_no_xml;
908 else
909 #endif
910 return tdesc_i386_linux_no_xml;
911 }
912
913 if (have_ptrace_getregset == -1)
914 {
915 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
916 struct iovec iov;
917
918 iov.iov_base = xstateregs;
919 iov.iov_len = sizeof (xstateregs);
920
921 /* Check if PTRACE_GETREGSET works. */
922 if (ptrace (PTRACE_GETREGSET, tid,
923 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
924 have_ptrace_getregset = 0;
925 else
926 {
927 have_ptrace_getregset = 1;
928
929 /* Get XCR0 from XSAVE extended state. */
930 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
931 / sizeof (uint64_t))];
932
933 /* Use PTRACE_GETREGSET if it is available. */
934 for (regset = x86_regsets;
935 regset->fill_function != NULL; regset++)
936 if (regset->get_request == PTRACE_GETREGSET)
937 regset->size = X86_XSTATE_SIZE (xcr0);
938 else if (regset->type != GENERAL_REGS)
939 regset->size = 0;
940 }
941 }
942
943 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
944 xcr0_features = (have_ptrace_getregset
945 && (xcr0 & X86_XSTATE_ALL_MASK));
946
947 if (xcr0_features)
948 x86_xcr0 = xcr0;
949
950 if (machine == EM_X86_64)
951 {
952 #ifdef __x86_64__
953 const target_desc *tdesc = NULL;
954
955 if (xcr0_features)
956 {
957 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
958 !is_elf64);
959 }
960
961 if (tdesc == NULL)
962 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
963 return tdesc;
964 #endif
965 }
966 else
967 {
968 const target_desc *tdesc = NULL;
969
970 if (xcr0_features)
971 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
972
973 if (tdesc == NULL)
974 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
975
976 return tdesc;
977 }
978
979 gdb_assert_not_reached ("failed to return tdesc");
980 }
981
982 	/* Update the target description of all processes; a new GDB has
983 	   connected, and it may or may not support xml target descriptions.  */
984
985 void
986 x86_target::update_xmltarget ()
987 {
988 struct thread_info *saved_thread = current_thread;
989
990 /* Before changing the register cache's internal layout, flush the
991 contents of the current valid caches back to the threads, and
992 release the current regcache objects. */
993 regcache_release ();
994
995 for_each_process ([this] (process_info *proc) {
996 int pid = proc->pid;
997
998 /* Look up any thread of this process. */
999 current_thread = find_any_thread_of_pid (pid);
1000
1001 low_arch_setup ();
1002 });
1003
1004 current_thread = saved_thread;
1005 }
1006
1007 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1008 PTRACE_GETREGSET. */
1009
1010 void
1011 x86_target::process_qsupported (char **features, int count)
1012 {
1013 int i;
1014
1015 	  /* Assume GDB doesn't support XML unless it sends "xmlRegisters="
1016 	     with "i386" in the qSupported query, in which case it supports
1017 	     x86 XML target descriptions.  */
1018 use_xml = 0;
1019 for (i = 0; i < count; i++)
1020 {
1021 const char *feature = features[i];
1022
1023 if (startswith (feature, "xmlRegisters="))
1024 {
1025 char *copy = xstrdup (feature + 13);
1026
1027 char *saveptr;
1028 for (char *p = strtok_r (copy, ",", &saveptr);
1029 p != NULL;
1030 p = strtok_r (NULL, ",", &saveptr))
1031 {
1032 if (strcmp (p, "i386") == 0)
1033 {
1034 use_xml = 1;
1035 break;
1036 }
1037 }
1038
1039 free (copy);
1040 }
1041 }
1042 update_xmltarget ();
1043 }
1044
1045 /* Common for x86/x86-64. */
1046
1047 static struct regsets_info x86_regsets_info =
1048 {
1049 x86_regsets, /* regsets */
1050 0, /* num_regsets */
1051 NULL, /* disabled_regsets */
1052 };
1053
1054 #ifdef __x86_64__
1055 static struct regs_info amd64_linux_regs_info =
1056 {
1057 NULL, /* regset_bitmap */
1058 NULL, /* usrregs_info */
1059 &x86_regsets_info
1060 };
1061 #endif
1062 static struct usrregs_info i386_linux_usrregs_info =
1063 {
1064 I386_NUM_REGS,
1065 i386_regmap,
1066 };
1067
1068 static struct regs_info i386_linux_regs_info =
1069 {
1070 NULL, /* regset_bitmap */
1071 &i386_linux_usrregs_info,
1072 &x86_regsets_info
1073 };
1074
1075 const regs_info *
1076 x86_target::get_regs_info ()
1077 {
1078 #ifdef __x86_64__
1079 if (is_64bit_tdesc ())
1080 return &amd64_linux_regs_info;
1081 else
1082 #endif
1083 return &i386_linux_regs_info;
1084 }
1085
1086 /* Initialize the target description for the architecture of the
1087 inferior. */
1088
1089 void
1090 x86_target::low_arch_setup ()
1091 {
1092 current_process ()->tdesc = x86_linux_read_description ();
1093 }
1094
1095 	/* Fill *SYSNO with the syscall nr trapped.  This should only be
1096 	   called if LWP got a SYSCALL_SIGTRAP.  */
1097
1098 static void
1099 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1100 {
1101 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1102
1103 if (use_64bit)
1104 {
1105 long l_sysno;
1106
1107 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1108 *sysno = (int) l_sysno;
1109 }
1110 else
1111 collect_register_by_name (regcache, "orig_eax", sysno);
1112 }
1113
1114 bool
1115 x86_target::supports_tracepoints ()
1116 {
1117 return true;
1118 }
1119
1120 static void
1121 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1122 {
1123 target_write_memory (*to, buf, len);
1124 *to += len;
1125 }
1126
1127 static int
1128 push_opcode (unsigned char *buf, const char *op)
1129 {
1130 unsigned char *buf_org = buf;
1131
1132 while (1)
1133 {
1134 char *endptr;
1135 unsigned long ul = strtoul (op, &endptr, 16);
1136
1137 if (endptr == op)
1138 break;
1139
1140 *buf++ = ul;
1141 op = endptr;
1142 }
1143
1144 return buf - buf_org;
1145 }
1146
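#if 0
/* For illustration only -- a minimal sketch (not from the original file)
   of what push_opcode does: it parses a string of hex byte values into
   BUF and returns how many bytes were written.  */

static void
example_push_opcode_usage (void)
{
  unsigned char buf[16];
  int n = push_opcode (buf, "48 83 ec 18");	/* sub $0x18,%rsp */

  gdb_assert (n == 4);
  gdb_assert (buf[0] == 0x48 && buf[3] == 0x18);
}
#endif
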
1147 #ifdef __x86_64__
1148
1149 	/* Build a jump pad that saves registers and calls a collection
1150 	   function.  Writes into JJUMPAD_INSN the jump instruction that jumps
1151 	   to the jump pad; the caller is responsible for writing it in at the
1152 	   tracepoint address.  */
1153
1154 static int
1155 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1156 CORE_ADDR collector,
1157 CORE_ADDR lockaddr,
1158 ULONGEST orig_size,
1159 CORE_ADDR *jump_entry,
1160 CORE_ADDR *trampoline,
1161 ULONGEST *trampoline_size,
1162 unsigned char *jjump_pad_insn,
1163 ULONGEST *jjump_pad_insn_size,
1164 CORE_ADDR *adjusted_insn_addr,
1165 CORE_ADDR *adjusted_insn_addr_end,
1166 char *err)
1167 {
1168 unsigned char buf[40];
1169 int i, offset;
1170 int64_t loffset;
1171
1172 CORE_ADDR buildaddr = *jump_entry;
1173
1174 /* Build the jump pad. */
1175
1176 /* First, do tracepoint data collection. Save registers. */
1177 i = 0;
1178 /* Need to ensure stack pointer saved first. */
1179 buf[i++] = 0x54; /* push %rsp */
1180 buf[i++] = 0x55; /* push %rbp */
1181 buf[i++] = 0x57; /* push %rdi */
1182 buf[i++] = 0x56; /* push %rsi */
1183 buf[i++] = 0x52; /* push %rdx */
1184 buf[i++] = 0x51; /* push %rcx */
1185 buf[i++] = 0x53; /* push %rbx */
1186 buf[i++] = 0x50; /* push %rax */
1187 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1188 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1189 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1190 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1191 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1192 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1193 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1194 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1195 buf[i++] = 0x9c; /* pushfq */
1196 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1197 buf[i++] = 0xbf;
1198 memcpy (buf + i, &tpaddr, 8);
1199 i += 8;
1200 buf[i++] = 0x57; /* push %rdi */
1201 append_insns (&buildaddr, i, buf);
1202
1203 /* Stack space for the collecting_t object. */
1204 i = 0;
1205 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1206 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1207 memcpy (buf + i, &tpoint, 8);
1208 i += 8;
1209 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1210 i += push_opcode (&buf[i],
1211 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1212 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1213 append_insns (&buildaddr, i, buf);
1214
1215 /* spin-lock. */
1216 i = 0;
1217 	  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
1218 memcpy (&buf[i], (void *) &lockaddr, 8);
1219 i += 8;
1220 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1221 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1222 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1223 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1224 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1225 append_insns (&buildaddr, i, buf);
1226
1227 /* Set up the gdb_collect call. */
1228 /* At this point, (stack pointer + 0x18) is the base of our saved
1229 register block. */
1230
1231 i = 0;
1232 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1233 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1234
1235 /* tpoint address may be 64-bit wide. */
1236 	  i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
1237 memcpy (buf + i, &tpoint, 8);
1238 i += 8;
1239 append_insns (&buildaddr, i, buf);
1240
1241 	  /* The collector function, being in the shared library, may be
1242 	     more than 31 bits away from the jump pad.  */
1243 i = 0;
1244 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1245 memcpy (buf + i, &collector, 8);
1246 i += 8;
1247 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1248 append_insns (&buildaddr, i, buf);
1249
1250 /* Clear the spin-lock. */
1251 i = 0;
1252 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1253 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1254 memcpy (buf + i, &lockaddr, 8);
1255 i += 8;
1256 append_insns (&buildaddr, i, buf);
1257
1258 /* Remove stack that had been used for the collect_t object. */
1259 i = 0;
1260 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1261 append_insns (&buildaddr, i, buf);
1262
1263 /* Restore register state. */
1264 i = 0;
1265 buf[i++] = 0x48; /* add $0x8,%rsp */
1266 buf[i++] = 0x83;
1267 buf[i++] = 0xc4;
1268 buf[i++] = 0x08;
1269 buf[i++] = 0x9d; /* popfq */
1270 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1271 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1272 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1273 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1274 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1275 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1276 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1277 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1278 buf[i++] = 0x58; /* pop %rax */
1279 buf[i++] = 0x5b; /* pop %rbx */
1280 buf[i++] = 0x59; /* pop %rcx */
1281 buf[i++] = 0x5a; /* pop %rdx */
1282 buf[i++] = 0x5e; /* pop %rsi */
1283 buf[i++] = 0x5f; /* pop %rdi */
1284 buf[i++] = 0x5d; /* pop %rbp */
1285 buf[i++] = 0x5c; /* pop %rsp */
1286 append_insns (&buildaddr, i, buf);
1287
1288 /* Now, adjust the original instruction to execute in the jump
1289 pad. */
1290 *adjusted_insn_addr = buildaddr;
1291 relocate_instruction (&buildaddr, tpaddr);
1292 *adjusted_insn_addr_end = buildaddr;
1293
1294 /* Finally, write a jump back to the program. */
1295
1296 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1297 if (loffset > INT_MAX || loffset < INT_MIN)
1298 {
1299 sprintf (err,
1300 "E.Jump back from jump pad too far from tracepoint "
1301 "(offset 0x%" PRIx64 " > int32).", loffset);
1302 return 1;
1303 }
1304
1305 offset = (int) loffset;
1306 memcpy (buf, jump_insn, sizeof (jump_insn));
1307 memcpy (buf + 1, &offset, 4);
1308 append_insns (&buildaddr, sizeof (jump_insn), buf);
1309
1310 /* The jump pad is now built. Wire in a jump to our jump pad. This
1311 is always done last (by our caller actually), so that we can
1312 install fast tracepoints with threads running. This relies on
1313 the agent's atomic write support. */
1314 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1315 if (loffset > INT_MAX || loffset < INT_MIN)
1316 {
1317 sprintf (err,
1318 "E.Jump pad too far from tracepoint "
1319 "(offset 0x%" PRIx64 " > int32).", loffset);
1320 return 1;
1321 }
1322
1323 offset = (int) loffset;
1324
1325 memcpy (buf, jump_insn, sizeof (jump_insn));
1326 memcpy (buf + 1, &offset, 4);
1327 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1328 *jjump_pad_insn_size = sizeof (jump_insn);
1329
1330 /* Return the end address of our pad. */
1331 *jump_entry = buildaddr;
1332
1333 return 0;
1334 }
1335
1336 #endif /* __x86_64__ */
1337
1338 	/* Build a jump pad that saves registers and calls a collection
1339 	   function.  Writes into JJUMPAD_INSN the jump instruction that jumps
1340 	   to the jump pad; the caller is responsible for writing it in at the
1341 	   tracepoint address.  */
1342
1343 static int
1344 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1345 CORE_ADDR collector,
1346 CORE_ADDR lockaddr,
1347 ULONGEST orig_size,
1348 CORE_ADDR *jump_entry,
1349 CORE_ADDR *trampoline,
1350 ULONGEST *trampoline_size,
1351 unsigned char *jjump_pad_insn,
1352 ULONGEST *jjump_pad_insn_size,
1353 CORE_ADDR *adjusted_insn_addr,
1354 CORE_ADDR *adjusted_insn_addr_end,
1355 char *err)
1356 {
1357 unsigned char buf[0x100];
1358 int i, offset;
1359 CORE_ADDR buildaddr = *jump_entry;
1360
1361 /* Build the jump pad. */
1362
1363 /* First, do tracepoint data collection. Save registers. */
1364 i = 0;
1365 buf[i++] = 0x60; /* pushad */
1366 buf[i++] = 0x68; /* push tpaddr aka $pc */
1367 *((int *)(buf + i)) = (int) tpaddr;
1368 i += 4;
1369 buf[i++] = 0x9c; /* pushf */
1370 buf[i++] = 0x1e; /* push %ds */
1371 buf[i++] = 0x06; /* push %es */
1372 buf[i++] = 0x0f; /* push %fs */
1373 buf[i++] = 0xa0;
1374 buf[i++] = 0x0f; /* push %gs */
1375 buf[i++] = 0xa8;
1376 buf[i++] = 0x16; /* push %ss */
1377 buf[i++] = 0x0e; /* push %cs */
1378 append_insns (&buildaddr, i, buf);
1379
1380 /* Stack space for the collecting_t object. */
1381 i = 0;
1382 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1383
1384 /* Build the object. */
1385 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1386 memcpy (buf + i, &tpoint, 4);
1387 i += 4;
1388 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1389
1390 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1391 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1392 append_insns (&buildaddr, i, buf);
1393
1394 	  /* Spin-lock.  Note this uses cmpxchg, which is not available on the
1395 	     original i386.  If we cared about that, xchg could be used instead.  */
1396
1397 i = 0;
1398 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1399 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1400 %esp,<lockaddr> */
1401 memcpy (&buf[i], (void *) &lockaddr, 4);
1402 i += 4;
1403 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1404 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1405 append_insns (&buildaddr, i, buf);
1406
1407
1408 /* Set up arguments to the gdb_collect call. */
1409 i = 0;
1410 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1411 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1412 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1413 append_insns (&buildaddr, i, buf);
1414
1415 i = 0;
1416 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1417 append_insns (&buildaddr, i, buf);
1418
1419 i = 0;
1420 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1421 memcpy (&buf[i], (void *) &tpoint, 4);
1422 i += 4;
1423 append_insns (&buildaddr, i, buf);
1424
1425 buf[0] = 0xe8; /* call <reladdr> */
1426 offset = collector - (buildaddr + sizeof (jump_insn));
1427 memcpy (buf + 1, &offset, 4);
1428 append_insns (&buildaddr, 5, buf);
1429 /* Clean up after the call. */
1430 buf[0] = 0x83; /* add $0x8,%esp */
1431 buf[1] = 0xc4;
1432 buf[2] = 0x08;
1433 append_insns (&buildaddr, 3, buf);
1434
1435
1436 /* Clear the spin-lock. This would need the LOCK prefix on older
1437 broken archs. */
1438 i = 0;
1439 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1440 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1441 memcpy (buf + i, &lockaddr, 4);
1442 i += 4;
1443 append_insns (&buildaddr, i, buf);
1444
1445
1446 /* Remove stack that had been used for the collect_t object. */
1447 i = 0;
1448 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1449 append_insns (&buildaddr, i, buf);
1450
1451 i = 0;
1452 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1453 buf[i++] = 0xc4;
1454 buf[i++] = 0x04;
1455 buf[i++] = 0x17; /* pop %ss */
1456 buf[i++] = 0x0f; /* pop %gs */
1457 buf[i++] = 0xa9;
1458 buf[i++] = 0x0f; /* pop %fs */
1459 buf[i++] = 0xa1;
1460 buf[i++] = 0x07; /* pop %es */
1461 buf[i++] = 0x1f; /* pop %ds */
1462 buf[i++] = 0x9d; /* popf */
1463 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1464 buf[i++] = 0xc4;
1465 buf[i++] = 0x04;
1466 buf[i++] = 0x61; /* popad */
1467 append_insns (&buildaddr, i, buf);
1468
1469 /* Now, adjust the original instruction to execute in the jump
1470 pad. */
1471 *adjusted_insn_addr = buildaddr;
1472 relocate_instruction (&buildaddr, tpaddr);
1473 *adjusted_insn_addr_end = buildaddr;
1474
1475 /* Write the jump back to the program. */
1476 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1477 memcpy (buf, jump_insn, sizeof (jump_insn));
1478 memcpy (buf + 1, &offset, 4);
1479 append_insns (&buildaddr, sizeof (jump_insn), buf);
1480
1481 /* The jump pad is now built. Wire in a jump to our jump pad. This
1482 is always done last (by our caller actually), so that we can
1483 install fast tracepoints with threads running. This relies on
1484 the agent's atomic write support. */
1485 if (orig_size == 4)
1486 {
1487 /* Create a trampoline. */
1488 *trampoline_size = sizeof (jump_insn);
1489 if (!claim_trampoline_space (*trampoline_size, trampoline))
1490 {
1491 /* No trampoline space available. */
1492 strcpy (err,
1493 "E.Cannot allocate trampoline space needed for fast "
1494 "tracepoints on 4-byte instructions.");
1495 return 1;
1496 }
1497
1498 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1499 memcpy (buf, jump_insn, sizeof (jump_insn));
1500 memcpy (buf + 1, &offset, 4);
1501 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1502
1503 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1504 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1505 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1506 memcpy (buf + 2, &offset, 2);
1507 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1508 *jjump_pad_insn_size = sizeof (small_jump_insn);
1509 }
1510 else
1511 {
1512 /* Else use a 32-bit relative jump instruction. */
1513 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1514 memcpy (buf, jump_insn, sizeof (jump_insn));
1515 memcpy (buf + 1, &offset, 4);
1516 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1517 *jjump_pad_insn_size = sizeof (jump_insn);
1518 }
1519
1520 /* Return the end address of our pad. */
1521 *jump_entry = buildaddr;
1522
1523 return 0;
1524 }
1525
1526 static int
1527 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1528 CORE_ADDR collector,
1529 CORE_ADDR lockaddr,
1530 ULONGEST orig_size,
1531 CORE_ADDR *jump_entry,
1532 CORE_ADDR *trampoline,
1533 ULONGEST *trampoline_size,
1534 unsigned char *jjump_pad_insn,
1535 ULONGEST *jjump_pad_insn_size,
1536 CORE_ADDR *adjusted_insn_addr,
1537 CORE_ADDR *adjusted_insn_addr_end,
1538 char *err)
1539 {
1540 #ifdef __x86_64__
1541 if (is_64bit_tdesc ())
1542 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1543 collector, lockaddr,
1544 orig_size, jump_entry,
1545 trampoline, trampoline_size,
1546 jjump_pad_insn,
1547 jjump_pad_insn_size,
1548 adjusted_insn_addr,
1549 adjusted_insn_addr_end,
1550 err);
1551 #endif
1552
1553 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1554 collector, lockaddr,
1555 orig_size, jump_entry,
1556 trampoline, trampoline_size,
1557 jjump_pad_insn,
1558 jjump_pad_insn_size,
1559 adjusted_insn_addr,
1560 adjusted_insn_addr_end,
1561 err);
1562 }
1563
1564 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1565 architectures. */
1566
1567 static int
1568 x86_get_min_fast_tracepoint_insn_len (void)
1569 {
1570 static int warned_about_fast_tracepoints = 0;
1571
1572 #ifdef __x86_64__
1573 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1574 used for fast tracepoints. */
1575 if (is_64bit_tdesc ())
1576 return 5;
1577 #endif
1578
1579 if (agent_loaded_p ())
1580 {
1581 char errbuf[IPA_BUFSIZ];
1582
1583 errbuf[0] = '\0';
1584
1585 /* On x86, if trampolines are available, then 4-byte jump instructions
1586 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1587 with a 4-byte offset are used instead. */
1588 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1589 return 4;
1590 else
1591 {
1592 	  /* GDB has no channel to explain to the user why a shorter fast
1593 tracepoint is not possible, but at least make GDBserver
1594 mention that something has gone awry. */
1595 if (!warned_about_fast_tracepoints)
1596 {
1597 warning ("4-byte fast tracepoints not available; %s", errbuf);
1598 warned_about_fast_tracepoints = 1;
1599 }
1600 return 5;
1601 }
1602 }
1603 else
1604 {
1605 /* Indicate that the minimum length is currently unknown since the IPA
1606 has not loaded yet. */
1607 return 0;
1608 }
1609 }
1610
1611 static void
1612 add_insns (unsigned char *start, int len)
1613 {
1614 CORE_ADDR buildaddr = current_insn_ptr;
1615
1616 if (debug_threads)
1617 debug_printf ("Adding %d bytes of insn at %s\n",
1618 len, paddress (buildaddr));
1619
1620 append_insns (&buildaddr, len, start);
1621 current_insn_ptr = buildaddr;
1622 }
1623
1624 /* Our general strategy for emitting code is to avoid specifying raw
1625 bytes whenever possible, and instead copy a block of inline asm
1626 that is embedded in the function. This is a little messy, because
1627 we need to keep the compiler from discarding what looks like dead
1628 code, plus suppress various warnings. */
1629
1630 #define EMIT_ASM(NAME, INSNS) \
1631 do \
1632 { \
1633 extern unsigned char start_ ## NAME, end_ ## NAME; \
1634 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1635 __asm__ ("jmp end_" #NAME "\n" \
1636 "\t" "start_" #NAME ":" \
1637 "\t" INSNS "\n" \
1638 "\t" "end_" #NAME ":"); \
1639 } while (0)
1640
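#if 0
/* For illustration only -- a minimal sketch (not from the original file)
   of what a use of the macro expands to.  EMIT_ASM (example_nop, "nop")
   becomes roughly the following: the inline asm plants the bytes of
   "nop" between two labels inside this function's text, and add_insns
   copies those bytes into the jump pad being built.  */

static void
example_emit_nop (void)
{
  extern unsigned char start_example_nop, end_example_nop;
  add_insns (&start_example_nop, &end_example_nop - &start_example_nop);
  __asm__ ("jmp end_example_nop\n"
	   "\t" "start_example_nop:"
	   "\t" "nop" "\n"
	   "\t" "end_example_nop:");
}
#endif
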
1641 #ifdef __x86_64__
1642
1643 #define EMIT_ASM32(NAME,INSNS) \
1644 do \
1645 { \
1646 extern unsigned char start_ ## NAME, end_ ## NAME; \
1647 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1648 __asm__ (".code32\n" \
1649 "\t" "jmp end_" #NAME "\n" \
1650 "\t" "start_" #NAME ":\n" \
1651 "\t" INSNS "\n" \
1652 "\t" "end_" #NAME ":\n" \
1653 ".code64\n"); \
1654 } while (0)
1655
1656 #else
1657
1658 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1659
1660 #endif
1661
1662 #ifdef __x86_64__
1663
1664 static void
1665 amd64_emit_prologue (void)
1666 {
1667 EMIT_ASM (amd64_prologue,
1668 "pushq %rbp\n\t"
1669 "movq %rsp,%rbp\n\t"
1670 "sub $0x20,%rsp\n\t"
1671 "movq %rdi,-8(%rbp)\n\t"
1672 "movq %rsi,-16(%rbp)");
1673 }
1674
1675
1676 static void
1677 amd64_emit_epilogue (void)
1678 {
1679 EMIT_ASM (amd64_epilogue,
1680 "movq -16(%rbp),%rdi\n\t"
1681 "movq %rax,(%rdi)\n\t"
1682 "xor %rax,%rax\n\t"
1683 "leave\n\t"
1684 "ret");
1685 }
1686
1687 static void
1688 amd64_emit_add (void)
1689 {
1690 EMIT_ASM (amd64_add,
1691 "add (%rsp),%rax\n\t"
1692 "lea 0x8(%rsp),%rsp");
1693 }
1694
1695 static void
1696 amd64_emit_sub (void)
1697 {
1698 EMIT_ASM (amd64_sub,
1699 "sub %rax,(%rsp)\n\t"
1700 "pop %rax");
1701 }
1702
1703 static void
1704 amd64_emit_mul (void)
1705 {
1706 emit_error = 1;
1707 }
1708
1709 static void
1710 amd64_emit_lsh (void)
1711 {
1712 emit_error = 1;
1713 }
1714
1715 static void
1716 amd64_emit_rsh_signed (void)
1717 {
1718 emit_error = 1;
1719 }
1720
1721 static void
1722 amd64_emit_rsh_unsigned (void)
1723 {
1724 emit_error = 1;
1725 }
1726
1727 static void
1728 amd64_emit_ext (int arg)
1729 {
1730 switch (arg)
1731 {
1732 case 8:
1733 EMIT_ASM (amd64_ext_8,
1734 "cbtw\n\t"
1735 "cwtl\n\t"
1736 "cltq");
1737 break;
1738 case 16:
1739 EMIT_ASM (amd64_ext_16,
1740 "cwtl\n\t"
1741 "cltq");
1742 break;
1743 case 32:
1744 EMIT_ASM (amd64_ext_32,
1745 "cltq");
1746 break;
1747 default:
1748 emit_error = 1;
1749 }
1750 }
1751
1752 static void
1753 amd64_emit_log_not (void)
1754 {
1755 EMIT_ASM (amd64_log_not,
1756 "test %rax,%rax\n\t"
1757 "sete %cl\n\t"
1758 "movzbq %cl,%rax");
1759 }
1760
1761 static void
1762 amd64_emit_bit_and (void)
1763 {
1764 EMIT_ASM (amd64_and,
1765 "and (%rsp),%rax\n\t"
1766 "lea 0x8(%rsp),%rsp");
1767 }
1768
1769 static void
1770 amd64_emit_bit_or (void)
1771 {
1772 EMIT_ASM (amd64_or,
1773 "or (%rsp),%rax\n\t"
1774 "lea 0x8(%rsp),%rsp");
1775 }
1776
1777 static void
1778 amd64_emit_bit_xor (void)
1779 {
1780 EMIT_ASM (amd64_xor,
1781 "xor (%rsp),%rax\n\t"
1782 "lea 0x8(%rsp),%rsp");
1783 }
1784
1785 static void
1786 amd64_emit_bit_not (void)
1787 {
1788 EMIT_ASM (amd64_bit_not,
1789 "xorq $0xffffffffffffffff,%rax");
1790 }
1791
1792 static void
1793 amd64_emit_equal (void)
1794 {
1795 EMIT_ASM (amd64_equal,
1796 "cmp %rax,(%rsp)\n\t"
1797 "je .Lamd64_equal_true\n\t"
1798 "xor %rax,%rax\n\t"
1799 "jmp .Lamd64_equal_end\n\t"
1800 ".Lamd64_equal_true:\n\t"
1801 "mov $0x1,%rax\n\t"
1802 ".Lamd64_equal_end:\n\t"
1803 "lea 0x8(%rsp),%rsp");
1804 }
1805
1806 static void
1807 amd64_emit_less_signed (void)
1808 {
1809 EMIT_ASM (amd64_less_signed,
1810 "cmp %rax,(%rsp)\n\t"
1811 "jl .Lamd64_less_signed_true\n\t"
1812 "xor %rax,%rax\n\t"
1813 "jmp .Lamd64_less_signed_end\n\t"
1814 ".Lamd64_less_signed_true:\n\t"
1815 "mov $1,%rax\n\t"
1816 ".Lamd64_less_signed_end:\n\t"
1817 "lea 0x8(%rsp),%rsp");
1818 }
1819
1820 static void
1821 amd64_emit_less_unsigned (void)
1822 {
1823 EMIT_ASM (amd64_less_unsigned,
1824 "cmp %rax,(%rsp)\n\t"
1825 "jb .Lamd64_less_unsigned_true\n\t"
1826 "xor %rax,%rax\n\t"
1827 "jmp .Lamd64_less_unsigned_end\n\t"
1828 ".Lamd64_less_unsigned_true:\n\t"
1829 "mov $1,%rax\n\t"
1830 ".Lamd64_less_unsigned_end:\n\t"
1831 "lea 0x8(%rsp),%rsp");
1832 }
1833
1834 static void
1835 amd64_emit_ref (int size)
1836 {
1837 switch (size)
1838 {
1839 case 1:
1840 EMIT_ASM (amd64_ref1,
1841 "movb (%rax),%al");
1842 break;
1843 case 2:
1844 EMIT_ASM (amd64_ref2,
1845 "movw (%rax),%ax");
1846 break;
1847 case 4:
1848 EMIT_ASM (amd64_ref4,
1849 "movl (%rax),%eax");
1850 break;
1851 case 8:
1852 EMIT_ASM (amd64_ref8,
1853 "movq (%rax),%rax");
1854 break;
1855 }
1856 }
1857
1858 static void
1859 amd64_emit_if_goto (int *offset_p, int *size_p)
1860 {
1861 EMIT_ASM (amd64_if_goto,
1862 "mov %rax,%rcx\n\t"
1863 "pop %rax\n\t"
1864 "cmp $0,%rcx\n\t"
1865 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1866 if (offset_p)
1867 *offset_p = 10;
1868 if (size_p)
1869 *size_p = 4;
1870 }
1871
1872 static void
1873 amd64_emit_goto (int *offset_p, int *size_p)
1874 {
1875 EMIT_ASM (amd64_goto,
1876 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1877 if (offset_p)
1878 *offset_p = 1;
1879 if (size_p)
1880 *size_p = 4;
1881 }
1882
1883 static void
1884 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1885 {
1886 int diff = (to - (from + size));
1887 unsigned char buf[sizeof (int)];
1888
1889 if (size != 4)
1890 {
1891 emit_error = 1;
1892 return;
1893 }
1894
1895 memcpy (buf, &diff, sizeof (int));
1896 target_write_memory (from, buf, sizeof (int));
1897 }
1898
1899 static void
1900 amd64_emit_const (LONGEST num)
1901 {
1902 unsigned char buf[16];
1903 int i;
1904 CORE_ADDR buildaddr = current_insn_ptr;
1905
1906 i = 0;
1907 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1908 memcpy (&buf[i], &num, sizeof (num));
1909 i += 8;
1910 append_insns (&buildaddr, i, buf);
1911 current_insn_ptr = buildaddr;
1912 }
1913
1914 static void
1915 amd64_emit_call (CORE_ADDR fn)
1916 {
1917 unsigned char buf[16];
1918 int i;
1919 CORE_ADDR buildaddr;
1920 LONGEST offset64;
1921
1922 	  /* The destination function, being in the shared library, may be
1923 	     more than 31 bits away from the compiled code pad.  */
1924
1925 buildaddr = current_insn_ptr;
1926
1927 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1928
1929 i = 0;
1930
1931 if (offset64 > INT_MAX || offset64 < INT_MIN)
1932 {
1933 	      /* Offset is too large for a call.  Use callq, but that requires
1934 	         a register, so avoid it if possible.  Use %r10; since it is
1935 	         call-clobbered, we don't have to push/pop it.  */
1936 buf[i++] = 0x48; /* mov $fn,%r10 */
1937 buf[i++] = 0xba;
1938 memcpy (buf + i, &fn, 8);
1939 i += 8;
1940 buf[i++] = 0xff; /* callq *%r10 */
1941 buf[i++] = 0xd2;
1942 }
1943 else
1944 {
1945 int offset32 = offset64; /* we know we can't overflow here. */
1946
1947 buf[i++] = 0xe8; /* call <reladdr> */
1948 memcpy (buf + i, &offset32, 4);
1949 i += 4;
1950 }
1951
1952 append_insns (&buildaddr, i, buf);
1953 current_insn_ptr = buildaddr;
1954 }
1955
1956 static void
1957 amd64_emit_reg (int reg)
1958 {
1959 unsigned char buf[16];
1960 int i;
1961 CORE_ADDR buildaddr;
1962
1963 /* Assume raw_regs is still in %rdi. */
1964 buildaddr = current_insn_ptr;
1965 i = 0;
1966 buf[i++] = 0xbe; /* mov $<n>,%esi */
1967 memcpy (&buf[i], &reg, sizeof (reg));
1968 i += 4;
1969 append_insns (&buildaddr, i, buf);
1970 current_insn_ptr = buildaddr;
1971 amd64_emit_call (get_raw_reg_func_addr ());
1972 }
1973
1974 static void
1975 amd64_emit_pop (void)
1976 {
1977 EMIT_ASM (amd64_pop,
1978 "pop %rax");
1979 }
1980
1981 static void
1982 amd64_emit_stack_flush (void)
1983 {
1984 EMIT_ASM (amd64_stack_flush,
1985 "push %rax");
1986 }
1987
1988 static void
1989 amd64_emit_zero_ext (int arg)
1990 {
1991 switch (arg)
1992 {
1993 case 8:
1994 EMIT_ASM (amd64_zero_ext_8,
1995 "and $0xff,%rax");
1996 break;
1997 case 16:
1998 EMIT_ASM (amd64_zero_ext_16,
1999 "and $0xffff,%rax");
2000 break;
2001 case 32:
2002 EMIT_ASM (amd64_zero_ext_32,
2003 "mov $0xffffffff,%rcx\n\t"
2004 "and %rcx,%rax");
2005 break;
2006 default:
2007 emit_error = 1;
2008 }
2009 }
2010
2011 static void
2012 amd64_emit_swap (void)
2013 {
2014 EMIT_ASM (amd64_swap,
2015 "mov %rax,%rcx\n\t"
2016 "pop %rax\n\t"
2017 "push %rcx");
2018 }
2019
2020 static void
2021 amd64_emit_stack_adjust (int n)
2022 {
2023 unsigned char buf[16];
2024 int i;
2025 CORE_ADDR buildaddr = current_insn_ptr;
2026
2027 i = 0;
2028 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2029 buf[i++] = 0x8d;
2030 buf[i++] = 0x64;
2031 buf[i++] = 0x24;
2032 /* This only handles adjustments up to 16, but we don't expect any more. */
2033 buf[i++] = n * 8;
2034 append_insns (&buildaddr, i, buf);
2035 current_insn_ptr = buildaddr;
2036 }
2037
2038 /* FN's prototype is `LONGEST(*fn)(int)'. */
2039
2040 static void
2041 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2042 {
2043 unsigned char buf[16];
2044 int i;
2045 CORE_ADDR buildaddr;
2046
2047 buildaddr = current_insn_ptr;
2048 i = 0;
2049 buf[i++] = 0xbf; /* movl $<n>,%edi */
2050 memcpy (&buf[i], &arg1, sizeof (arg1));
2051 i += 4;
2052 append_insns (&buildaddr, i, buf);
2053 current_insn_ptr = buildaddr;
2054 amd64_emit_call (fn);
2055 }
2056
2057 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2058
2059 static void
2060 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2061 {
2062 unsigned char buf[16];
2063 int i;
2064 CORE_ADDR buildaddr;
2065
2066 buildaddr = current_insn_ptr;
2067 i = 0;
2068 buf[i++] = 0xbf; /* movl $<n>,%edi */
2069 memcpy (&buf[i], &arg1, sizeof (arg1));
2070 i += 4;
2071 append_insns (&buildaddr, i, buf);
2072 current_insn_ptr = buildaddr;
2073 EMIT_ASM (amd64_void_call_2_a,
2074 /* Save away a copy of the stack top. */
2075 "push %rax\n\t"
2076 /* Also pass top as the second argument. */
2077 "mov %rax,%rsi");
2078 amd64_emit_call (fn);
2079 EMIT_ASM (amd64_void_call_2_b,
2080 /* Restore the stack top, %rax may have been trashed. */
2081 "pop %rax");
2082 }
2083
2084 static void
2085 amd64_emit_eq_goto (int *offset_p, int *size_p)
2086 {
2087 EMIT_ASM (amd64_eq,
2088 "cmp %rax,(%rsp)\n\t"
2089 "jne .Lamd64_eq_fallthru\n\t"
2090 "lea 0x8(%rsp),%rsp\n\t"
2091 "pop %rax\n\t"
2092 /* jmp, but don't trust the assembler to choose the right jump */
2093 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2094 ".Lamd64_eq_fallthru:\n\t"
2095 "lea 0x8(%rsp),%rsp\n\t"
2096 "pop %rax");
2097
2098 if (offset_p)
2099     *offset_p = 13;
2100 if (size_p)
2101 *size_p = 4;
2102 }
2103
2104 static void
2105 amd64_emit_ne_goto (int *offset_p, int *size_p)
2106 {
2107 EMIT_ASM (amd64_ne,
2108 "cmp %rax,(%rsp)\n\t"
2109 "je .Lamd64_ne_fallthru\n\t"
2110 "lea 0x8(%rsp),%rsp\n\t"
2111 "pop %rax\n\t"
2112 /* jmp, but don't trust the assembler to choose the right jump */
2113 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2114 ".Lamd64_ne_fallthru:\n\t"
2115 "lea 0x8(%rsp),%rsp\n\t"
2116 "pop %rax");
2117
2118 if (offset_p)
2119 *offset_p = 13;
2120 if (size_p)
2121 *size_p = 4;
2122 }
2123
2124 static void
2125 amd64_emit_lt_goto (int *offset_p, int *size_p)
2126 {
2127 EMIT_ASM (amd64_lt,
2128 "cmp %rax,(%rsp)\n\t"
2129 "jnl .Lamd64_lt_fallthru\n\t"
2130 "lea 0x8(%rsp),%rsp\n\t"
2131 "pop %rax\n\t"
2132 /* jmp, but don't trust the assembler to choose the right jump */
2133 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2134 ".Lamd64_lt_fallthru:\n\t"
2135 "lea 0x8(%rsp),%rsp\n\t"
2136 "pop %rax");
2137
2138 if (offset_p)
2139 *offset_p = 13;
2140 if (size_p)
2141 *size_p = 4;
2142 }
2143
2144 static void
2145 amd64_emit_le_goto (int *offset_p, int *size_p)
2146 {
2147 EMIT_ASM (amd64_le,
2148 "cmp %rax,(%rsp)\n\t"
2149 "jnle .Lamd64_le_fallthru\n\t"
2150 "lea 0x8(%rsp),%rsp\n\t"
2151 "pop %rax\n\t"
2152 /* jmp, but don't trust the assembler to choose the right jump */
2153 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2154 ".Lamd64_le_fallthru:\n\t"
2155 "lea 0x8(%rsp),%rsp\n\t"
2156 "pop %rax");
2157
2158 if (offset_p)
2159 *offset_p = 13;
2160 if (size_p)
2161 *size_p = 4;
2162 }
2163
2164 static void
2165 amd64_emit_gt_goto (int *offset_p, int *size_p)
2166 {
2167 EMIT_ASM (amd64_gt,
2168 "cmp %rax,(%rsp)\n\t"
2169 "jng .Lamd64_gt_fallthru\n\t"
2170 "lea 0x8(%rsp),%rsp\n\t"
2171 "pop %rax\n\t"
2172 /* jmp, but don't trust the assembler to choose the right jump */
2173 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2174 ".Lamd64_gt_fallthru:\n\t"
2175 "lea 0x8(%rsp),%rsp\n\t"
2176 "pop %rax");
2177
2178 if (offset_p)
2179 *offset_p = 13;
2180 if (size_p)
2181 *size_p = 4;
2182 }
2183
2184 static void
2185 amd64_emit_ge_goto (int *offset_p, int *size_p)
2186 {
2187 EMIT_ASM (amd64_ge,
2188 "cmp %rax,(%rsp)\n\t"
2189 "jnge .Lamd64_ge_fallthru\n\t"
2190 ".Lamd64_ge_jump:\n\t"
2191 "lea 0x8(%rsp),%rsp\n\t"
2192 "pop %rax\n\t"
2193 /* jmp, but don't trust the assembler to choose the right jump */
2194 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2195 ".Lamd64_ge_fallthru:\n\t"
2196 "lea 0x8(%rsp),%rsp\n\t"
2197 "pop %rax");
2198
2199 if (offset_p)
2200 *offset_p = 13;
2201 if (size_p)
2202 *size_p = 4;
2203 }
2204
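/* The emit_ops vector used when compiling agent expressions into
   native code for amd64 inferiors.  */
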
2205 struct emit_ops amd64_emit_ops =
2206 {
2207 amd64_emit_prologue,
2208 amd64_emit_epilogue,
2209 amd64_emit_add,
2210 amd64_emit_sub,
2211 amd64_emit_mul,
2212 amd64_emit_lsh,
2213 amd64_emit_rsh_signed,
2214 amd64_emit_rsh_unsigned,
2215 amd64_emit_ext,
2216 amd64_emit_log_not,
2217 amd64_emit_bit_and,
2218 amd64_emit_bit_or,
2219 amd64_emit_bit_xor,
2220 amd64_emit_bit_not,
2221 amd64_emit_equal,
2222 amd64_emit_less_signed,
2223 amd64_emit_less_unsigned,
2224 amd64_emit_ref,
2225 amd64_emit_if_goto,
2226 amd64_emit_goto,
2227 amd64_write_goto_address,
2228 amd64_emit_const,
2229 amd64_emit_call,
2230 amd64_emit_reg,
2231 amd64_emit_pop,
2232 amd64_emit_stack_flush,
2233 amd64_emit_zero_ext,
2234 amd64_emit_swap,
2235 amd64_emit_stack_adjust,
2236 amd64_emit_int_call_1,
2237 amd64_emit_void_call_2,
2238 amd64_emit_eq_goto,
2239 amd64_emit_ne_goto,
2240 amd64_emit_lt_goto,
2241 amd64_emit_le_goto,
2242 amd64_emit_gt_goto,
2243 amd64_emit_ge_goto
2244 };
2245
2246 #endif /* __x86_64__ */
2247
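/* The i386 counterparts of the emit routines follow.  A 64-bit stack
   entry is cached with its low 32 bits in %eax and its high 32 bits
   in %ebx; the remaining entries live on the memory stack.  */
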
2248 static void
2249 i386_emit_prologue (void)
2250 {
2251 EMIT_ASM32 (i386_prologue,
2252 "push %ebp\n\t"
2253 "mov %esp,%ebp\n\t"
2254 "push %ebx");
2255 /* At this point, the raw regs base address is at 8(%ebp), and the
2256 value pointer is at 12(%ebp). */
2257 }
2258
2259 static void
2260 i386_emit_epilogue (void)
2261 {
2262 EMIT_ASM32 (i386_epilogue,
2263 "mov 12(%ebp),%ecx\n\t"
2264 "mov %eax,(%ecx)\n\t"
2265 "mov %ebx,0x4(%ecx)\n\t"
2266 "xor %eax,%eax\n\t"
2267 "pop %ebx\n\t"
2268 "pop %ebp\n\t"
2269 "ret");
2270 }
2271
2272 static void
2273 i386_emit_add (void)
2274 {
2275 EMIT_ASM32 (i386_add,
2276 "add (%esp),%eax\n\t"
2277 "adc 0x4(%esp),%ebx\n\t"
2278 "lea 0x8(%esp),%esp");
2279 }
2280
2281 static void
2282 i386_emit_sub (void)
2283 {
2284 EMIT_ASM32 (i386_sub,
2285 "subl %eax,(%esp)\n\t"
2286 "sbbl %ebx,4(%esp)\n\t"
2287 "pop %eax\n\t"
2288 "pop %ebx\n\t");
2289 }
2290
2291 static void
2292 i386_emit_mul (void)
2293 {
2294 emit_error = 1;
2295 }
2296
2297 static void
2298 i386_emit_lsh (void)
2299 {
2300 emit_error = 1;
2301 }
2302
2303 static void
2304 i386_emit_rsh_signed (void)
2305 {
2306 emit_error = 1;
2307 }
2308
2309 static void
2310 i386_emit_rsh_unsigned (void)
2311 {
2312 emit_error = 1;
2313 }
2314
2315 static void
2316 i386_emit_ext (int arg)
2317 {
2318 switch (arg)
2319 {
2320 case 8:
2321 EMIT_ASM32 (i386_ext_8,
2322 "cbtw\n\t"
2323 "cwtl\n\t"
2324 "movl %eax,%ebx\n\t"
2325 "sarl $31,%ebx");
2326 break;
2327 case 16:
2328 EMIT_ASM32 (i386_ext_16,
2329 "cwtl\n\t"
2330 "movl %eax,%ebx\n\t"
2331 "sarl $31,%ebx");
2332 break;
2333 case 32:
2334 EMIT_ASM32 (i386_ext_32,
2335 "movl %eax,%ebx\n\t"
2336 "sarl $31,%ebx");
2337 break;
2338 default:
2339 emit_error = 1;
2340 }
2341 }
2342
2343 static void
2344 i386_emit_log_not (void)
2345 {
2346 EMIT_ASM32 (i386_log_not,
2347 "or %ebx,%eax\n\t"
2348 "test %eax,%eax\n\t"
2349 "sete %cl\n\t"
2350 "xor %ebx,%ebx\n\t"
2351 "movzbl %cl,%eax");
2352 }
2353
2354 static void
2355 i386_emit_bit_and (void)
2356 {
2357 EMIT_ASM32 (i386_and,
2358 "and (%esp),%eax\n\t"
2359 "and 0x4(%esp),%ebx\n\t"
2360 "lea 0x8(%esp),%esp");
2361 }
2362
2363 static void
2364 i386_emit_bit_or (void)
2365 {
2366 EMIT_ASM32 (i386_or,
2367 "or (%esp),%eax\n\t"
2368 "or 0x4(%esp),%ebx\n\t"
2369 "lea 0x8(%esp),%esp");
2370 }
2371
2372 static void
2373 i386_emit_bit_xor (void)
2374 {
2375 EMIT_ASM32 (i386_xor,
2376 "xor (%esp),%eax\n\t"
2377 "xor 0x4(%esp),%ebx\n\t"
2378 "lea 0x8(%esp),%esp");
2379 }
2380
2381 static void
2382 i386_emit_bit_not (void)
2383 {
2384 EMIT_ASM32 (i386_bit_not,
2385 "xor $0xffffffff,%eax\n\t"
2386 "xor $0xffffffff,%ebx\n\t");
2387 }
2388
2389 static void
2390 i386_emit_equal (void)
2391 {
2392 EMIT_ASM32 (i386_equal,
2393 "cmpl %ebx,4(%esp)\n\t"
2394 "jne .Li386_equal_false\n\t"
2395 "cmpl %eax,(%esp)\n\t"
2396 "je .Li386_equal_true\n\t"
2397 ".Li386_equal_false:\n\t"
2398 "xor %eax,%eax\n\t"
2399 "jmp .Li386_equal_end\n\t"
2400 ".Li386_equal_true:\n\t"
2401 "mov $1,%eax\n\t"
2402 ".Li386_equal_end:\n\t"
2403 "xor %ebx,%ebx\n\t"
2404 "lea 0x8(%esp),%esp");
2405 }
2406
2407 static void
2408 i386_emit_less_signed (void)
2409 {
2410 EMIT_ASM32 (i386_less_signed,
2411 "cmpl %ebx,4(%esp)\n\t"
2412 "jl .Li386_less_signed_true\n\t"
2413 "jne .Li386_less_signed_false\n\t"
2414 "cmpl %eax,(%esp)\n\t"
2415 "jl .Li386_less_signed_true\n\t"
2416 ".Li386_less_signed_false:\n\t"
2417 "xor %eax,%eax\n\t"
2418 "jmp .Li386_less_signed_end\n\t"
2419 ".Li386_less_signed_true:\n\t"
2420 "mov $1,%eax\n\t"
2421 ".Li386_less_signed_end:\n\t"
2422 "xor %ebx,%ebx\n\t"
2423 "lea 0x8(%esp),%esp");
2424 }
2425
2426 static void
2427 i386_emit_less_unsigned (void)
2428 {
2429 EMIT_ASM32 (i386_less_unsigned,
2430 "cmpl %ebx,4(%esp)\n\t"
2431 "jb .Li386_less_unsigned_true\n\t"
2432 "jne .Li386_less_unsigned_false\n\t"
2433 "cmpl %eax,(%esp)\n\t"
2434 "jb .Li386_less_unsigned_true\n\t"
2435 ".Li386_less_unsigned_false:\n\t"
2436 "xor %eax,%eax\n\t"
2437 "jmp .Li386_less_unsigned_end\n\t"
2438 ".Li386_less_unsigned_true:\n\t"
2439 "mov $1,%eax\n\t"
2440 ".Li386_less_unsigned_end:\n\t"
2441 "xor %ebx,%ebx\n\t"
2442 "lea 0x8(%esp),%esp");
2443 }
2444
2445 static void
2446 i386_emit_ref (int size)
2447 {
2448 switch (size)
2449 {
2450 case 1:
2451 EMIT_ASM32 (i386_ref1,
2452 "movb (%eax),%al");
2453 break;
2454 case 2:
2455 EMIT_ASM32 (i386_ref2,
2456 "movw (%eax),%ax");
2457 break;
2458 case 4:
2459 EMIT_ASM32 (i386_ref4,
2460 "movl (%eax),%eax");
2461 break;
2462 case 8:
2463 EMIT_ASM32 (i386_ref8,
2464 "movl 4(%eax),%ebx\n\t"
2465 "movl (%eax),%eax");
2466 break;
2467 }
2468 }
2469
2470 static void
2471 i386_emit_if_goto (int *offset_p, int *size_p)
2472 {
2473 EMIT_ASM32 (i386_if_goto,
2474 "mov %eax,%ecx\n\t"
2475 "or %ebx,%ecx\n\t"
2476 "pop %eax\n\t"
2477 "pop %ebx\n\t"
2478 "cmpl $0,%ecx\n\t"
2479 /* Don't trust the assembler to choose the right jump */
2480 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2481
2482 if (offset_p)
2483     *offset_p = 11; /* Be sure that this matches the sequence above.  */
2484 if (size_p)
2485 *size_p = 4;
2486 }
2487
2488 static void
2489 i386_emit_goto (int *offset_p, int *size_p)
2490 {
2491 EMIT_ASM32 (i386_goto,
2492 /* Don't trust the assembler to choose the right jump */
2493 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2494 if (offset_p)
2495 *offset_p = 1;
2496 if (size_p)
2497 *size_p = 4;
2498 }
2499
2500 static void
2501 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2502 {
2503 int diff = (to - (from + size));
2504 unsigned char buf[sizeof (int)];
2505
2506 /* We're only doing 4-byte sizes at the moment. */
2507 if (size != 4)
2508 {
2509 emit_error = 1;
2510 return;
2511 }
2512
2513 memcpy (buf, &diff, sizeof (int));
2514 target_write_memory (from, buf, sizeof (int));
2515 }
2516
2517 static void
2518 i386_emit_const (LONGEST num)
2519 {
2520 unsigned char buf[16];
2521 int i, hi, lo;
2522 CORE_ADDR buildaddr = current_insn_ptr;
2523
2524 i = 0;
2525 buf[i++] = 0xb8; /* mov $<n>,%eax */
2526 lo = num & 0xffffffff;
2527 memcpy (&buf[i], &lo, sizeof (lo));
2528 i += 4;
2529 hi = ((num >> 32) & 0xffffffff);
2530 if (hi)
2531 {
2532 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2533 memcpy (&buf[i], &hi, sizeof (hi));
2534 i += 4;
2535 }
2536 else
2537 {
2538 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2539 }
2540 append_insns (&buildaddr, i, buf);
2541 current_insn_ptr = buildaddr;
2542 }
2543
2544 static void
2545 i386_emit_call (CORE_ADDR fn)
2546 {
2547 unsigned char buf[16];
2548 int i, offset;
2549 CORE_ADDR buildaddr;
2550
2551 buildaddr = current_insn_ptr;
2552 i = 0;
2553 buf[i++] = 0xe8; /* call <reladdr> */
2554 offset = ((int) fn) - (buildaddr + 5);
2555 memcpy (buf + 1, &offset, 4);
2556 append_insns (&buildaddr, 5, buf);
2557 current_insn_ptr = buildaddr;
2558 }
2559
2560 static void
2561 i386_emit_reg (int reg)
2562 {
2563 unsigned char buf[16];
2564 int i;
2565 CORE_ADDR buildaddr;
2566
2567 EMIT_ASM32 (i386_reg_a,
2568 "sub $0x8,%esp");
2569 buildaddr = current_insn_ptr;
2570 i = 0;
2571 buf[i++] = 0xb8; /* mov $<n>,%eax */
2572 memcpy (&buf[i], &reg, sizeof (reg));
2573 i += 4;
2574 append_insns (&buildaddr, i, buf);
2575 current_insn_ptr = buildaddr;
2576 EMIT_ASM32 (i386_reg_b,
2577 "mov %eax,4(%esp)\n\t"
2578 "mov 8(%ebp),%eax\n\t"
2579 "mov %eax,(%esp)");
2580 i386_emit_call (get_raw_reg_func_addr ());
2581 EMIT_ASM32 (i386_reg_c,
2582 "xor %ebx,%ebx\n\t"
2583 "lea 0x8(%esp),%esp");
2584 }
2585
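/* Pop the expression stack: reload the cached top of stack, low half
   into %eax and high half into %ebx.  */
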
2586 static void
2587 i386_emit_pop (void)
2588 {
2589 EMIT_ASM32 (i386_pop,
2590 "pop %eax\n\t"
2591 "pop %ebx");
2592 }
2593
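/* Flush the cached top of stack to memory: push the high half (%ebx),
   then the low half (%eax).  */
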
2594 static void
2595 i386_emit_stack_flush (void)
2596 {
2597 EMIT_ASM32 (i386_stack_flush,
2598 "push %ebx\n\t"
2599 "push %eax");
2600 }
2601
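/* Zero-extend the top of stack from ARG bits; the high half (%ebx) is
   cleared.  */
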
2602 static void
2603 i386_emit_zero_ext (int arg)
2604 {
2605 switch (arg)
2606 {
2607 case 8:
2608 EMIT_ASM32 (i386_zero_ext_8,
2609 "and $0xff,%eax\n\t"
2610 "xor %ebx,%ebx");
2611 break;
2612 case 16:
2613 EMIT_ASM32 (i386_zero_ext_16,
2614 "and $0xffff,%eax\n\t"
2615 "xor %ebx,%ebx");
2616 break;
2617 case 32:
2618 EMIT_ASM32 (i386_zero_ext_32,
2619 "xor %ebx,%ebx");
2620 break;
2621 default:
2622 emit_error = 1;
2623 }
2624 }
2625
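/* Exchange the two topmost entries of the expression stack.  */
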
2626 static void
2627 i386_emit_swap (void)
2628 {
2629 EMIT_ASM32 (i386_swap,
2630 "mov %eax,%ecx\n\t"
2631 "mov %ebx,%edx\n\t"
2632 "pop %eax\n\t"
2633 "pop %ebx\n\t"
2634 "push %edx\n\t"
2635 "push %ecx");
2636 }
2637
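/* Discard N entries (8 bytes each) from the memory stack.  */
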
2638 static void
2639 i386_emit_stack_adjust (int n)
2640 {
2641 unsigned char buf[16];
2642 int i;
2643 CORE_ADDR buildaddr = current_insn_ptr;
2644
2645 i = 0;
2646 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2647 buf[i++] = 0x64;
2648 buf[i++] = 0x24;
2649 buf[i++] = n * 8;
2650 append_insns (&buildaddr, i, buf);
2651 current_insn_ptr = buildaddr;
2652 }
2653
2654 /* FN's prototype is `LONGEST(*fn)(int)'. */
2655
2656 static void
2657 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2658 {
2659 unsigned char buf[16];
2660 int i;
2661 CORE_ADDR buildaddr;
2662
2663 EMIT_ASM32 (i386_int_call_1_a,
2664 /* Reserve a bit of stack space. */
2665 "sub $0x8,%esp");
2666 /* Put the one argument on the stack. */
2667 buildaddr = current_insn_ptr;
2668 i = 0;
2669 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2670 buf[i++] = 0x04;
2671 buf[i++] = 0x24;
2672 memcpy (&buf[i], &arg1, sizeof (arg1));
2673 i += 4;
2674 append_insns (&buildaddr, i, buf);
2675 current_insn_ptr = buildaddr;
2676 i386_emit_call (fn);
2677 EMIT_ASM32 (i386_int_call_1_c,
2678 "mov %edx,%ebx\n\t"
2679 "lea 0x8(%esp),%esp");
2680 }
2681
2682 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2683
2684 static void
2685 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2686 {
2687 unsigned char buf[16];
2688 int i;
2689 CORE_ADDR buildaddr;
2690
2691 EMIT_ASM32 (i386_void_call_2_a,
2692 /* Preserve %eax only; we don't have to worry about %ebx. */
2693 "push %eax\n\t"
2694 /* Reserve a bit of stack space for arguments. */
2695 "sub $0x10,%esp\n\t"
2696 /* Copy "top" to the second argument position. (Note that
2697 		 we can't assume the function won't scribble on its
2698 arguments, so don't try to restore from this.) */
2699 "mov %eax,4(%esp)\n\t"
2700 "mov %ebx,8(%esp)");
2701 /* Put the first argument on the stack. */
2702 buildaddr = current_insn_ptr;
2703 i = 0;
2704 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2705 buf[i++] = 0x04;
2706 buf[i++] = 0x24;
2707 memcpy (&buf[i], &arg1, sizeof (arg1));
2708 i += 4;
2709 append_insns (&buildaddr, i, buf);
2710 current_insn_ptr = buildaddr;
2711 i386_emit_call (fn);
2712 EMIT_ASM32 (i386_void_call_2_b,
2713 "lea 0x10(%esp),%esp\n\t"
2714 /* Restore original stack top. */
2715 "pop %eax");
2716 }
2717
2718
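/* As on amd64, the i386_emit_*_goto routines compare the two topmost
   stack entries, pop both, and either take a jump whose 4-byte
   displacement is filled in later by i386_write_goto_address, or fall
   through.  *OFFSET_P and *SIZE_P describe where that displacement
   sits in the emitted code.  */
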
2719 static void
2720 i386_emit_eq_goto (int *offset_p, int *size_p)
2721 {
2722 EMIT_ASM32 (eq,
2723 	      /* Check the low half first; it is more likely to be the decider.  */
2724 "cmpl %eax,(%esp)\n\t"
2725 "jne .Leq_fallthru\n\t"
2726 "cmpl %ebx,4(%esp)\n\t"
2727 "jne .Leq_fallthru\n\t"
2728 "lea 0x8(%esp),%esp\n\t"
2729 "pop %eax\n\t"
2730 "pop %ebx\n\t"
2731 /* jmp, but don't trust the assembler to choose the right jump */
2732 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2733 ".Leq_fallthru:\n\t"
2734 "lea 0x8(%esp),%esp\n\t"
2735 "pop %eax\n\t"
2736 "pop %ebx");
2737
2738 if (offset_p)
2739 *offset_p = 18;
2740 if (size_p)
2741 *size_p = 4;
2742 }
2743
2744 static void
2745 i386_emit_ne_goto (int *offset_p, int *size_p)
2746 {
2747 EMIT_ASM32 (ne,
2748 	      /* Check the low half first; it is more likely to be the decider.  */
2749 "cmpl %eax,(%esp)\n\t"
2750 "jne .Lne_jump\n\t"
2751 "cmpl %ebx,4(%esp)\n\t"
2752 "je .Lne_fallthru\n\t"
2753 ".Lne_jump:\n\t"
2754 "lea 0x8(%esp),%esp\n\t"
2755 "pop %eax\n\t"
2756 "pop %ebx\n\t"
2757 /* jmp, but don't trust the assembler to choose the right jump */
2758 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2759 ".Lne_fallthru:\n\t"
2760 "lea 0x8(%esp),%esp\n\t"
2761 "pop %eax\n\t"
2762 "pop %ebx");
2763
2764 if (offset_p)
2765 *offset_p = 18;
2766 if (size_p)
2767 *size_p = 4;
2768 }
2769
2770 static void
2771 i386_emit_lt_goto (int *offset_p, int *size_p)
2772 {
2773 EMIT_ASM32 (lt,
2774 "cmpl %ebx,4(%esp)\n\t"
2775 "jl .Llt_jump\n\t"
2776 "jne .Llt_fallthru\n\t"
2777 "cmpl %eax,(%esp)\n\t"
2778 "jnl .Llt_fallthru\n\t"
2779 ".Llt_jump:\n\t"
2780 "lea 0x8(%esp),%esp\n\t"
2781 "pop %eax\n\t"
2782 "pop %ebx\n\t"
2783 /* jmp, but don't trust the assembler to choose the right jump */
2784 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2785 ".Llt_fallthru:\n\t"
2786 "lea 0x8(%esp),%esp\n\t"
2787 "pop %eax\n\t"
2788 "pop %ebx");
2789
2790 if (offset_p)
2791 *offset_p = 20;
2792 if (size_p)
2793 *size_p = 4;
2794 }
2795
2796 static void
2797 i386_emit_le_goto (int *offset_p, int *size_p)
2798 {
2799 EMIT_ASM32 (le,
2800 "cmpl %ebx,4(%esp)\n\t"
2801 "jle .Lle_jump\n\t"
2802 "jne .Lle_fallthru\n\t"
2803 "cmpl %eax,(%esp)\n\t"
2804 "jnle .Lle_fallthru\n\t"
2805 ".Lle_jump:\n\t"
2806 "lea 0x8(%esp),%esp\n\t"
2807 "pop %eax\n\t"
2808 "pop %ebx\n\t"
2809 /* jmp, but don't trust the assembler to choose the right jump */
2810 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2811 ".Lle_fallthru:\n\t"
2812 "lea 0x8(%esp),%esp\n\t"
2813 "pop %eax\n\t"
2814 "pop %ebx");
2815
2816 if (offset_p)
2817 *offset_p = 20;
2818 if (size_p)
2819 *size_p = 4;
2820 }
2821
2822 static void
2823 i386_emit_gt_goto (int *offset_p, int *size_p)
2824 {
2825 EMIT_ASM32 (gt,
2826 "cmpl %ebx,4(%esp)\n\t"
2827 "jg .Lgt_jump\n\t"
2828 "jne .Lgt_fallthru\n\t"
2829 "cmpl %eax,(%esp)\n\t"
2830 "jng .Lgt_fallthru\n\t"
2831 ".Lgt_jump:\n\t"
2832 "lea 0x8(%esp),%esp\n\t"
2833 "pop %eax\n\t"
2834 "pop %ebx\n\t"
2835 /* jmp, but don't trust the assembler to choose the right jump */
2836 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2837 ".Lgt_fallthru:\n\t"
2838 "lea 0x8(%esp),%esp\n\t"
2839 "pop %eax\n\t"
2840 "pop %ebx");
2841
2842 if (offset_p)
2843 *offset_p = 20;
2844 if (size_p)
2845 *size_p = 4;
2846 }
2847
2848 static void
2849 i386_emit_ge_goto (int *offset_p, int *size_p)
2850 {
2851 EMIT_ASM32 (ge,
2852 "cmpl %ebx,4(%esp)\n\t"
2853 "jge .Lge_jump\n\t"
2854 "jne .Lge_fallthru\n\t"
2855 "cmpl %eax,(%esp)\n\t"
2856 "jnge .Lge_fallthru\n\t"
2857 ".Lge_jump:\n\t"
2858 "lea 0x8(%esp),%esp\n\t"
2859 "pop %eax\n\t"
2860 "pop %ebx\n\t"
2861 /* jmp, but don't trust the assembler to choose the right jump */
2862 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2863 ".Lge_fallthru:\n\t"
2864 "lea 0x8(%esp),%esp\n\t"
2865 "pop %eax\n\t"
2866 "pop %ebx");
2867
2868 if (offset_p)
2869 *offset_p = 20;
2870 if (size_p)
2871 *size_p = 4;
2872 }
2873
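/* The emit_ops vector used when compiling agent expressions into
   native code for i386 inferiors.  */
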
2874 struct emit_ops i386_emit_ops =
2875 {
2876 i386_emit_prologue,
2877 i386_emit_epilogue,
2878 i386_emit_add,
2879 i386_emit_sub,
2880 i386_emit_mul,
2881 i386_emit_lsh,
2882 i386_emit_rsh_signed,
2883 i386_emit_rsh_unsigned,
2884 i386_emit_ext,
2885 i386_emit_log_not,
2886 i386_emit_bit_and,
2887 i386_emit_bit_or,
2888 i386_emit_bit_xor,
2889 i386_emit_bit_not,
2890 i386_emit_equal,
2891 i386_emit_less_signed,
2892 i386_emit_less_unsigned,
2893 i386_emit_ref,
2894 i386_emit_if_goto,
2895 i386_emit_goto,
2896 i386_write_goto_address,
2897 i386_emit_const,
2898 i386_emit_call,
2899 i386_emit_reg,
2900 i386_emit_pop,
2901 i386_emit_stack_flush,
2902 i386_emit_zero_ext,
2903 i386_emit_swap,
2904 i386_emit_stack_adjust,
2905 i386_emit_int_call_1,
2906 i386_emit_void_call_2,
2907 i386_emit_eq_goto,
2908 i386_emit_ne_goto,
2909 i386_emit_lt_goto,
2910 i386_emit_le_goto,
2911 i386_emit_gt_goto,
2912 i386_emit_ge_goto
2913 };
2914
2915
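/* Return the emit_ops vector to use for the current inferior:
   amd64_emit_ops if the target description is 64-bit, i386_emit_ops
   otherwise.  */
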
2916 static struct emit_ops *
2917 x86_emit_ops (void)
2918 {
2919 #ifdef __x86_64__
2920 if (is_64bit_tdesc ())
2921 return &amd64_emit_ops;
2922 else
2923 #endif
2924 return &i386_emit_ops;
2925 }
2926
2927 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2928
2929 const gdb_byte *
2930 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2931 {
2932 *size = x86_breakpoint_len;
2933 return x86_breakpoint;
2934 }
2935
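/* Implementation of linux_target_ops method "supports_range_stepping".  */
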
2936 static int
2937 x86_supports_range_stepping (void)
2938 {
2939 return 1;
2940 }
2941
2942 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2943 */
2944
2945 static int
2946 x86_supports_hardware_single_step (void)
2947 {
2948 return 1;
2949 }
2950
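/* Implementation of linux_target_ops method "get_ipa_tdesc_idx".  */
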
2951 static int
2952 x86_get_ipa_tdesc_idx (void)
2953 {
2954 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2955 const struct target_desc *tdesc = regcache->tdesc;
2956
2957 #ifdef __x86_64__
2958 return amd64_get_ipa_tdesc_idx (tdesc);
2959 #endif
2960
2961 if (tdesc == tdesc_i386_linux_no_xml)
2962 return X86_TDESC_SSE;
2963
2964 return i386_get_ipa_tdesc_idx (tdesc);
2965 }
2966
2967 /* This is initialized assuming an amd64 target.
2968    'low_arch_setup' will correct it for i386 or amd64 targets.  */
2969
2970 struct linux_target_ops the_low_target =
2971 {
2972 x86_get_thread_area,
2973 x86_install_fast_tracepoint_jump_pad,
2974 x86_emit_ops,
2975 x86_get_min_fast_tracepoint_insn_len,
2976 x86_supports_range_stepping,
2977 x86_supports_hardware_single_step,
2978 x86_get_syscall_trapinfo,
2979 x86_get_ipa_tdesc_idx,
2980 };
2981
2982 /* The linux target ops object. */
2983
2984 linux_process_target *the_linux_target = &the_x86_target;
2985
2986 void
2987 initialize_low_arch (void)
2988 {
2989 /* Initialize the Linux target descriptions. */
2990 #ifdef __x86_64__
2991 tdesc_amd64_linux_no_xml = allocate_target_description ();
2992 copy_target_description (tdesc_amd64_linux_no_xml,
2993 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2994 false));
2995 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2996 #endif
2997
2998 tdesc_i386_linux_no_xml = allocate_target_description ();
2999 copy_target_description (tdesc_i386_linux_no_xml,
3000 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3001 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3002
3003 initialize_regsets_info (&x86_regsets_info);
3004 }