Fix regression caused by recently added syscall restart code
gdb/gdbserver/linux-x86-low.c (binutils-gdb)
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2019 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "common/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "common/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Per-process arch-specific data we want to keep. */
96
97 struct arch_process_info
98 {
99 struct x86_debug_reg_state debug_reg_state;
100 };
101
102 #ifdef __x86_64__
103
104 /* Mapping between the general-purpose registers in `struct user'
105 format and GDB's register array layout.
106 Note that the transfer layout uses 64-bit regs. */
107 static /*const*/ int i386_regmap[] =
108 {
109 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
110 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
111 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
112 DS * 8, ES * 8, FS * 8, GS * 8
113 };
114
115 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
116
117 /* So that the code below doesn't have to care whether it is i386 or amd64. */
118 #define ORIG_EAX ORIG_RAX
119 #define REGSIZE 8
120
121 static const int x86_64_regmap[] =
122 {
123 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
124 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
125 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
126 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
127 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
128 DS * 8, ES * 8, FS * 8, GS * 8,
129 -1, -1, -1, -1, -1, -1, -1, -1,
130 -1, -1, -1, -1, -1, -1, -1, -1,
131 -1, -1, -1, -1, -1, -1, -1, -1,
132 -1,
133 -1, -1, -1, -1, -1, -1, -1, -1,
134 ORIG_RAX * 8,
135 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
136 21 * 8, 22 * 8,
137 #else
138 -1, -1,
139 #endif
140 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
141 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
142 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
143 -1, -1, -1, -1, -1, -1, -1, -1,
144 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
145 -1, -1, -1, -1, -1, -1, -1, -1,
146 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
147 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
148 -1, -1, -1, -1, -1, -1, -1, -1,
149 -1, -1, -1, -1, -1, -1, -1, -1,
150 -1, -1, -1, -1, -1, -1, -1, -1,
151 -1 /* pkru */
152 };
153
154 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
155 #define X86_64_USER_REGS (GS + 1)
156
157 #else /* ! __x86_64__ */
158
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout. */
161 static /*const*/ int i386_regmap[] =
162 {
163 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
164 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
165 EIP * 4, EFL * 4, CS * 4, SS * 4,
166 DS * 4, ES * 4, FS * 4, GS * 4
167 };
168
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
170
171 #define REGSIZE 4
172
173 #endif
174
175 #ifdef __x86_64__
176
177 /* Returns true if the current inferior belongs to an x86-64 process,
178 per the tdesc. */
179
180 static int
181 is_64bit_tdesc (void)
182 {
183 struct regcache *regcache = get_thread_regcache (current_thread, 0);
184
185 return register_size (regcache->tdesc, 0) == 8;
186 }
187
188 #endif
189
190 \f
191 /* Called by libthread_db. */
192
193 ps_err_e
194 ps_get_thread_area (struct ps_prochandle *ph,
195 lwpid_t lwpid, int idx, void **base)
196 {
197 #ifdef __x86_64__
198 int use_64bit = is_64bit_tdesc ();
199
200 if (use_64bit)
201 {
202 switch (idx)
203 {
204 case FS:
205 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
206 return PS_OK;
207 break;
208 case GS:
209 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
210 return PS_OK;
211 break;
212 default:
213 return PS_BADADDR;
214 }
215 return PS_ERR;
216 }
217 #endif
218
219 {
220 unsigned int desc[4];
221
222 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
223 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
224 return PS_ERR;
225
226 /* Ensure we properly extend the value to 64-bits for x86_64. */
227 *base = (void *) (uintptr_t) desc[1];
228 return PS_OK;
229 }
230 }
231
232 /* Get the thread area address. This is used to recognize which
233 thread is which when tracing with the in-process agent library. We
234 don't read anything from the address, and treat it as opaque; it's
235 the address itself that we assume is unique per-thread. */
236
237 static int
238 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
239 {
240 #ifdef __x86_64__
241 int use_64bit = is_64bit_tdesc ();
242
243 if (use_64bit)
244 {
245 void *base;
246 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
247 {
248 *addr = (CORE_ADDR) (uintptr_t) base;
249 return 0;
250 }
251
252 return -1;
253 }
254 #endif
255
256 {
257 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
258 struct thread_info *thr = get_lwp_thread (lwp);
259 struct regcache *regcache = get_thread_regcache (thr, 1);
260 unsigned int desc[4];
261 ULONGEST gs = 0;
262 const int reg_thread_area = 3; /* bits to scale down register value. */
263 int idx;
264
265 collect_register_by_name (regcache, "gs", &gs);
266
267 idx = gs >> reg_thread_area;
268
269 if (ptrace (PTRACE_GET_THREAD_AREA,
270 lwpid_of (thr),
271 (void *) (long) idx, (unsigned long) &desc) < 0)
272 return -1;
273
274 *addr = desc[1];
275 return 0;
276 }
277 }
278
279
280 \f
281 static int
282 x86_cannot_store_register (int regno)
283 {
284 #ifdef __x86_64__
285 if (is_64bit_tdesc ())
286 return 0;
287 #endif
288
289 return regno >= I386_NUM_REGS;
290 }
291
292 static int
293 x86_cannot_fetch_register (int regno)
294 {
295 #ifdef __x86_64__
296 if (is_64bit_tdesc ())
297 return 0;
298 #endif
299
300 return regno >= I386_NUM_REGS;
301 }
302
303 static void
304 x86_fill_gregset (struct regcache *regcache, void *buf)
305 {
306 int i;
307
308 #ifdef __x86_64__
309 if (register_size (regcache->tdesc, 0) == 8)
310 {
311 for (i = 0; i < X86_64_NUM_REGS; i++)
312 if (x86_64_regmap[i] != -1)
313 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
314
315 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
316 {
317 unsigned long base;
318 int lwpid = lwpid_of (current_thread);
319
320 collect_register_by_name (regcache, "fs_base", &base);
321 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
322
323 collect_register_by_name (regcache, "gs_base", &base);
324 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
325 }
326 #endif
327
328 return;
329 }
330
331 /* 32-bit inferior registers need to be zero-extended.
332 Callers would read uninitialized memory otherwise. */
333 memset (buf, 0x00, X86_64_USER_REGS * 8);
334 #endif
335
336 for (i = 0; i < I386_NUM_REGS; i++)
337 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
338
339 collect_register_by_name (regcache, "orig_eax",
340 ((char *) buf) + ORIG_EAX * REGSIZE);
341
342 #ifdef __x86_64__
343 /* Sign extend EAX value to avoid potential syscall restart
344 problems: a negative value such as -ERESTARTSYS must reach the
345 64-bit buffer sign-extended, or the kernel will not recognize the
346 pending syscall restart. See amd64_linux_collect_native_gregset()
347 in gdb/amd64-linux-nat.c for a detailed explanation. */
348 if (register_size (regcache->tdesc, 0) == 4)
349 {
350 void *ptr = ((gdb_byte *) buf
351 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
352
353 *(int64_t *) ptr = *(int32_t *) ptr;
354 }
355 #endif
356 }
357
358 static void
359 x86_store_gregset (struct regcache *regcache, const void *buf)
360 {
361 int i;
362
363 #ifdef __x86_64__
364 if (register_size (regcache->tdesc, 0) == 8)
365 {
366 for (i = 0; i < X86_64_NUM_REGS; i++)
367 if (x86_64_regmap[i] != -1)
368 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
369
370 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
371 {
372 unsigned long base;
373 int lwpid = lwpid_of (current_thread);
374
375 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
376 supply_register_by_name (regcache, "fs_base", &base);
377
378 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
379 supply_register_by_name (regcache, "gs_base", &base);
380 }
381 #endif
382 return;
383 }
384 #endif
385
386 for (i = 0; i < I386_NUM_REGS; i++)
387 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
388
389 supply_register_by_name (regcache, "orig_eax",
390 ((char *) buf) + ORIG_EAX * REGSIZE);
391 }
392
393 static void
394 x86_fill_fpregset (struct regcache *regcache, void *buf)
395 {
396 #ifdef __x86_64__
397 i387_cache_to_fxsave (regcache, buf);
398 #else
399 i387_cache_to_fsave (regcache, buf);
400 #endif
401 }
402
403 static void
404 x86_store_fpregset (struct regcache *regcache, const void *buf)
405 {
406 #ifdef __x86_64__
407 i387_fxsave_to_cache (regcache, buf);
408 #else
409 i387_fsave_to_cache (regcache, buf);
410 #endif
411 }
412
413 #ifndef __x86_64__
414
415 static void
416 x86_fill_fpxregset (struct regcache *regcache, void *buf)
417 {
418 i387_cache_to_fxsave (regcache, buf);
419 }
420
421 static void
422 x86_store_fpxregset (struct regcache *regcache, const void *buf)
423 {
424 i387_fxsave_to_cache (regcache, buf);
425 }
426
427 #endif
428
429 static void
430 x86_fill_xstateregset (struct regcache *regcache, void *buf)
431 {
432 i387_cache_to_xsave (regcache, buf);
433 }
434
435 static void
436 x86_store_xstateregset (struct regcache *regcache, const void *buf)
437 {
438 i387_xsave_to_cache (regcache, buf);
439 }
440
441 /* ??? The non-biarch i386 case stores all the i387 regs twice.
442 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
443 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
444 doesn't work. It would be nice to avoid the duplication where it
445 does work. Maybe the arch_setup routine could check whether it works
446 and update the supported regsets accordingly. */
447
448 static struct regset_info x86_regsets[] =
449 {
450 #ifdef HAVE_PTRACE_GETREGS
451 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
452 GENERAL_REGS,
453 x86_fill_gregset, x86_store_gregset },
454 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
455 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
456 # ifndef __x86_64__
457 # ifdef HAVE_PTRACE_GETFPXREGS
458 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
459 EXTENDED_REGS,
460 x86_fill_fpxregset, x86_store_fpxregset },
461 # endif
462 # endif
463 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
464 FP_REGS,
465 x86_fill_fpregset, x86_store_fpregset },
466 #endif /* HAVE_PTRACE_GETREGS */
467 NULL_REGSET
468 };
469
470 static CORE_ADDR
471 x86_get_pc (struct regcache *regcache)
472 {
473 int use_64bit = register_size (regcache->tdesc, 0) == 8;
474
475 if (use_64bit)
476 {
477 uint64_t pc;
478
479 collect_register_by_name (regcache, "rip", &pc);
480 return (CORE_ADDR) pc;
481 }
482 else
483 {
484 uint32_t pc;
485
486 collect_register_by_name (regcache, "eip", &pc);
487 return (CORE_ADDR) pc;
488 }
489 }
490
491 static void
492 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
493 {
494 int use_64bit = register_size (regcache->tdesc, 0) == 8;
495
496 if (use_64bit)
497 {
498 uint64_t newpc = pc;
499
500 supply_register_by_name (regcache, "rip", &newpc);
501 }
502 else
503 {
504 uint32_t newpc = pc;
505
506 supply_register_by_name (regcache, "eip", &newpc);
507 }
508 }
509 \f
510 static const gdb_byte x86_breakpoint[] = { 0xCC };
511 #define x86_breakpoint_len 1
512
513 static int
514 x86_breakpoint_at (CORE_ADDR pc)
515 {
516 unsigned char c;
517
518 (*the_target->read_memory) (pc, &c, 1);
519 if (c == 0xCC)
520 return 1;
521
522 return 0;
523 }
524 \f
525 /* Low-level function vector. */
526 struct x86_dr_low_type x86_dr_low =
527 {
528 x86_linux_dr_set_control,
529 x86_linux_dr_set_addr,
530 x86_linux_dr_get_addr,
531 x86_linux_dr_get_status,
532 x86_linux_dr_get_control,
533 sizeof (void *),
534 };
535 \f
536 /* Breakpoint/Watchpoint support. */
537
538 static int
539 x86_supports_z_point_type (char z_type)
540 {
541 switch (z_type)
542 {
543 case Z_PACKET_SW_BP:
544 case Z_PACKET_HW_BP:
545 case Z_PACKET_WRITE_WP:
546 case Z_PACKET_ACCESS_WP:
547 return 1;
548 default:
549 return 0;
550 }
551 }
552
553 static int
554 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
555 int size, struct raw_breakpoint *bp)
556 {
557 struct process_info *proc = current_process ();
558
559 switch (type)
560 {
561 case raw_bkpt_type_hw:
562 case raw_bkpt_type_write_wp:
563 case raw_bkpt_type_access_wp:
564 {
565 enum target_hw_bp_type hw_type
566 = raw_bkpt_type_to_target_hw_bp_type (type);
567 struct x86_debug_reg_state *state
568 = &proc->priv->arch_private->debug_reg_state;
569
570 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
571 }
572
573 default:
574 /* Unsupported. */
575 return 1;
576 }
577 }
578
579 static int
580 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
581 int size, struct raw_breakpoint *bp)
582 {
583 struct process_info *proc = current_process ();
584
585 switch (type)
586 {
587 case raw_bkpt_type_hw:
588 case raw_bkpt_type_write_wp:
589 case raw_bkpt_type_access_wp:
590 {
591 enum target_hw_bp_type hw_type
592 = raw_bkpt_type_to_target_hw_bp_type (type);
593 struct x86_debug_reg_state *state
594 = &proc->priv->arch_private->debug_reg_state;
595
596 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
597 }
598 default:
599 /* Unsupported. */
600 return 1;
601 }
602 }
603
604 static int
605 x86_stopped_by_watchpoint (void)
606 {
607 struct process_info *proc = current_process ();
608 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
609 }
610
611 static CORE_ADDR
612 x86_stopped_data_address (void)
613 {
614 struct process_info *proc = current_process ();
615 CORE_ADDR addr;
616 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
617 &addr))
618 return addr;
619 return 0;
620 }
621 \f
622 /* Called when a new process is created. */
623
624 static struct arch_process_info *
625 x86_linux_new_process (void)
626 {
627 struct arch_process_info *info = XCNEW (struct arch_process_info);
628
629 x86_low_init_dregs (&info->debug_reg_state);
630
631 return info;
632 }
633
634 /* Called when a process is being deleted. */
635
636 static void
637 x86_linux_delete_process (struct arch_process_info *info)
638 {
639 xfree (info);
640 }
641
642 /* Target routine for linux_new_fork. */
643
644 static void
645 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
646 {
647 /* These are allocated by linux_add_process. */
648 gdb_assert (parent->priv != NULL
649 && parent->priv->arch_private != NULL);
650 gdb_assert (child->priv != NULL
651 && child->priv->arch_private != NULL);
652
653 /* Linux kernel before 2.6.33 commit
654 72f674d203cd230426437cdcf7dd6f681dad8b0d
655 will inherit hardware debug registers from parent
656 on fork/vfork/clone. Newer Linux kernels create such tasks with
657 zeroed debug registers.
658
659 GDB core assumes the child inherits the watchpoints/hw
660 breakpoints of the parent, and will remove them all from the
661 forked-off process. Copy the debug register mirrors into the
662 new process so that all breakpoints and watchpoints can be
663 removed together. The debug register mirrors will be zeroed
664 just before detaching the forked-off process, thus keeping
665 this compatible with older Linux kernels too. */
666
667 *child->priv->arch_private = *parent->priv->arch_private;
668 }
669
670 /* See nat/x86-dregs.h. */
671
672 struct x86_debug_reg_state *
673 x86_debug_reg_state (pid_t pid)
674 {
675 struct process_info *proc = find_process_pid (pid);
676
677 return &proc->priv->arch_private->debug_reg_state;
678 }
679 \f
680 /* When GDBSERVER is built as a 64-bit application on linux, the
681 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
682 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
683 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
684 conversion in-place ourselves. */
685
686 /* Convert a ptrace/host siginfo object into/from the siginfo in the
687 layout of the inferior's architecture. Returns true if any
688 conversion was done; false otherwise. If DIRECTION is 1, then copy
689 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
690 INF. */
691
692 static int
693 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
694 {
695 #ifdef __x86_64__
696 unsigned int machine;
697 int tid = lwpid_of (current_thread);
698 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
699
700 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
701 if (!is_64bit_tdesc ())
702 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
703 FIXUP_32);
704 /* No fixup for native x32 GDB. */
705 else if (!is_elf64 && sizeof (void *) == 8)
706 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
707 FIXUP_X32);
708 #endif
709
710 return 0;
711 }
712 \f
713 static int use_xml;
714
715 /* Format of XSAVE extended state is:
716 struct
717 {
718 fxsave_bytes[0..463]
719 sw_usable_bytes[464..511]
720 xstate_hdr_bytes[512..575]
721 avx_bytes[576..831]
722 future_state etc
723 };
724
725 Same memory layout will be used for the coredump NT_X86_XSTATE
726 representing the XSAVE extended state registers.
727
728 The first 8 bytes of sw_usable_bytes (bytes 464..471) hold the OS-enabled
729 extended state mask, which is the same as the extended control register
730 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
731 together with the mask saved in the xstate_hdr_bytes to determine what
732 states the processor/OS supports and what state, used or initialized,
733 the process/thread is in. */
734 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
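/* Illustrative sketch, not part of the original file: given a raw XSAVE
   buffer, the enabled-feature mask can be read at the offset defined
   above roughly like this (the buffer name and helper are hypothetical):

     static uint64_t
     xsave_xcr0 (const unsigned char *xsave_buf)
     {
       uint64_t mask;

       memcpy (&mask, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
               sizeof (mask));
       return mask;
     }

   x86_linux_read_description below performs the equivalent read on the
   xstateregs array it fetches with PTRACE_GETREGSET, indexing it at
   I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t).  */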
735
736 /* Does the current host support the GETFPXREGS request? The header
737 file may or may not define it, and even if it is defined, the
738 kernel will return EIO if it's running on a pre-SSE processor. */
739 int have_ptrace_getfpxregs =
740 #ifdef HAVE_PTRACE_GETFPXREGS
741 -1
742 #else
743 0
744 #endif
745 ;
746
747 /* Get Linux/x86 target description from running target. */
748
749 static const struct target_desc *
750 x86_linux_read_description (void)
751 {
752 unsigned int machine;
753 int is_elf64;
754 int xcr0_features;
755 int tid;
756 static uint64_t xcr0;
757 struct regset_info *regset;
758
759 tid = lwpid_of (current_thread);
760
761 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
762
763 if (sizeof (void *) == 4)
764 {
765 if (is_elf64 > 0)
766 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
767 #ifndef __x86_64__
768 else if (machine == EM_X86_64)
769 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
770 #endif
771 }
772
773 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
774 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
775 {
776 elf_fpxregset_t fpxregs;
777
778 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
779 {
780 have_ptrace_getfpxregs = 0;
781 have_ptrace_getregset = 0;
782 return i386_linux_read_description (X86_XSTATE_X87);
783 }
784 else
785 have_ptrace_getfpxregs = 1;
786 }
787 #endif
788
789 if (!use_xml)
790 {
791 x86_xcr0 = X86_XSTATE_SSE_MASK;
792
793 /* Don't use XML. */
794 #ifdef __x86_64__
795 if (machine == EM_X86_64)
796 return tdesc_amd64_linux_no_xml;
797 else
798 #endif
799 return tdesc_i386_linux_no_xml;
800 }
801
802 if (have_ptrace_getregset == -1)
803 {
804 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
805 struct iovec iov;
806
807 iov.iov_base = xstateregs;
808 iov.iov_len = sizeof (xstateregs);
809
810 /* Check if PTRACE_GETREGSET works. */
811 if (ptrace (PTRACE_GETREGSET, tid,
812 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
813 have_ptrace_getregset = 0;
814 else
815 {
816 have_ptrace_getregset = 1;
817
818 /* Get XCR0 from XSAVE extended state. */
819 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
820 / sizeof (uint64_t))];
821
822 /* Use PTRACE_GETREGSET if it is available. */
823 for (regset = x86_regsets;
824 regset->fill_function != NULL; regset++)
825 if (regset->get_request == PTRACE_GETREGSET)
826 regset->size = X86_XSTATE_SIZE (xcr0);
827 else if (regset->type != GENERAL_REGS)
828 regset->size = 0;
829 }
830 }
831
832 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
833 xcr0_features = (have_ptrace_getregset
834 && (xcr0 & X86_XSTATE_ALL_MASK));
835
836 if (xcr0_features)
837 x86_xcr0 = xcr0;
838
839 if (machine == EM_X86_64)
840 {
841 #ifdef __x86_64__
842 const target_desc *tdesc = NULL;
843
844 if (xcr0_features)
845 {
846 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
847 !is_elf64);
848 }
849
850 if (tdesc == NULL)
851 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
852 return tdesc;
853 #endif
854 }
855 else
856 {
857 const target_desc *tdesc = NULL;
858
859 if (xcr0_features)
860 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
861
862 if (tdesc == NULL)
863 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
864
865 return tdesc;
866 }
867
868 gdb_assert_not_reached ("failed to return tdesc");
869 }
870
871 /* Update the target description of all processes; a new GDB has
872 connected, and it may or may not support XML target descriptions. */
873
874 static void
875 x86_linux_update_xmltarget (void)
876 {
877 struct thread_info *saved_thread = current_thread;
878
879 /* Before changing the register cache's internal layout, flush the
880 contents of the current valid caches back to the threads, and
881 release the current regcache objects. */
882 regcache_release ();
883
884 for_each_process ([] (process_info *proc) {
885 int pid = proc->pid;
886
887 /* Look up any thread of this process. */
888 current_thread = find_any_thread_of_pid (pid);
889
890 the_low_target.arch_setup ();
891 });
892
893 current_thread = saved_thread;
894 }
895
896 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
897 PTRACE_GETREGSET. */
898
899 static void
900 x86_linux_process_qsupported (char **features, int count)
901 {
902 int i;
903
904 /* Assume GDB doesn't support XML unless it sends "xmlRegisters="
905 with "i386" in the qSupported query, in which case it supports x86
906 XML target descriptions. */
907 use_xml = 0;
908 for (i = 0; i < count; i++)
909 {
910 const char *feature = features[i];
911
912 if (startswith (feature, "xmlRegisters="))
913 {
914 char *copy = xstrdup (feature + 13);
915 char *p;
916
917 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
918 {
919 if (strcmp (p, "i386") == 0)
920 {
921 use_xml = 1;
922 break;
923 }
924 }
925
926 free (copy);
927 }
928 }
929 x86_linux_update_xmltarget ();
930 }
931
932 /* Common for x86/x86-64. */
933
934 static struct regsets_info x86_regsets_info =
935 {
936 x86_regsets, /* regsets */
937 0, /* num_regsets */
938 NULL, /* disabled_regsets */
939 };
940
941 #ifdef __x86_64__
942 static struct regs_info amd64_linux_regs_info =
943 {
944 NULL, /* regset_bitmap */
945 NULL, /* usrregs_info */
946 &x86_regsets_info
947 };
948 #endif
949 static struct usrregs_info i386_linux_usrregs_info =
950 {
951 I386_NUM_REGS,
952 i386_regmap,
953 };
954
955 static struct regs_info i386_linux_regs_info =
956 {
957 NULL, /* regset_bitmap */
958 &i386_linux_usrregs_info,
959 &x86_regsets_info
960 };
961
962 const struct regs_info *
963 x86_linux_regs_info (void)
964 {
965 #ifdef __x86_64__
966 if (is_64bit_tdesc ())
967 return &amd64_linux_regs_info;
968 else
969 #endif
970 return &i386_linux_regs_info;
971 }
972
973 /* Initialize the target description for the architecture of the
974 inferior. */
975
976 static void
977 x86_arch_setup (void)
978 {
979 current_process ()->tdesc = x86_linux_read_description ();
980 }
981
982 /* Fill *SYSNO with the number of the trapped syscall. This should
983 only be called if LWP got a SYSCALL_SIGTRAP. */
984
985 static void
986 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
987 {
988 int use_64bit = register_size (regcache->tdesc, 0) == 8;
989
990 if (use_64bit)
991 {
992 long l_sysno;
993
994 collect_register_by_name (regcache, "orig_rax", &l_sysno);
995 *sysno = (int) l_sysno;
996 }
997 else
998 collect_register_by_name (regcache, "orig_eax", sysno);
999 }
1000
1001 static int
1002 x86_supports_tracepoints (void)
1003 {
1004 return 1;
1005 }
1006
1007 static void
1008 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1009 {
1010 write_inferior_memory (*to, buf, len);
1011 *to += len;
1012 }
1013
1014 static int
1015 push_opcode (unsigned char *buf, const char *op)
1016 {
1017 unsigned char *buf_org = buf;
1018
1019 while (1)
1020 {
1021 char *endptr;
1022 unsigned long ul = strtoul (op, &endptr, 16);
1023
1024 if (endptr == op)
1025 break;
1026
1027 *buf++ = ul;
1028 op = endptr;
1029 }
1030
1031 return buf - buf_org;
1032 }
1033
1034 #ifdef __x86_64__
1035
1036 /* Build a jump pad that saves registers and calls a collection
1037 function. Writes the instruction that jumps to the jump pad into
1038 JJUMPAD_INSN. The caller is responsible for writing it at the
1039 tracepoint address. */
1040
1041 static int
1042 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1043 CORE_ADDR collector,
1044 CORE_ADDR lockaddr,
1045 ULONGEST orig_size,
1046 CORE_ADDR *jump_entry,
1047 CORE_ADDR *trampoline,
1048 ULONGEST *trampoline_size,
1049 unsigned char *jjump_pad_insn,
1050 ULONGEST *jjump_pad_insn_size,
1051 CORE_ADDR *adjusted_insn_addr,
1052 CORE_ADDR *adjusted_insn_addr_end,
1053 char *err)
1054 {
1055 unsigned char buf[40];
1056 int i, offset;
1057 int64_t loffset;
1058
1059 CORE_ADDR buildaddr = *jump_entry;
1060
1061 /* Build the jump pad. */
1062
1063 /* First, do tracepoint data collection. Save registers. */
1064 i = 0;
1065 /* Need to ensure stack pointer saved first. */
1066 buf[i++] = 0x54; /* push %rsp */
1067 buf[i++] = 0x55; /* push %rbp */
1068 buf[i++] = 0x57; /* push %rdi */
1069 buf[i++] = 0x56; /* push %rsi */
1070 buf[i++] = 0x52; /* push %rdx */
1071 buf[i++] = 0x51; /* push %rcx */
1072 buf[i++] = 0x53; /* push %rbx */
1073 buf[i++] = 0x50; /* push %rax */
1074 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1075 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1076 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1077 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1078 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1079 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1080 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1081 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1082 buf[i++] = 0x9c; /* pushfq */
1083 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1084 buf[i++] = 0xbf;
1085 memcpy (buf + i, &tpaddr, 8);
1086 i += 8;
1087 buf[i++] = 0x57; /* push %rdi */
1088 append_insns (&buildaddr, i, buf);
1089
1090 /* Stack space for the collecting_t object. */
1091 i = 0;
1092 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1093 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1094 memcpy (buf + i, &tpoint, 8);
1095 i += 8;
1096 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1097 i += push_opcode (&buf[i],
1098 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1099 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1100 append_insns (&buildaddr, i, buf);
1101
1102 /* spin-lock. */
1103 i = 0;
1104 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1105 memcpy (&buf[i], (void *) &lockaddr, 8);
1106 i += 8;
1107 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1108 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1109 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1110 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1111 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1112 append_insns (&buildaddr, i, buf);
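  /* Illustrative sketch, not part of the original file: the machine code
     built above spins until it owns the collection lock, roughly
     equivalent to the following C, where OBJ is the collecting_t object
     just set up on the stack and LOCKADDR is the lock word shared with
     the in-process agent (these names are hypothetical):

       while (__sync_val_compare_and_swap ((void **) lockaddr,
                                           NULL, (void *) &obj) != NULL)
         ;
  */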
1113
1114 /* Set up the gdb_collect call. */
1115 /* At this point, (stack pointer + 0x18) is the base of our saved
1116 register block. */
1117
1118 i = 0;
1119 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1120 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1121
1122 /* tpoint address may be 64-bit wide. */
1123 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1124 memcpy (buf + i, &tpoint, 8);
1125 i += 8;
1126 append_insns (&buildaddr, i, buf);
1127
1128 /* The collector function, being in the shared library, may be more
1129 than 31 bits away from the jump pad. */
1130 i = 0;
1131 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1132 memcpy (buf + i, &collector, 8);
1133 i += 8;
1134 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1135 append_insns (&buildaddr, i, buf);
1136
1137 /* Clear the spin-lock. */
1138 i = 0;
1139 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1140 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1141 memcpy (buf + i, &lockaddr, 8);
1142 i += 8;
1143 append_insns (&buildaddr, i, buf);
1144
1145 /* Remove stack that had been used for the collect_t object. */
1146 i = 0;
1147 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1148 append_insns (&buildaddr, i, buf);
1149
1150 /* Restore register state. */
1151 i = 0;
1152 buf[i++] = 0x48; /* add $0x8,%rsp */
1153 buf[i++] = 0x83;
1154 buf[i++] = 0xc4;
1155 buf[i++] = 0x08;
1156 buf[i++] = 0x9d; /* popfq */
1157 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1158 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1159 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1160 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1161 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1162 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1163 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1164 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1165 buf[i++] = 0x58; /* pop %rax */
1166 buf[i++] = 0x5b; /* pop %rbx */
1167 buf[i++] = 0x59; /* pop %rcx */
1168 buf[i++] = 0x5a; /* pop %rdx */
1169 buf[i++] = 0x5e; /* pop %rsi */
1170 buf[i++] = 0x5f; /* pop %rdi */
1171 buf[i++] = 0x5d; /* pop %rbp */
1172 buf[i++] = 0x5c; /* pop %rsp */
1173 append_insns (&buildaddr, i, buf);
1174
1175 /* Now, adjust the original instruction to execute in the jump
1176 pad. */
1177 *adjusted_insn_addr = buildaddr;
1178 relocate_instruction (&buildaddr, tpaddr);
1179 *adjusted_insn_addr_end = buildaddr;
1180
1181 /* Finally, write a jump back to the program. */
1182
1183 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1184 if (loffset > INT_MAX || loffset < INT_MIN)
1185 {
1186 sprintf (err,
1187 "E.Jump back from jump pad too far from tracepoint "
1188 "(offset 0x%" PRIx64 " > int32).", loffset);
1189 return 1;
1190 }
1191
1192 offset = (int) loffset;
1193 memcpy (buf, jump_insn, sizeof (jump_insn));
1194 memcpy (buf + 1, &offset, 4);
1195 append_insns (&buildaddr, sizeof (jump_insn), buf);
1196
1197 /* The jump pad is now built. Wire in a jump to our jump pad. This
1198 is always done last (by our caller actually), so that we can
1199 install fast tracepoints with threads running. This relies on
1200 the agent's atomic write support. */
1201 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1202 if (loffset > INT_MAX || loffset < INT_MIN)
1203 {
1204 sprintf (err,
1205 "E.Jump pad too far from tracepoint "
1206 "(offset 0x%" PRIx64 " > int32).", loffset);
1207 return 1;
1208 }
1209
1210 offset = (int) loffset;
1211
1212 memcpy (buf, jump_insn, sizeof (jump_insn));
1213 memcpy (buf + 1, &offset, 4);
1214 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1215 *jjump_pad_insn_size = sizeof (jump_insn);
1216
1217 /* Return the end address of our pad. */
1218 *jump_entry = buildaddr;
1219
1220 return 0;
1221 }
1222
1223 #endif /* __x86_64__ */
1224
1225 /* Build a jump pad that saves registers and calls a collection
1226 function. Writes the instruction that jumps to the jump pad into
1227 JJUMPAD_INSN. The caller is responsible for writing it at the
1228 tracepoint address. */
1229
1230 static int
1231 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1232 CORE_ADDR collector,
1233 CORE_ADDR lockaddr,
1234 ULONGEST orig_size,
1235 CORE_ADDR *jump_entry,
1236 CORE_ADDR *trampoline,
1237 ULONGEST *trampoline_size,
1238 unsigned char *jjump_pad_insn,
1239 ULONGEST *jjump_pad_insn_size,
1240 CORE_ADDR *adjusted_insn_addr,
1241 CORE_ADDR *adjusted_insn_addr_end,
1242 char *err)
1243 {
1244 unsigned char buf[0x100];
1245 int i, offset;
1246 CORE_ADDR buildaddr = *jump_entry;
1247
1248 /* Build the jump pad. */
1249
1250 /* First, do tracepoint data collection. Save registers. */
1251 i = 0;
1252 buf[i++] = 0x60; /* pushad */
1253 buf[i++] = 0x68; /* push tpaddr aka $pc */
1254 *((int *)(buf + i)) = (int) tpaddr;
1255 i += 4;
1256 buf[i++] = 0x9c; /* pushf */
1257 buf[i++] = 0x1e; /* push %ds */
1258 buf[i++] = 0x06; /* push %es */
1259 buf[i++] = 0x0f; /* push %fs */
1260 buf[i++] = 0xa0;
1261 buf[i++] = 0x0f; /* push %gs */
1262 buf[i++] = 0xa8;
1263 buf[i++] = 0x16; /* push %ss */
1264 buf[i++] = 0x0e; /* push %cs */
1265 append_insns (&buildaddr, i, buf);
1266
1267 /* Stack space for the collecting_t object. */
1268 i = 0;
1269 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1270
1271 /* Build the object. */
1272 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1273 memcpy (buf + i, &tpoint, 4);
1274 i += 4;
1275 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1276
1277 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1278 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1279 append_insns (&buildaddr, i, buf);
1280
1281 /* Spin-lock. Note this uses cmpxchg, which is not available on the
1282 original i386; if we cared about that, this could use xchg instead. */
1283
1284 i = 0;
1285 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1286 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1287 %esp,<lockaddr> */
1288 memcpy (&buf[i], (void *) &lockaddr, 4);
1289 i += 4;
1290 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1291 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1292 append_insns (&buildaddr, i, buf);
1293
1294
1295 /* Set up arguments to the gdb_collect call. */
1296 i = 0;
1297 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1298 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1299 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1300 append_insns (&buildaddr, i, buf);
1301
1302 i = 0;
1303 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1304 append_insns (&buildaddr, i, buf);
1305
1306 i = 0;
1307 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1308 memcpy (&buf[i], (void *) &tpoint, 4);
1309 i += 4;
1310 append_insns (&buildaddr, i, buf);
1311
1312 buf[0] = 0xe8; /* call <reladdr> */
1313 offset = collector - (buildaddr + sizeof (jump_insn));
1314 memcpy (buf + 1, &offset, 4);
1315 append_insns (&buildaddr, 5, buf);
1316 /* Clean up after the call. */
1317 buf[0] = 0x83; /* add $0x8,%esp */
1318 buf[1] = 0xc4;
1319 buf[2] = 0x08;
1320 append_insns (&buildaddr, 3, buf);
1321
1322
1323 /* Clear the spin-lock. This would need the LOCK prefix on older
1324 broken archs. */
1325 i = 0;
1326 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1327 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1328 memcpy (buf + i, &lockaddr, 4);
1329 i += 4;
1330 append_insns (&buildaddr, i, buf);
1331
1332
1333 /* Remove stack that had been used for the collect_t object. */
1334 i = 0;
1335 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1336 append_insns (&buildaddr, i, buf);
1337
1338 i = 0;
1339 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1340 buf[i++] = 0xc4;
1341 buf[i++] = 0x04;
1342 buf[i++] = 0x17; /* pop %ss */
1343 buf[i++] = 0x0f; /* pop %gs */
1344 buf[i++] = 0xa9;
1345 buf[i++] = 0x0f; /* pop %fs */
1346 buf[i++] = 0xa1;
1347 buf[i++] = 0x07; /* pop %es */
1348 buf[i++] = 0x1f; /* pop %ds */
1349 buf[i++] = 0x9d; /* popf */
1350 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1351 buf[i++] = 0xc4;
1352 buf[i++] = 0x04;
1353 buf[i++] = 0x61; /* popad */
1354 append_insns (&buildaddr, i, buf);
1355
1356 /* Now, adjust the original instruction to execute in the jump
1357 pad. */
1358 *adjusted_insn_addr = buildaddr;
1359 relocate_instruction (&buildaddr, tpaddr);
1360 *adjusted_insn_addr_end = buildaddr;
1361
1362 /* Write the jump back to the program. */
1363 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1364 memcpy (buf, jump_insn, sizeof (jump_insn));
1365 memcpy (buf + 1, &offset, 4);
1366 append_insns (&buildaddr, sizeof (jump_insn), buf);
1367
1368 /* The jump pad is now built. Wire in a jump to our jump pad. This
1369 is always done last (by our caller actually), so that we can
1370 install fast tracepoints with threads running. This relies on
1371 the agent's atomic write support. */
1372 if (orig_size == 4)
1373 {
1374 /* Create a trampoline. */
1375 *trampoline_size = sizeof (jump_insn);
1376 if (!claim_trampoline_space (*trampoline_size, trampoline))
1377 {
1378 /* No trampoline space available. */
1379 strcpy (err,
1380 "E.Cannot allocate trampoline space needed for fast "
1381 "tracepoints on 4-byte instructions.");
1382 return 1;
1383 }
1384
1385 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1386 memcpy (buf, jump_insn, sizeof (jump_insn));
1387 memcpy (buf + 1, &offset, 4);
1388 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1389
1390 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1391 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1392 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1393 memcpy (buf + 2, &offset, 2);
1394 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1395 *jjump_pad_insn_size = sizeof (small_jump_insn);
1396 }
1397 else
1398 {
1399 /* Else use a 32-bit relative jump instruction. */
1400 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1401 memcpy (buf, jump_insn, sizeof (jump_insn));
1402 memcpy (buf + 1, &offset, 4);
1403 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1404 *jjump_pad_insn_size = sizeof (jump_insn);
1405 }
1406
1407 /* Return the end address of our pad. */
1408 *jump_entry = buildaddr;
1409
1410 return 0;
1411 }
1412
1413 static int
1414 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1415 CORE_ADDR collector,
1416 CORE_ADDR lockaddr,
1417 ULONGEST orig_size,
1418 CORE_ADDR *jump_entry,
1419 CORE_ADDR *trampoline,
1420 ULONGEST *trampoline_size,
1421 unsigned char *jjump_pad_insn,
1422 ULONGEST *jjump_pad_insn_size,
1423 CORE_ADDR *adjusted_insn_addr,
1424 CORE_ADDR *adjusted_insn_addr_end,
1425 char *err)
1426 {
1427 #ifdef __x86_64__
1428 if (is_64bit_tdesc ())
1429 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1430 collector, lockaddr,
1431 orig_size, jump_entry,
1432 trampoline, trampoline_size,
1433 jjump_pad_insn,
1434 jjump_pad_insn_size,
1435 adjusted_insn_addr,
1436 adjusted_insn_addr_end,
1437 err);
1438 #endif
1439
1440 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1441 collector, lockaddr,
1442 orig_size, jump_entry,
1443 trampoline, trampoline_size,
1444 jjump_pad_insn,
1445 jjump_pad_insn_size,
1446 adjusted_insn_addr,
1447 adjusted_insn_addr_end,
1448 err);
1449 }
1450
1451 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1452 architectures. */
1453
1454 static int
1455 x86_get_min_fast_tracepoint_insn_len (void)
1456 {
1457 static int warned_about_fast_tracepoints = 0;
1458
1459 #ifdef __x86_64__
1460 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1461 used for fast tracepoints. */
1462 if (is_64bit_tdesc ())
1463 return 5;
1464 #endif
1465
1466 if (agent_loaded_p ())
1467 {
1468 char errbuf[IPA_BUFSIZ];
1469
1470 errbuf[0] = '\0';
1471
1472 /* On x86, if trampolines are available, then 4-byte jump instructions
1473 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1474 with a 4-byte offset are used instead. */
1475 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1476 return 4;
1477 else
1478 {
1479 /* GDB has no channel to explain to the user why a shorter fast
1480 tracepoint is not possible, but at least make GDBserver
1481 mention that something has gone awry. */
1482 if (!warned_about_fast_tracepoints)
1483 {
1484 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1485 warned_about_fast_tracepoints = 1;
1486 }
1487 return 5;
1488 }
1489 }
1490 else
1491 {
1492 /* Indicate that the minimum length is currently unknown since the IPA
1493 has not loaded yet. */
1494 return 0;
1495 }
1496 }
1497
1498 static void
1499 add_insns (unsigned char *start, int len)
1500 {
1501 CORE_ADDR buildaddr = current_insn_ptr;
1502
1503 if (debug_threads)
1504 debug_printf ("Adding %d bytes of insn at %s\n",
1505 len, paddress (buildaddr));
1506
1507 append_insns (&buildaddr, len, start);
1508 current_insn_ptr = buildaddr;
1509 }
1510
1511 /* Our general strategy for emitting code is to avoid specifying raw
1512 bytes whenever possible, and instead copy a block of inline asm
1513 that is embedded in the function. This is a little messy, because
1514 we need to keep the compiler from discarding what looks like dead
1515 code, plus suppress various warnings. */
1516
1517 #define EMIT_ASM(NAME, INSNS) \
1518 do \
1519 { \
1520 extern unsigned char start_ ## NAME, end_ ## NAME; \
1521 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1522 __asm__ ("jmp end_" #NAME "\n" \
1523 "\t" "start_" #NAME ":" \
1524 "\t" INSNS "\n" \
1525 "\t" "end_" #NAME ":"); \
1526 } while (0)
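/* Illustrative sketch, not part of the original file: an invocation such
   as

     EMIT_ASM (amd64_pop, "pop %rax");

   expands roughly to

     extern unsigned char start_amd64_pop, end_amd64_pop;
     add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
     __asm__ ("jmp end_amd64_pop\n"
              "\t" "start_amd64_pop:"
              "\t" "pop %rax\n"
              "\t" "end_amd64_pop:");

   i.e. the instruction bytes are assembled into gdbserver itself between
   the two labels (the leading jmp keeps them from being executed at this
   point), and add_insns copies those bytes into the inferior's jump pad
   at current_insn_ptr.  */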
1527
1528 #ifdef __x86_64__
1529
1530 #define EMIT_ASM32(NAME,INSNS) \
1531 do \
1532 { \
1533 extern unsigned char start_ ## NAME, end_ ## NAME; \
1534 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1535 __asm__ (".code32\n" \
1536 "\t" "jmp end_" #NAME "\n" \
1537 "\t" "start_" #NAME ":\n" \
1538 "\t" INSNS "\n" \
1539 "\t" "end_" #NAME ":\n" \
1540 ".code64\n"); \
1541 } while (0)
1542
1543 #else
1544
1545 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1546
1547 #endif
1548
1549 #ifdef __x86_64__
1550
1551 static void
1552 amd64_emit_prologue (void)
1553 {
1554 EMIT_ASM (amd64_prologue,
1555 "pushq %rbp\n\t"
1556 "movq %rsp,%rbp\n\t"
1557 "sub $0x20,%rsp\n\t"
1558 "movq %rdi,-8(%rbp)\n\t"
1559 "movq %rsi,-16(%rbp)");
1560 }
1561
1562
1563 static void
1564 amd64_emit_epilogue (void)
1565 {
1566 EMIT_ASM (amd64_epilogue,
1567 "movq -16(%rbp),%rdi\n\t"
1568 "movq %rax,(%rdi)\n\t"
1569 "xor %rax,%rax\n\t"
1570 "leave\n\t"
1571 "ret");
1572 }
1573
1574 static void
1575 amd64_emit_add (void)
1576 {
1577 EMIT_ASM (amd64_add,
1578 "add (%rsp),%rax\n\t"
1579 "lea 0x8(%rsp),%rsp");
1580 }
1581
1582 static void
1583 amd64_emit_sub (void)
1584 {
1585 EMIT_ASM (amd64_sub,
1586 "sub %rax,(%rsp)\n\t"
1587 "pop %rax");
1588 }
1589
1590 static void
1591 amd64_emit_mul (void)
1592 {
1593 emit_error = 1;
1594 }
1595
1596 static void
1597 amd64_emit_lsh (void)
1598 {
1599 emit_error = 1;
1600 }
1601
1602 static void
1603 amd64_emit_rsh_signed (void)
1604 {
1605 emit_error = 1;
1606 }
1607
1608 static void
1609 amd64_emit_rsh_unsigned (void)
1610 {
1611 emit_error = 1;
1612 }
1613
1614 static void
1615 amd64_emit_ext (int arg)
1616 {
1617 switch (arg)
1618 {
1619 case 8:
1620 EMIT_ASM (amd64_ext_8,
1621 "cbtw\n\t"
1622 "cwtl\n\t"
1623 "cltq");
1624 break;
1625 case 16:
1626 EMIT_ASM (amd64_ext_16,
1627 "cwtl\n\t"
1628 "cltq");
1629 break;
1630 case 32:
1631 EMIT_ASM (amd64_ext_32,
1632 "cltq");
1633 break;
1634 default:
1635 emit_error = 1;
1636 }
1637 }
1638
1639 static void
1640 amd64_emit_log_not (void)
1641 {
1642 EMIT_ASM (amd64_log_not,
1643 "test %rax,%rax\n\t"
1644 "sete %cl\n\t"
1645 "movzbq %cl,%rax");
1646 }
1647
1648 static void
1649 amd64_emit_bit_and (void)
1650 {
1651 EMIT_ASM (amd64_and,
1652 "and (%rsp),%rax\n\t"
1653 "lea 0x8(%rsp),%rsp");
1654 }
1655
1656 static void
1657 amd64_emit_bit_or (void)
1658 {
1659 EMIT_ASM (amd64_or,
1660 "or (%rsp),%rax\n\t"
1661 "lea 0x8(%rsp),%rsp");
1662 }
1663
1664 static void
1665 amd64_emit_bit_xor (void)
1666 {
1667 EMIT_ASM (amd64_xor,
1668 "xor (%rsp),%rax\n\t"
1669 "lea 0x8(%rsp),%rsp");
1670 }
1671
1672 static void
1673 amd64_emit_bit_not (void)
1674 {
1675 EMIT_ASM (amd64_bit_not,
1676 "xorq $0xffffffffffffffff,%rax");
1677 }
1678
1679 static void
1680 amd64_emit_equal (void)
1681 {
1682 EMIT_ASM (amd64_equal,
1683 "cmp %rax,(%rsp)\n\t"
1684 "je .Lamd64_equal_true\n\t"
1685 "xor %rax,%rax\n\t"
1686 "jmp .Lamd64_equal_end\n\t"
1687 ".Lamd64_equal_true:\n\t"
1688 "mov $0x1,%rax\n\t"
1689 ".Lamd64_equal_end:\n\t"
1690 "lea 0x8(%rsp),%rsp");
1691 }
1692
1693 static void
1694 amd64_emit_less_signed (void)
1695 {
1696 EMIT_ASM (amd64_less_signed,
1697 "cmp %rax,(%rsp)\n\t"
1698 "jl .Lamd64_less_signed_true\n\t"
1699 "xor %rax,%rax\n\t"
1700 "jmp .Lamd64_less_signed_end\n\t"
1701 ".Lamd64_less_signed_true:\n\t"
1702 "mov $1,%rax\n\t"
1703 ".Lamd64_less_signed_end:\n\t"
1704 "lea 0x8(%rsp),%rsp");
1705 }
1706
1707 static void
1708 amd64_emit_less_unsigned (void)
1709 {
1710 EMIT_ASM (amd64_less_unsigned,
1711 "cmp %rax,(%rsp)\n\t"
1712 "jb .Lamd64_less_unsigned_true\n\t"
1713 "xor %rax,%rax\n\t"
1714 "jmp .Lamd64_less_unsigned_end\n\t"
1715 ".Lamd64_less_unsigned_true:\n\t"
1716 "mov $1,%rax\n\t"
1717 ".Lamd64_less_unsigned_end:\n\t"
1718 "lea 0x8(%rsp),%rsp");
1719 }
1720
1721 static void
1722 amd64_emit_ref (int size)
1723 {
1724 switch (size)
1725 {
1726 case 1:
1727 EMIT_ASM (amd64_ref1,
1728 "movb (%rax),%al");
1729 break;
1730 case 2:
1731 EMIT_ASM (amd64_ref2,
1732 "movw (%rax),%ax");
1733 break;
1734 case 4:
1735 EMIT_ASM (amd64_ref4,
1736 "movl (%rax),%eax");
1737 break;
1738 case 8:
1739 EMIT_ASM (amd64_ref8,
1740 "movq (%rax),%rax");
1741 break;
1742 }
1743 }
1744
1745 static void
1746 amd64_emit_if_goto (int *offset_p, int *size_p)
1747 {
1748 EMIT_ASM (amd64_if_goto,
1749 "mov %rax,%rcx\n\t"
1750 "pop %rax\n\t"
1751 "cmp $0,%rcx\n\t"
1752 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1753 if (offset_p)
1754 *offset_p = 10;
1755 if (size_p)
1756 *size_p = 4;
1757 }
1758
1759 static void
1760 amd64_emit_goto (int *offset_p, int *size_p)
1761 {
1762 EMIT_ASM (amd64_goto,
1763 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1764 if (offset_p)
1765 *offset_p = 1;
1766 if (size_p)
1767 *size_p = 4;
1768 }
1769
1770 static void
1771 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1772 {
1773 int diff = (to - (from + size));
1774 unsigned char buf[sizeof (int)];
1775
1776 if (size != 4)
1777 {
1778 emit_error = 1;
1779 return;
1780 }
1781
1782 memcpy (buf, &diff, sizeof (int));
1783 write_inferior_memory (from, buf, sizeof (int));
1784 }
1785
1786 static void
1787 amd64_emit_const (LONGEST num)
1788 {
1789 unsigned char buf[16];
1790 int i;
1791 CORE_ADDR buildaddr = current_insn_ptr;
1792
1793 i = 0;
1794 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1795 memcpy (&buf[i], &num, sizeof (num));
1796 i += 8;
1797 append_insns (&buildaddr, i, buf);
1798 current_insn_ptr = buildaddr;
1799 }
1800
1801 static void
1802 amd64_emit_call (CORE_ADDR fn)
1803 {
1804 unsigned char buf[16];
1805 int i;
1806 CORE_ADDR buildaddr;
1807 LONGEST offset64;
1808
1809 /* The destination function, being in the shared library, may be more
1810 than 31 bits away from the compiled code pad. */
1811
1812 buildaddr = current_insn_ptr;
1813
1814 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1815
1816 i = 0;
1817
1818 if (offset64 > INT_MAX || offset64 < INT_MIN)
1819 {
1820 /* Offset is too large for a direct call, so call through a register
1821 (which is why this path is avoided when possible). Use r10; since
1822 it is call-clobbered, we don't have to push/pop it. */
1823 buf[i++] = 0x48; /* mov $fn,%r10 */
1824 buf[i++] = 0xba;
1825 memcpy (buf + i, &fn, 8);
1826 i += 8;
1827 buf[i++] = 0xff; /* callq *%r10 */
1828 buf[i++] = 0xd2;
1829 }
1830 else
1831 {
1832 int offset32 = offset64; /* we know we can't overflow here. */
1833
1834 buf[i++] = 0xe8; /* call <reladdr> */
1835 memcpy (buf + i, &offset32, 4);
1836 i += 4;
1837 }
1838
1839 append_insns (&buildaddr, i, buf);
1840 current_insn_ptr = buildaddr;
1841 }
1842
1843 static void
1844 amd64_emit_reg (int reg)
1845 {
1846 unsigned char buf[16];
1847 int i;
1848 CORE_ADDR buildaddr;
1849
1850 /* Assume raw_regs is still in %rdi. */
1851 buildaddr = current_insn_ptr;
1852 i = 0;
1853 buf[i++] = 0xbe; /* mov $<n>,%esi */
1854 memcpy (&buf[i], &reg, sizeof (reg));
1855 i += 4;
1856 append_insns (&buildaddr, i, buf);
1857 current_insn_ptr = buildaddr;
1858 amd64_emit_call (get_raw_reg_func_addr ());
1859 }
1860
1861 static void
1862 amd64_emit_pop (void)
1863 {
1864 EMIT_ASM (amd64_pop,
1865 "pop %rax");
1866 }
1867
1868 static void
1869 amd64_emit_stack_flush (void)
1870 {
1871 EMIT_ASM (amd64_stack_flush,
1872 "push %rax");
1873 }
1874
1875 static void
1876 amd64_emit_zero_ext (int arg)
1877 {
1878 switch (arg)
1879 {
1880 case 8:
1881 EMIT_ASM (amd64_zero_ext_8,
1882 "and $0xff,%rax");
1883 break;
1884 case 16:
1885 EMIT_ASM (amd64_zero_ext_16,
1886 "and $0xffff,%rax");
1887 break;
1888 case 32:
1889 EMIT_ASM (amd64_zero_ext_32,
1890 "mov $0xffffffff,%rcx\n\t"
1891 "and %rcx,%rax");
1892 break;
1893 default:
1894 emit_error = 1;
1895 }
1896 }
1897
1898 static void
1899 amd64_emit_swap (void)
1900 {
1901 EMIT_ASM (amd64_swap,
1902 "mov %rax,%rcx\n\t"
1903 "pop %rax\n\t"
1904 "push %rcx");
1905 }
1906
1907 static void
1908 amd64_emit_stack_adjust (int n)
1909 {
1910 unsigned char buf[16];
1911 int i;
1912 CORE_ADDR buildaddr = current_insn_ptr;
1913
1914 i = 0;
1915 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1916 buf[i++] = 0x8d;
1917 buf[i++] = 0x64;
1918 buf[i++] = 0x24;
1919 /* This only handles adjustments up to 16, but we don't expect any more. */
1920 buf[i++] = n * 8;
1921 append_insns (&buildaddr, i, buf);
1922 current_insn_ptr = buildaddr;
1923 }
1924
1925 /* FN's prototype is `LONGEST(*fn)(int)'. */
1926
1927 static void
1928 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1929 {
1930 unsigned char buf[16];
1931 int i;
1932 CORE_ADDR buildaddr;
1933
1934 buildaddr = current_insn_ptr;
1935 i = 0;
1936 buf[i++] = 0xbf; /* movl $<n>,%edi */
1937 memcpy (&buf[i], &arg1, sizeof (arg1));
1938 i += 4;
1939 append_insns (&buildaddr, i, buf);
1940 current_insn_ptr = buildaddr;
1941 amd64_emit_call (fn);
1942 }
1943
1944 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1945
1946 static void
1947 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1948 {
1949 unsigned char buf[16];
1950 int i;
1951 CORE_ADDR buildaddr;
1952
1953 buildaddr = current_insn_ptr;
1954 i = 0;
1955 buf[i++] = 0xbf; /* movl $<n>,%edi */
1956 memcpy (&buf[i], &arg1, sizeof (arg1));
1957 i += 4;
1958 append_insns (&buildaddr, i, buf);
1959 current_insn_ptr = buildaddr;
1960 EMIT_ASM (amd64_void_call_2_a,
1961 /* Save away a copy of the stack top. */
1962 "push %rax\n\t"
1963 /* Also pass top as the second argument. */
1964 "mov %rax,%rsi");
1965 amd64_emit_call (fn);
1966 EMIT_ASM (amd64_void_call_2_b,
1967 /* Restore the stack top, %rax may have been trashed. */
1968 "pop %rax");
1969 }
1970
1971 void
1972 amd64_emit_eq_goto (int *offset_p, int *size_p)
1973 {
1974 EMIT_ASM (amd64_eq,
1975 "cmp %rax,(%rsp)\n\t"
1976 "jne .Lamd64_eq_fallthru\n\t"
1977 "lea 0x8(%rsp),%rsp\n\t"
1978 "pop %rax\n\t"
1979 /* jmp, but don't trust the assembler to choose the right jump */
1980 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1981 ".Lamd64_eq_fallthru:\n\t"
1982 "lea 0x8(%rsp),%rsp\n\t"
1983 "pop %rax");
1984
1985 if (offset_p)
1986 *offset_p = 13;
1987 if (size_p)
1988 *size_p = 4;
1989 }
1990
1991 void
1992 amd64_emit_ne_goto (int *offset_p, int *size_p)
1993 {
1994 EMIT_ASM (amd64_ne,
1995 "cmp %rax,(%rsp)\n\t"
1996 "je .Lamd64_ne_fallthru\n\t"
1997 "lea 0x8(%rsp),%rsp\n\t"
1998 "pop %rax\n\t"
1999 /* jmp, but don't trust the assembler to choose the right jump */
2000 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2001 ".Lamd64_ne_fallthru:\n\t"
2002 "lea 0x8(%rsp),%rsp\n\t"
2003 "pop %rax");
2004
2005 if (offset_p)
2006 *offset_p = 13;
2007 if (size_p)
2008 *size_p = 4;
2009 }
2010
2011 void
2012 amd64_emit_lt_goto (int *offset_p, int *size_p)
2013 {
2014 EMIT_ASM (amd64_lt,
2015 "cmp %rax,(%rsp)\n\t"
2016 "jnl .Lamd64_lt_fallthru\n\t"
2017 "lea 0x8(%rsp),%rsp\n\t"
2018 "pop %rax\n\t"
2019 /* jmp, but don't trust the assembler to choose the right jump */
2020 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2021 ".Lamd64_lt_fallthru:\n\t"
2022 "lea 0x8(%rsp),%rsp\n\t"
2023 "pop %rax");
2024
2025 if (offset_p)
2026 *offset_p = 13;
2027 if (size_p)
2028 *size_p = 4;
2029 }
2030
2031 void
2032 amd64_emit_le_goto (int *offset_p, int *size_p)
2033 {
2034 EMIT_ASM (amd64_le,
2035 "cmp %rax,(%rsp)\n\t"
2036 "jnle .Lamd64_le_fallthru\n\t"
2037 "lea 0x8(%rsp),%rsp\n\t"
2038 "pop %rax\n\t"
2039 /* jmp, but don't trust the assembler to choose the right jump */
2040 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2041 ".Lamd64_le_fallthru:\n\t"
2042 "lea 0x8(%rsp),%rsp\n\t"
2043 "pop %rax");
2044
2045 if (offset_p)
2046 *offset_p = 13;
2047 if (size_p)
2048 *size_p = 4;
2049 }
2050
2051 void
2052 amd64_emit_gt_goto (int *offset_p, int *size_p)
2053 {
2054 EMIT_ASM (amd64_gt,
2055 "cmp %rax,(%rsp)\n\t"
2056 "jng .Lamd64_gt_fallthru\n\t"
2057 "lea 0x8(%rsp),%rsp\n\t"
2058 "pop %rax\n\t"
2059 /* jmp, but don't trust the assembler to choose the right jump */
2060 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2061 ".Lamd64_gt_fallthru:\n\t"
2062 "lea 0x8(%rsp),%rsp\n\t"
2063 "pop %rax");
2064
2065 if (offset_p)
2066 *offset_p = 13;
2067 if (size_p)
2068 *size_p = 4;
2069 }
2070
2071 void
2072 amd64_emit_ge_goto (int *offset_p, int *size_p)
2073 {
2074 EMIT_ASM (amd64_ge,
2075 "cmp %rax,(%rsp)\n\t"
2076 "jnge .Lamd64_ge_fallthru\n\t"
2077 ".Lamd64_ge_jump:\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2079 "pop %rax\n\t"
2080 /* jmp, but don't trust the assembler to choose the right jump */
2081 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2082 ".Lamd64_ge_fallthru:\n\t"
2083 "lea 0x8(%rsp),%rsp\n\t"
2084 "pop %rax");
2085
2086 if (offset_p)
2087 *offset_p = 13;
2088 if (size_p)
2089 *size_p = 4;
2090 }
2091
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

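/* i386 counterparts of the emitters above.  Emit a prologue that sets
   up a frame and preserves %ebx for the compiled expression.  */
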
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp). */
}

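/* Emit the epilogue: store the 64-bit result held in %eax (low) and
   %ebx (high) through the value pointer, restore the saved registers
   and return zero.  */
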
static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

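/* In the i386 emitters the 64-bit top of stack is split across %eax
   (low word) and %ebx (high word); deeper entries occupy two 32-bit
   words each on the machine stack.  Emit a 64-bit add of the next
   stack entry into the top.  */
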
static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t");
}

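/* 64-bit multiply and shifts are not open-coded on i386; flag an
   error so compilation of the expression fails.  */
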
static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

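/* Emit code to sign-extend the ARG-bit value in %eax into the full
   %eax/%ebx pair.  */
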
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

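/* Emit a logical negation of the 64-bit top of stack: leave 1 if it
   was zero, 0 otherwise.  */
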
static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}

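/* Emit code to compare the saved stack entry with the %eax/%ebx top,
   pop the entry, and leave 1 in %eax if the relation holds, else 0.
   The "less" variants differ only in using signed versus unsigned
   condition codes.  */
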
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

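/* Emit code to load a SIZE-byte value from the address in %eax into
   %eax, and into %ebx as well for the high word of an 8-byte load.  */
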
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

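/* Emit a conditional goto taken when the popped 64-bit top of stack
   is non-zero.  As elsewhere, the jump displacement is left zeroed
   and its offset and size are returned through OFFSET_P and SIZE_P
   for later patching.  */
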
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

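/* Patch the 4-byte relative displacement of a previously emitted
   goto at FROM so that it transfers control to TO.  */
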
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment. */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

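/* Emit code to load the 64-bit constant NUM into %eax/%ebx, skipping
   the high-word move when it is zero.  */
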
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

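/* Emit a 5-byte relative call to FN at the current instruction
   pointer.  */
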
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

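/* Emit code to fetch raw register REG by calling the get_raw_reg
   helper with the register block pointer saved at 8(%ebp); the value
   is left in %eax/%ebx.  */
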
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

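/* Emit code to drop N 64-bit entries from the expression stack.  The
   emitted lea uses an 8-bit displacement, so N * 8 must fit in a
   signed byte.  */
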
static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'. */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space. */
              "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'. */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx. */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments. */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume function won't scribble on its
                 arguments, so don't try to restore from this.) */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top. */
              "pop %eax");
}


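/* Like the amd64 versions above, the following emit a 64-bit compare
   of the saved stack entry against the %eax/%ebx top followed by a
   hand-assembled jump whose displacement is patched in later;
   OFFSET_P and SIZE_P report where it lives.  */
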
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

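/* The set of emit_ops used when compiling agent expressions to native
   code for 32-bit inferiors.  */
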
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


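/* Return the emit_ops appropriate for the current inferior: the amd64
   table when it uses a 64-bit target description, the i386 table
   otherwise.  */
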
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

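/* Implementation of linux_target_ops method "supports_range_stepping". */
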
static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".
   */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

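/* Return the index of the in-process agent's target description that
   matches the current thread's register set.  */
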
static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets. */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

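/* Build the fallback (no-XML) target descriptions and initialize the
   regset information used by this file.  */
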
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions. */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

#if GDB_SELF_TEST
  initialize_low_tdesc ();
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}