1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
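/* Templates for the jump instructions used by the fast tracepoint jump
   pads below: a 5-byte "jmp rel32" (opcode 0xe9) and a 4-byte "jmp rel16"
   with an operand-size prefix (0x66 0xe9).  The zeroed displacement bytes
   are patched in when each jump is installed.  */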
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
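/* These are used with PTRACE_ARCH_PRCTL below; for example,
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS)
   reads the FS segment base of thread LWPID into BASE.  */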
94
95 /* Per-process arch-specific data we want to keep. */
96
97 struct arch_process_info
98 {
99 struct x86_debug_reg_state debug_reg_state;
100 };
101
102 #ifdef __x86_64__
103
104 /* Mapping between the general-purpose registers in `struct user'
105 format and GDB's register array layout.
106 Note that the transfer layout uses 64-bit regs. */
107 static /*const*/ int i386_regmap[] =
108 {
109 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
110 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
111 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
112 DS * 8, ES * 8, FS * 8, GS * 8
113 };
114
115 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
116
117 /* So that code below doesn't have to care whether this is i386 or amd64. */
118 #define ORIG_EAX ORIG_RAX
119 #define REGSIZE 8
120
121 static const int x86_64_regmap[] =
122 {
123 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
124 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
125 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
126 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
127 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
128 DS * 8, ES * 8, FS * 8, GS * 8,
129 -1, -1, -1, -1, -1, -1, -1, -1,
130 -1, -1, -1, -1, -1, -1, -1, -1,
131 -1, -1, -1, -1, -1, -1, -1, -1,
132 -1,
133 -1, -1, -1, -1, -1, -1, -1, -1,
134 ORIG_RAX * 8,
135 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
136 21 * 8, 22 * 8,
137 #else
138 -1, -1,
139 #endif
140 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
141 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
142 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
143 -1, -1, -1, -1, -1, -1, -1, -1,
144 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
145 -1, -1, -1, -1, -1, -1, -1, -1,
146 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
147 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
148 -1, -1, -1, -1, -1, -1, -1, -1,
149 -1, -1, -1, -1, -1, -1, -1, -1,
150 -1, -1, -1, -1, -1, -1, -1, -1,
151 -1 /* pkru */
152 };
153
154 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
155 #define X86_64_USER_REGS (GS + 1)
156
157 #else /* ! __x86_64__ */
158
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout. */
161 static /*const*/ int i386_regmap[] =
162 {
163 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
164 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
165 EIP * 4, EFL * 4, CS * 4, SS * 4,
166 DS * 4, ES * 4, FS * 4, GS * 4
167 };
168
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
170
171 #define REGSIZE 4
172
173 #endif
174
175 #ifdef __x86_64__
176
177 /* Returns true if the current inferior belongs to an x86-64 process,
178 per the tdesc. */
179
180 static int
181 is_64bit_tdesc (void)
182 {
183 struct regcache *regcache = get_thread_regcache (current_thread, 0);
184
185 return register_size (regcache->tdesc, 0) == 8;
186 }
187
188 #endif
189
190 \f
191 /* Called by libthread_db. */
192
193 ps_err_e
194 ps_get_thread_area (struct ps_prochandle *ph,
195 lwpid_t lwpid, int idx, void **base)
196 {
197 #ifdef __x86_64__
198 int use_64bit = is_64bit_tdesc ();
199
200 if (use_64bit)
201 {
202 switch (idx)
203 {
204 case FS:
205 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
206 return PS_OK;
207 break;
208 case GS:
209 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
210 return PS_OK;
211 break;
212 default:
213 return PS_BADADDR;
214 }
215 return PS_ERR;
216 }
217 #endif
218
219 {
220 unsigned int desc[4];
221
222 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
223 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
224 return PS_ERR;
225
226 /* Ensure we properly extend the value to 64-bits for x86_64. */
227 *base = (void *) (uintptr_t) desc[1];
228 return PS_OK;
229 }
230 }
231
232 /* Get the thread area address. This is used to recognize which
233 thread is which when tracing with the in-process agent library. We
234 don't read anything from the address, and treat it as opaque; it's
235 the address itself that we assume is unique per-thread. */
236
237 static int
238 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
239 {
240 #ifdef __x86_64__
241 int use_64bit = is_64bit_tdesc ();
242
243 if (use_64bit)
244 {
245 void *base;
246 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
247 {
248 *addr = (CORE_ADDR) (uintptr_t) base;
249 return 0;
250 }
251
252 return -1;
253 }
254 #endif
255
256 {
257 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
258 struct thread_info *thr = get_lwp_thread (lwp);
259 struct regcache *regcache = get_thread_regcache (thr, 1);
260 unsigned int desc[4];
261 ULONGEST gs = 0;
262 const int reg_thread_area = 3; /* Shift to turn the GS selector value into a descriptor index. */
263 int idx;
264
265 collect_register_by_name (regcache, "gs", &gs);
266
267 idx = gs >> reg_thread_area;
268
269 if (ptrace (PTRACE_GET_THREAD_AREA,
270 lwpid_of (thr),
271 (void *) (long) idx, (unsigned long) &desc) < 0)
272 return -1;
273
274 *addr = desc[1];
275 return 0;
276 }
277 }
278
279
280 \f
281 static int
282 x86_cannot_store_register (int regno)
283 {
284 #ifdef __x86_64__
285 if (is_64bit_tdesc ())
286 return 0;
287 #endif
288
289 return regno >= I386_NUM_REGS;
290 }
291
292 static int
293 x86_cannot_fetch_register (int regno)
294 {
295 #ifdef __x86_64__
296 if (is_64bit_tdesc ())
297 return 0;
298 #endif
299
300 return regno >= I386_NUM_REGS;
301 }
302
303 static void
304 x86_fill_gregset (struct regcache *regcache, void *buf)
305 {
306 int i;
307
308 #ifdef __x86_64__
309 if (register_size (regcache->tdesc, 0) == 8)
310 {
311 for (i = 0; i < X86_64_NUM_REGS; i++)
312 if (x86_64_regmap[i] != -1)
313 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
314
315 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
316 {
317 unsigned long base;
318 int lwpid = lwpid_of (current_thread);
319
320 collect_register_by_name (regcache, "fs_base", &base);
321 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
322
323 collect_register_by_name (regcache, "gs_base", &base);
324 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
325 }
326 #endif
327
328 return;
329 }
330
331 /* 32-bit inferior registers need to be zero-extended.
332 Callers would read uninitialized memory otherwise. */
333 memset (buf, 0x00, X86_64_USER_REGS * 8);
334 #endif
335
336 for (i = 0; i < I386_NUM_REGS; i++)
337 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
338
339 collect_register_by_name (regcache, "orig_eax",
340 ((char *) buf) + ORIG_EAX * REGSIZE);
341
342 #ifdef __x86_64__
343 /* Sign extend EAX value to avoid potential syscall restart
344 problems.
345
346 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
347 for a detailed explanation. */
348 if (register_size (regcache->tdesc, 0) == 4)
349 {
350 void *ptr = ((gdb_byte *) buf
351 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
352
353 *(int64_t *) ptr = *(int32_t *) ptr;
354 }
355 #endif
356 }
357
358 static void
359 x86_store_gregset (struct regcache *regcache, const void *buf)
360 {
361 int i;
362
363 #ifdef __x86_64__
364 if (register_size (regcache->tdesc, 0) == 8)
365 {
366 for (i = 0; i < X86_64_NUM_REGS; i++)
367 if (x86_64_regmap[i] != -1)
368 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
369
370 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
371 {
372 unsigned long base;
373 int lwpid = lwpid_of (current_thread);
374
375 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
376 supply_register_by_name (regcache, "fs_base", &base);
377
378 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
379 supply_register_by_name (regcache, "gs_base", &base);
380 }
381 #endif
382 return;
383 }
384 #endif
385
386 for (i = 0; i < I386_NUM_REGS; i++)
387 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
388
389 supply_register_by_name (regcache, "orig_eax",
390 ((char *) buf) + ORIG_EAX * REGSIZE);
391 }
392
393 static void
394 x86_fill_fpregset (struct regcache *regcache, void *buf)
395 {
396 #ifdef __x86_64__
397 i387_cache_to_fxsave (regcache, buf);
398 #else
399 i387_cache_to_fsave (regcache, buf);
400 #endif
401 }
402
403 static void
404 x86_store_fpregset (struct regcache *regcache, const void *buf)
405 {
406 #ifdef __x86_64__
407 i387_fxsave_to_cache (regcache, buf);
408 #else
409 i387_fsave_to_cache (regcache, buf);
410 #endif
411 }
412
413 #ifndef __x86_64__
414
415 static void
416 x86_fill_fpxregset (struct regcache *regcache, void *buf)
417 {
418 i387_cache_to_fxsave (regcache, buf);
419 }
420
421 static void
422 x86_store_fpxregset (struct regcache *regcache, const void *buf)
423 {
424 i387_fxsave_to_cache (regcache, buf);
425 }
426
427 #endif
428
429 static void
430 x86_fill_xstateregset (struct regcache *regcache, void *buf)
431 {
432 i387_cache_to_xsave (regcache, buf);
433 }
434
435 static void
436 x86_store_xstateregset (struct regcache *regcache, const void *buf)
437 {
438 i387_xsave_to_cache (regcache, buf);
439 }
440
441 /* ??? The non-biarch i386 case stores all the i387 regs twice.
442 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
443 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
444 doesn't work. It would be nice to avoid the duplication in the case where it
445 does work. Maybe the arch_setup routine could check whether it works
446 and update the supported regsets accordingly. */
447
448 static struct regset_info x86_regsets[] =
449 {
450 #ifdef HAVE_PTRACE_GETREGS
451 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
452 GENERAL_REGS,
453 x86_fill_gregset, x86_store_gregset },
454 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
455 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
456 # ifndef __x86_64__
457 # ifdef HAVE_PTRACE_GETFPXREGS
458 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
459 EXTENDED_REGS,
460 x86_fill_fpxregset, x86_store_fpxregset },
461 # endif
462 # endif
463 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
464 FP_REGS,
465 x86_fill_fpregset, x86_store_fpregset },
466 #endif /* HAVE_PTRACE_GETREGS */
467 NULL_REGSET
468 };
469
470 static CORE_ADDR
471 x86_get_pc (struct regcache *regcache)
472 {
473 int use_64bit = register_size (regcache->tdesc, 0) == 8;
474
475 if (use_64bit)
476 {
477 uint64_t pc;
478
479 collect_register_by_name (regcache, "rip", &pc);
480 return (CORE_ADDR) pc;
481 }
482 else
483 {
484 uint32_t pc;
485
486 collect_register_by_name (regcache, "eip", &pc);
487 return (CORE_ADDR) pc;
488 }
489 }
490
491 static void
492 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
493 {
494 int use_64bit = register_size (regcache->tdesc, 0) == 8;
495
496 if (use_64bit)
497 {
498 uint64_t newpc = pc;
499
500 supply_register_by_name (regcache, "rip", &newpc);
501 }
502 else
503 {
504 uint32_t newpc = pc;
505
506 supply_register_by_name (regcache, "eip", &newpc);
507 }
508 }
509 \f
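/* The "int3" software breakpoint instruction.  */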
510 static const gdb_byte x86_breakpoint[] = { 0xCC };
511 #define x86_breakpoint_len 1
512
513 static int
514 x86_breakpoint_at (CORE_ADDR pc)
515 {
516 unsigned char c;
517
518 the_target->read_memory (pc, &c, 1);
519 if (c == 0xCC)
520 return 1;
521
522 return 0;
523 }
524 \f
525 /* Low-level function vector. */
526 struct x86_dr_low_type x86_dr_low =
527 {
528 x86_linux_dr_set_control,
529 x86_linux_dr_set_addr,
530 x86_linux_dr_get_addr,
531 x86_linux_dr_get_status,
532 x86_linux_dr_get_control,
533 sizeof (void *),
534 };
535 \f
536 /* Breakpoint/Watchpoint support. */
537
538 static int
539 x86_supports_z_point_type (char z_type)
540 {
541 switch (z_type)
542 {
543 case Z_PACKET_SW_BP:
544 case Z_PACKET_HW_BP:
545 case Z_PACKET_WRITE_WP:
546 case Z_PACKET_ACCESS_WP:
547 return 1;
548 default:
549 return 0;
550 }
551 }
552
553 static int
554 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
555 int size, struct raw_breakpoint *bp)
556 {
557 struct process_info *proc = current_process ();
558
559 switch (type)
560 {
561 case raw_bkpt_type_hw:
562 case raw_bkpt_type_write_wp:
563 case raw_bkpt_type_access_wp:
564 {
565 enum target_hw_bp_type hw_type
566 = raw_bkpt_type_to_target_hw_bp_type (type);
567 struct x86_debug_reg_state *state
568 = &proc->priv->arch_private->debug_reg_state;
569
570 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
571 }
572
573 default:
574 /* Unsupported. */
575 return 1;
576 }
577 }
578
579 static int
580 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
581 int size, struct raw_breakpoint *bp)
582 {
583 struct process_info *proc = current_process ();
584
585 switch (type)
586 {
587 case raw_bkpt_type_hw:
588 case raw_bkpt_type_write_wp:
589 case raw_bkpt_type_access_wp:
590 {
591 enum target_hw_bp_type hw_type
592 = raw_bkpt_type_to_target_hw_bp_type (type);
593 struct x86_debug_reg_state *state
594 = &proc->priv->arch_private->debug_reg_state;
595
596 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
597 }
598 default:
599 /* Unsupported. */
600 return 1;
601 }
602 }
603
604 static int
605 x86_stopped_by_watchpoint (void)
606 {
607 struct process_info *proc = current_process ();
608 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
609 }
610
611 static CORE_ADDR
612 x86_stopped_data_address (void)
613 {
614 struct process_info *proc = current_process ();
615 CORE_ADDR addr;
616 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
617 &addr))
618 return addr;
619 return 0;
620 }
621 \f
622 /* Called when a new process is created. */
623
624 static struct arch_process_info *
625 x86_linux_new_process (void)
626 {
627 struct arch_process_info *info = XCNEW (struct arch_process_info);
628
629 x86_low_init_dregs (&info->debug_reg_state);
630
631 return info;
632 }
633
634 /* Called when a process is being deleted. */
635
636 static void
637 x86_linux_delete_process (struct arch_process_info *info)
638 {
639 xfree (info);
640 }
641
642 /* Target routine for linux_new_fork. */
643
644 static void
645 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
646 {
647 /* These are allocated by linux_add_process. */
648 gdb_assert (parent->priv != NULL
649 && parent->priv->arch_private != NULL);
650 gdb_assert (child->priv != NULL
651 && child->priv->arch_private != NULL);
652
653 /* Linux kernel before 2.6.33 commit
654 72f674d203cd230426437cdcf7dd6f681dad8b0d
655 will inherit hardware debug registers from parent
656 on fork/vfork/clone. Newer Linux kernels create such tasks with
657 zeroed debug registers.
658
659 GDB core assumes the child inherits the watchpoints/hw
660 breakpoints of the parent, and will remove them all from the
661 forked off process. Copy the debug registers mirrors into the
662 new process so that all breakpoints and watchpoints can be
663 removed together. The debug registers mirror will become zeroed
664 in the end before detaching the forked off process, thus making
665 this compatible with older Linux kernels too. */
666
667 *child->priv->arch_private = *parent->priv->arch_private;
668 }
669
670 /* See nat/x86-dregs.h. */
671
672 struct x86_debug_reg_state *
673 x86_debug_reg_state (pid_t pid)
674 {
675 struct process_info *proc = find_process_pid (pid);
676
677 return &proc->priv->arch_private->debug_reg_state;
678 }
679 \f
680 /* When GDBSERVER is built as a 64-bit application on Linux, the
681 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
682 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
683 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
684 conversion in-place ourselves. */
685
686 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
687 layout of the inferior's architecture. Returns true if any
688 conversion was done; false otherwise. If DIRECTION is 1, then copy
689 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
690 INF. */
691
692 static int
693 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
694 {
695 #ifdef __x86_64__
696 unsigned int machine;
697 int tid = lwpid_of (current_thread);
698 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
699
700 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
701 if (!is_64bit_tdesc ())
702 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
703 FIXUP_32);
704 /* No fixup for native x32 GDB. */
705 else if (!is_elf64 && sizeof (void *) == 8)
706 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
707 FIXUP_X32);
708 #endif
709
710 return 0;
711 }
712 \f
713 static int use_xml;
714
715 /* Format of XSAVE extended state is:
716 struct
717 {
718 fxsave_bytes[0..463]
719 sw_usable_bytes[464..511]
720 xstate_hdr_bytes[512..575]
721 avx_bytes[576..831]
722 future_state etc
723 };
724
725 Same memory layout will be used for the coredump NT_X86_XSTATE
726 representing the XSAVE extended state registers.
727
728 The first 8 bytes of sw_usable_bytes (bytes 464..471) are the OS-enabled
729 extended state mask, which is the same as the extended control register
730 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
731 together with the mask saved in the xstate_hdr_bytes to determine what
732 states the processor/OS supports and what state, used or initialized,
733 the process/thread is in. */
734 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
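
/* For illustration (a sketch, not code used by this file): given a raw
   XSAVE buffer XSAVE_BUF of at least X86_XSTATE_SSE_SIZE bytes, the mask
   can be read as

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);

   x86_linux_read_description below does the equivalent by indexing a
   uint64_t array at I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t).  */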
735
736 /* Does the current host support the GETFPXREGS request? The header
737 file may or may not define it, and even if it is defined, the
738 kernel will return EIO if it's running on a pre-SSE processor. */
739 int have_ptrace_getfpxregs =
740 #ifdef HAVE_PTRACE_GETFPXREGS
741 -1
742 #else
743 0
744 #endif
745 ;
746
747 /* Get Linux/x86 target description from running target. */
748
749 static const struct target_desc *
750 x86_linux_read_description (void)
751 {
752 unsigned int machine;
753 int is_elf64;
754 int xcr0_features;
755 int tid;
756 static uint64_t xcr0;
757 struct regset_info *regset;
758
759 tid = lwpid_of (current_thread);
760
761 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
762
763 if (sizeof (void *) == 4)
764 {
765 if (is_elf64 > 0)
766 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
767 #ifndef __x86_64__
768 else if (machine == EM_X86_64)
769 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
770 #endif
771 }
772
773 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
774 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
775 {
776 elf_fpxregset_t fpxregs;
777
778 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
779 {
780 have_ptrace_getfpxregs = 0;
781 have_ptrace_getregset = 0;
782 return i386_linux_read_description (X86_XSTATE_X87);
783 }
784 else
785 have_ptrace_getfpxregs = 1;
786 }
787 #endif
788
789 if (!use_xml)
790 {
791 x86_xcr0 = X86_XSTATE_SSE_MASK;
792
793 /* Don't use XML. */
794 #ifdef __x86_64__
795 if (machine == EM_X86_64)
796 return tdesc_amd64_linux_no_xml;
797 else
798 #endif
799 return tdesc_i386_linux_no_xml;
800 }
801
802 if (have_ptrace_getregset == -1)
803 {
804 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
805 struct iovec iov;
806
807 iov.iov_base = xstateregs;
808 iov.iov_len = sizeof (xstateregs);
809
810 /* Check if PTRACE_GETREGSET works. */
811 if (ptrace (PTRACE_GETREGSET, tid,
812 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
813 have_ptrace_getregset = 0;
814 else
815 {
816 have_ptrace_getregset = 1;
817
818 /* Get XCR0 from XSAVE extended state. */
819 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
820 / sizeof (uint64_t))];
821
822 /* Use PTRACE_GETREGSET if it is available. */
823 for (regset = x86_regsets;
824 regset->fill_function != NULL; regset++)
825 if (regset->get_request == PTRACE_GETREGSET)
826 regset->size = X86_XSTATE_SIZE (xcr0);
827 else if (regset->type != GENERAL_REGS)
828 regset->size = 0;
829 }
830 }
831
832 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
833 xcr0_features = (have_ptrace_getregset
834 && (xcr0 & X86_XSTATE_ALL_MASK));
835
836 if (xcr0_features)
837 x86_xcr0 = xcr0;
838
839 if (machine == EM_X86_64)
840 {
841 #ifdef __x86_64__
842 const target_desc *tdesc = NULL;
843
844 if (xcr0_features)
845 {
846 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
847 !is_elf64);
848 }
849
850 if (tdesc == NULL)
851 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
852 return tdesc;
853 #endif
854 }
855 else
856 {
857 const target_desc *tdesc = NULL;
858
859 if (xcr0_features)
860 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
861
862 if (tdesc == NULL)
863 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
864
865 return tdesc;
866 }
867
868 gdb_assert_not_reached ("failed to return tdesc");
869 }
870
871 /* Update the target description of all processes; a new GDB has
872 connected, and it may or may not support XML target descriptions. */
873
874 static void
875 x86_linux_update_xmltarget (void)
876 {
877 struct thread_info *saved_thread = current_thread;
878
879 /* Before changing the register cache's internal layout, flush the
880 contents of the current valid caches back to the threads, and
881 release the current regcache objects. */
882 regcache_release ();
883
884 for_each_process ([] (process_info *proc) {
885 int pid = proc->pid;
886
887 /* Look up any thread of this process. */
888 current_thread = find_any_thread_of_pid (pid);
889
890 the_low_target.arch_setup ();
891 });
892
893 current_thread = saved_thread;
894 }
895
896 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
897 PTRACE_GETREGSET. */
898
899 static void
900 x86_linux_process_qsupported (char **features, int count)
901 {
902 int i;
903
904 /* Assume gdb doesn't support XML. If gdb sends "xmlRegisters="
905 with "i386" in its qSupported query, it supports x86 XML target
906 descriptions. */
907 use_xml = 0;
908 for (i = 0; i < count; i++)
909 {
910 const char *feature = features[i];
911
912 if (startswith (feature, "xmlRegisters="))
913 {
914 char *copy = xstrdup (feature + 13);
915
916 char *saveptr;
917 for (char *p = strtok_r (copy, ",", &saveptr);
918 p != NULL;
919 p = strtok_r (NULL, ",", &saveptr))
920 {
921 if (strcmp (p, "i386") == 0)
922 {
923 use_xml = 1;
924 break;
925 }
926 }
927
928 free (copy);
929 }
930 }
931 x86_linux_update_xmltarget ();
932 }
933
934 /* Common for x86/x86-64. */
935
936 static struct regsets_info x86_regsets_info =
937 {
938 x86_regsets, /* regsets */
939 0, /* num_regsets */
940 NULL, /* disabled_regsets */
941 };
942
943 #ifdef __x86_64__
944 static struct regs_info amd64_linux_regs_info =
945 {
946 NULL, /* regset_bitmap */
947 NULL, /* usrregs_info */
948 &x86_regsets_info
949 };
950 #endif
951 static struct usrregs_info i386_linux_usrregs_info =
952 {
953 I386_NUM_REGS,
954 i386_regmap,
955 };
956
957 static struct regs_info i386_linux_regs_info =
958 {
959 NULL, /* regset_bitmap */
960 &i386_linux_usrregs_info,
961 &x86_regsets_info
962 };
963
964 static const struct regs_info *
965 x86_linux_regs_info (void)
966 {
967 #ifdef __x86_64__
968 if (is_64bit_tdesc ())
969 return &amd64_linux_regs_info;
970 else
971 #endif
972 return &i386_linux_regs_info;
973 }
974
975 /* Initialize the target description for the architecture of the
976 inferior. */
977
978 static void
979 x86_arch_setup (void)
980 {
981 current_process ()->tdesc = x86_linux_read_description ();
982 }
983
984 /* Fill *SYSNO with the number of the syscall that was trapped. This
985 should only be called if LWP got a SYSCALL_SIGTRAP. */
986
987 static void
988 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
989 {
990 int use_64bit = register_size (regcache->tdesc, 0) == 8;
991
992 if (use_64bit)
993 {
994 long l_sysno;
995
996 collect_register_by_name (regcache, "orig_rax", &l_sysno);
997 *sysno = (int) l_sysno;
998 }
999 else
1000 collect_register_by_name (regcache, "orig_eax", sysno);
1001 }
1002
1003 static int
1004 x86_supports_tracepoints (void)
1005 {
1006 return 1;
1007 }
1008
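/* Write the LEN bytes at BUF to target memory at *TO, then advance *TO
   past what was written.  */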
1009 static void
1010 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1011 {
1012 target_write_memory (*to, buf, len);
1013 *to += len;
1014 }
1015
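/* Parse a string of space-separated hex bytes, e.g. "48 89 e6", into
   BUF and return the number of bytes written.  */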
1016 static int
1017 push_opcode (unsigned char *buf, const char *op)
1018 {
1019 unsigned char *buf_org = buf;
1020
1021 while (1)
1022 {
1023 char *endptr;
1024 unsigned long ul = strtoul (op, &endptr, 16);
1025
1026 if (endptr == op)
1027 break;
1028
1029 *buf++ = ul;
1030 op = endptr;
1031 }
1032
1033 return buf - buf_org;
1034 }
1035
1036 #ifdef __x86_64__
1037
1038 /* Build a jump pad that saves registers and calls a collection
1039 function. Writes the jump instruction that jumps to the jump pad into
1040 JJUMPAD_INSN. The caller is responsible for writing it in at the
1041 tracepoint address. */
1042
1043 static int
1044 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1045 CORE_ADDR collector,
1046 CORE_ADDR lockaddr,
1047 ULONGEST orig_size,
1048 CORE_ADDR *jump_entry,
1049 CORE_ADDR *trampoline,
1050 ULONGEST *trampoline_size,
1051 unsigned char *jjump_pad_insn,
1052 ULONGEST *jjump_pad_insn_size,
1053 CORE_ADDR *adjusted_insn_addr,
1054 CORE_ADDR *adjusted_insn_addr_end,
1055 char *err)
1056 {
1057 unsigned char buf[40];
1058 int i, offset;
1059 int64_t loffset;
1060
1061 CORE_ADDR buildaddr = *jump_entry;
1062
1063 /* Build the jump pad. */
1064
1065 /* First, do tracepoint data collection. Save registers. */
1066 i = 0;
1067 /* Need to ensure stack pointer saved first. */
1068 buf[i++] = 0x54; /* push %rsp */
1069 buf[i++] = 0x55; /* push %rbp */
1070 buf[i++] = 0x57; /* push %rdi */
1071 buf[i++] = 0x56; /* push %rsi */
1072 buf[i++] = 0x52; /* push %rdx */
1073 buf[i++] = 0x51; /* push %rcx */
1074 buf[i++] = 0x53; /* push %rbx */
1075 buf[i++] = 0x50; /* push %rax */
1076 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1077 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1078 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1079 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1080 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1081 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1082 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1083 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1084 buf[i++] = 0x9c; /* pushfq */
1085 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1086 buf[i++] = 0xbf;
1087 memcpy (buf + i, &tpaddr, 8);
1088 i += 8;
1089 buf[i++] = 0x57; /* push %rdi */
1090 append_insns (&buildaddr, i, buf);
1091
1092 /* Stack space for the collecting_t object. */
1093 i = 0;
1094 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1095 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1096 memcpy (buf + i, &tpoint, 8);
1097 i += 8;
1098 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1099 i += push_opcode (&buf[i],
1100 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1101 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1102 append_insns (&buildaddr, i, buf);
1103
1104 /* spin-lock. */
1105 i = 0;
1106 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1107 memcpy (&buf[i], (void *) &lockaddr, 8);
1108 i += 8;
1109 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1110 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1111 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1112 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1113 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1114 append_insns (&buildaddr, i, buf);
1115
1116 /* Set up the gdb_collect call. */
1117 /* At this point, (stack pointer + 0x18) is the base of our saved
1118 register block. */
1119
1120 i = 0;
1121 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1122 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1123
1124 /* tpoint address may be 64-bit wide. */
1125 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1126 memcpy (buf + i, &tpoint, 8);
1127 i += 8;
1128 append_insns (&buildaddr, i, buf);
1129
1130 /* The collector function, being in the shared library, may be more
1131 than 31 bits away from the jump pad. */
1132 i = 0;
1133 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1134 memcpy (buf + i, &collector, 8);
1135 i += 8;
1136 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1137 append_insns (&buildaddr, i, buf);
1138
1139 /* Clear the spin-lock. */
1140 i = 0;
1141 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1142 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1143 memcpy (buf + i, &lockaddr, 8);
1144 i += 8;
1145 append_insns (&buildaddr, i, buf);
1146
1147 /* Remove the stack space that had been used for the collecting_t object. */
1148 i = 0;
1149 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1150 append_insns (&buildaddr, i, buf);
1151
1152 /* Restore register state. */
1153 i = 0;
1154 buf[i++] = 0x48; /* add $0x8,%rsp */
1155 buf[i++] = 0x83;
1156 buf[i++] = 0xc4;
1157 buf[i++] = 0x08;
1158 buf[i++] = 0x9d; /* popfq */
1159 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1160 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1161 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1162 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1163 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1164 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1165 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1166 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1167 buf[i++] = 0x58; /* pop %rax */
1168 buf[i++] = 0x5b; /* pop %rbx */
1169 buf[i++] = 0x59; /* pop %rcx */
1170 buf[i++] = 0x5a; /* pop %rdx */
1171 buf[i++] = 0x5e; /* pop %rsi */
1172 buf[i++] = 0x5f; /* pop %rdi */
1173 buf[i++] = 0x5d; /* pop %rbp */
1174 buf[i++] = 0x5c; /* pop %rsp */
1175 append_insns (&buildaddr, i, buf);
1176
1177 /* Now, adjust the original instruction to execute in the jump
1178 pad. */
1179 *adjusted_insn_addr = buildaddr;
1180 relocate_instruction (&buildaddr, tpaddr);
1181 *adjusted_insn_addr_end = buildaddr;
1182
1183 /* Finally, write a jump back to the program. */
1184
1185 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1186 if (loffset > INT_MAX || loffset < INT_MIN)
1187 {
1188 sprintf (err,
1189 "E.Jump back from jump pad too far from tracepoint "
1190 "(offset 0x%" PRIx64 " > int32).", loffset);
1191 return 1;
1192 }
1193
1194 offset = (int) loffset;
1195 memcpy (buf, jump_insn, sizeof (jump_insn));
1196 memcpy (buf + 1, &offset, 4);
1197 append_insns (&buildaddr, sizeof (jump_insn), buf);
1198
1199 /* The jump pad is now built. Wire in a jump to our jump pad. This
1200 is always done last (by our caller actually), so that we can
1201 install fast tracepoints with threads running. This relies on
1202 the agent's atomic write support. */
1203 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1204 if (loffset > INT_MAX || loffset < INT_MIN)
1205 {
1206 sprintf (err,
1207 "E.Jump pad too far from tracepoint "
1208 "(offset 0x%" PRIx64 " > int32).", loffset);
1209 return 1;
1210 }
1211
1212 offset = (int) loffset;
1213
1214 memcpy (buf, jump_insn, sizeof (jump_insn));
1215 memcpy (buf + 1, &offset, 4);
1216 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1217 *jjump_pad_insn_size = sizeof (jump_insn);
1218
1219 /* Return the end address of our pad. */
1220 *jump_entry = buildaddr;
1221
1222 return 0;
1223 }
1224
1225 #endif /* __x86_64__ */
1226
1227 /* Build a jump pad that saves registers and calls a collection
1228 function. Writes the jump instruction that jumps to the jump pad into
1229 JJUMPAD_INSN. The caller is responsible for writing it in at the
1230 tracepoint address. */
1231
1232 static int
1233 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1234 CORE_ADDR collector,
1235 CORE_ADDR lockaddr,
1236 ULONGEST orig_size,
1237 CORE_ADDR *jump_entry,
1238 CORE_ADDR *trampoline,
1239 ULONGEST *trampoline_size,
1240 unsigned char *jjump_pad_insn,
1241 ULONGEST *jjump_pad_insn_size,
1242 CORE_ADDR *adjusted_insn_addr,
1243 CORE_ADDR *adjusted_insn_addr_end,
1244 char *err)
1245 {
1246 unsigned char buf[0x100];
1247 int i, offset;
1248 CORE_ADDR buildaddr = *jump_entry;
1249
1250 /* Build the jump pad. */
1251
1252 /* First, do tracepoint data collection. Save registers. */
1253 i = 0;
1254 buf[i++] = 0x60; /* pushad */
1255 buf[i++] = 0x68; /* push tpaddr aka $pc */
1256 *((int *)(buf + i)) = (int) tpaddr;
1257 i += 4;
1258 buf[i++] = 0x9c; /* pushf */
1259 buf[i++] = 0x1e; /* push %ds */
1260 buf[i++] = 0x06; /* push %es */
1261 buf[i++] = 0x0f; /* push %fs */
1262 buf[i++] = 0xa0;
1263 buf[i++] = 0x0f; /* push %gs */
1264 buf[i++] = 0xa8;
1265 buf[i++] = 0x16; /* push %ss */
1266 buf[i++] = 0x0e; /* push %cs */
1267 append_insns (&buildaddr, i, buf);
1268
1269 /* Stack space for the collecting_t object. */
1270 i = 0;
1271 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1272
1273 /* Build the object. */
1274 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1275 memcpy (buf + i, &tpoint, 4);
1276 i += 4;
1277 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1278
1279 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1280 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1281 append_insns (&buildaddr, i, buf);
1282
1283 /* Spin-lock. Note this uses cmpxchg, which the original i386 lacks.
1284 If we cared about that, this could use xchg instead. */
1285
1286 i = 0;
1287 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1288 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1289 %esp,<lockaddr> */
1290 memcpy (&buf[i], (void *) &lockaddr, 4);
1291 i += 4;
1292 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1293 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1294 append_insns (&buildaddr, i, buf);
1295
1296
1297 /* Set up arguments to the gdb_collect call. */
1298 i = 0;
1299 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1300 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1301 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1302 append_insns (&buildaddr, i, buf);
1303
1304 i = 0;
1305 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1306 append_insns (&buildaddr, i, buf);
1307
1308 i = 0;
1309 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1310 memcpy (&buf[i], (void *) &tpoint, 4);
1311 i += 4;
1312 append_insns (&buildaddr, i, buf);
1313
1314 buf[0] = 0xe8; /* call <reladdr> */
1315 offset = collector - (buildaddr + sizeof (jump_insn));
1316 memcpy (buf + 1, &offset, 4);
1317 append_insns (&buildaddr, 5, buf);
1318 /* Clean up after the call. */
1319 buf[0] = 0x83; /* add $0x8,%esp */
1320 buf[1] = 0xc4;
1321 buf[2] = 0x08;
1322 append_insns (&buildaddr, 3, buf);
1323
1324
1325 /* Clear the spin-lock. This would need the LOCK prefix on older
1326 broken archs. */
1327 i = 0;
1328 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1329 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1330 memcpy (buf + i, &lockaddr, 4);
1331 i += 4;
1332 append_insns (&buildaddr, i, buf);
1333
1334
1335 /* Remove the stack space that had been used for the collecting_t object. */
1336 i = 0;
1337 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1338 append_insns (&buildaddr, i, buf);
1339
1340 i = 0;
1341 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1342 buf[i++] = 0xc4;
1343 buf[i++] = 0x04;
1344 buf[i++] = 0x17; /* pop %ss */
1345 buf[i++] = 0x0f; /* pop %gs */
1346 buf[i++] = 0xa9;
1347 buf[i++] = 0x0f; /* pop %fs */
1348 buf[i++] = 0xa1;
1349 buf[i++] = 0x07; /* pop %es */
1350 buf[i++] = 0x1f; /* pop %ds */
1351 buf[i++] = 0x9d; /* popf */
1352 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1353 buf[i++] = 0xc4;
1354 buf[i++] = 0x04;
1355 buf[i++] = 0x61; /* popad */
1356 append_insns (&buildaddr, i, buf);
1357
1358 /* Now, adjust the original instruction to execute in the jump
1359 pad. */
1360 *adjusted_insn_addr = buildaddr;
1361 relocate_instruction (&buildaddr, tpaddr);
1362 *adjusted_insn_addr_end = buildaddr;
1363
1364 /* Write the jump back to the program. */
1365 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1366 memcpy (buf, jump_insn, sizeof (jump_insn));
1367 memcpy (buf + 1, &offset, 4);
1368 append_insns (&buildaddr, sizeof (jump_insn), buf);
1369
1370 /* The jump pad is now built. Wire in a jump to our jump pad. This
1371 is always done last (by our caller actually), so that we can
1372 install fast tracepoints with threads running. This relies on
1373 the agent's atomic write support. */
1374 if (orig_size == 4)
1375 {
1376 /* Create a trampoline. */
1377 *trampoline_size = sizeof (jump_insn);
1378 if (!claim_trampoline_space (*trampoline_size, trampoline))
1379 {
1380 /* No trampoline space available. */
1381 strcpy (err,
1382 "E.Cannot allocate trampoline space needed for fast "
1383 "tracepoints on 4-byte instructions.");
1384 return 1;
1385 }
1386
1387 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1388 memcpy (buf, jump_insn, sizeof (jump_insn));
1389 memcpy (buf + 1, &offset, 4);
1390 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1391
1392 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1393 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1394 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1395 memcpy (buf + 2, &offset, 2);
1396 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1397 *jjump_pad_insn_size = sizeof (small_jump_insn);
1398 }
1399 else
1400 {
1401 /* Else use a 32-bit relative jump instruction. */
1402 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1403 memcpy (buf, jump_insn, sizeof (jump_insn));
1404 memcpy (buf + 1, &offset, 4);
1405 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1406 *jjump_pad_insn_size = sizeof (jump_insn);
1407 }
1408
1409 /* Return the end address of our pad. */
1410 *jump_entry = buildaddr;
1411
1412 return 0;
1413 }
1414
1415 static int
1416 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1417 CORE_ADDR collector,
1418 CORE_ADDR lockaddr,
1419 ULONGEST orig_size,
1420 CORE_ADDR *jump_entry,
1421 CORE_ADDR *trampoline,
1422 ULONGEST *trampoline_size,
1423 unsigned char *jjump_pad_insn,
1424 ULONGEST *jjump_pad_insn_size,
1425 CORE_ADDR *adjusted_insn_addr,
1426 CORE_ADDR *adjusted_insn_addr_end,
1427 char *err)
1428 {
1429 #ifdef __x86_64__
1430 if (is_64bit_tdesc ())
1431 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1432 collector, lockaddr,
1433 orig_size, jump_entry,
1434 trampoline, trampoline_size,
1435 jjump_pad_insn,
1436 jjump_pad_insn_size,
1437 adjusted_insn_addr,
1438 adjusted_insn_addr_end,
1439 err);
1440 #endif
1441
1442 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1443 collector, lockaddr,
1444 orig_size, jump_entry,
1445 trampoline, trampoline_size,
1446 jjump_pad_insn,
1447 jjump_pad_insn_size,
1448 adjusted_insn_addr,
1449 adjusted_insn_addr_end,
1450 err);
1451 }
1452
1453 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1454 architectures. */
1455
1456 static int
1457 x86_get_min_fast_tracepoint_insn_len (void)
1458 {
1459 static int warned_about_fast_tracepoints = 0;
1460
1461 #ifdef __x86_64__
1462 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1463 used for fast tracepoints. */
1464 if (is_64bit_tdesc ())
1465 return 5;
1466 #endif
1467
1468 if (agent_loaded_p ())
1469 {
1470 char errbuf[IPA_BUFSIZ];
1471
1472 errbuf[0] = '\0';
1473
1474 /* On x86, if trampolines are available, then 4-byte jump instructions
1475 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1476 with a 4-byte offset are used instead. */
1477 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1478 return 4;
1479 else
1480 {
1481 /* GDB has no channel to explain to the user why a shorter fast
1482 tracepoint is not possible, but at least make GDBserver
1483 mention that something has gone awry. */
1484 if (!warned_about_fast_tracepoints)
1485 {
1486 warning ("4-byte fast tracepoints not available; %s", errbuf);
1487 warned_about_fast_tracepoints = 1;
1488 }
1489 return 5;
1490 }
1491 }
1492 else
1493 {
1494 /* Indicate that the minimum length is currently unknown since the IPA
1495 has not loaded yet. */
1496 return 0;
1497 }
1498 }
1499
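/* Append LEN bytes of machine code at START to the code being built at
   current_insn_ptr, advancing current_insn_ptr past it.  */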
1500 static void
1501 add_insns (unsigned char *start, int len)
1502 {
1503 CORE_ADDR buildaddr = current_insn_ptr;
1504
1505 if (debug_threads)
1506 debug_printf ("Adding %d bytes of insn at %s\n",
1507 len, paddress (buildaddr));
1508
1509 append_insns (&buildaddr, len, start);
1510 current_insn_ptr = buildaddr;
1511 }
1512
1513 /* Our general strategy for emitting code is to avoid specifying raw
1514 bytes whenever possible, and instead copy a block of inline asm
1515 that is embedded in the function. This is a little messy, because
1516 we need to keep the compiler from discarding what looks like dead
1517 code, plus suppress various warnings. */
1518
1519 #define EMIT_ASM(NAME, INSNS) \
1520 do \
1521 { \
1522 extern unsigned char start_ ## NAME, end_ ## NAME; \
1523 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1524 __asm__ ("jmp end_" #NAME "\n" \
1525 "\t" "start_" #NAME ":" \
1526 "\t" INSNS "\n" \
1527 "\t" "end_" #NAME ":"); \
1528 } while (0)
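
/* For example, EMIT_ASM (amd64_pop, "pop %rax") embeds the single
   "pop %rax" instruction between the labels start_amd64_pop and
   end_amd64_pop, with a jump over it so it is never executed in place;
   add_insns then copies the bytes between those labels into the code
   being built.  */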
1529
1530 #ifdef __x86_64__
1531
1532 #define EMIT_ASM32(NAME,INSNS) \
1533 do \
1534 { \
1535 extern unsigned char start_ ## NAME, end_ ## NAME; \
1536 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1537 __asm__ (".code32\n" \
1538 "\t" "jmp end_" #NAME "\n" \
1539 "\t" "start_" #NAME ":\n" \
1540 "\t" INSNS "\n" \
1541 "\t" "end_" #NAME ":\n" \
1542 ".code64\n"); \
1543 } while (0)
1544
1545 #else
1546
1547 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1548
1549 #endif
1550
1551 #ifdef __x86_64__
1552
1553 static void
1554 amd64_emit_prologue (void)
1555 {
1556 EMIT_ASM (amd64_prologue,
1557 "pushq %rbp\n\t"
1558 "movq %rsp,%rbp\n\t"
1559 "sub $0x20,%rsp\n\t"
1560 "movq %rdi,-8(%rbp)\n\t"
1561 "movq %rsi,-16(%rbp)");
1562 }
1563
1564
1565 static void
1566 amd64_emit_epilogue (void)
1567 {
1568 EMIT_ASM (amd64_epilogue,
1569 "movq -16(%rbp),%rdi\n\t"
1570 "movq %rax,(%rdi)\n\t"
1571 "xor %rax,%rax\n\t"
1572 "leave\n\t"
1573 "ret");
1574 }
1575
1576 static void
1577 amd64_emit_add (void)
1578 {
1579 EMIT_ASM (amd64_add,
1580 "add (%rsp),%rax\n\t"
1581 "lea 0x8(%rsp),%rsp");
1582 }
1583
1584 static void
1585 amd64_emit_sub (void)
1586 {
1587 EMIT_ASM (amd64_sub,
1588 "sub %rax,(%rsp)\n\t"
1589 "pop %rax");
1590 }
1591
1592 static void
1593 amd64_emit_mul (void)
1594 {
1595 emit_error = 1;
1596 }
1597
1598 static void
1599 amd64_emit_lsh (void)
1600 {
1601 emit_error = 1;
1602 }
1603
1604 static void
1605 amd64_emit_rsh_signed (void)
1606 {
1607 emit_error = 1;
1608 }
1609
1610 static void
1611 amd64_emit_rsh_unsigned (void)
1612 {
1613 emit_error = 1;
1614 }
1615
1616 static void
1617 amd64_emit_ext (int arg)
1618 {
1619 switch (arg)
1620 {
1621 case 8:
1622 EMIT_ASM (amd64_ext_8,
1623 "cbtw\n\t"
1624 "cwtl\n\t"
1625 "cltq");
1626 break;
1627 case 16:
1628 EMIT_ASM (amd64_ext_16,
1629 "cwtl\n\t"
1630 "cltq");
1631 break;
1632 case 32:
1633 EMIT_ASM (amd64_ext_32,
1634 "cltq");
1635 break;
1636 default:
1637 emit_error = 1;
1638 }
1639 }
1640
1641 static void
1642 amd64_emit_log_not (void)
1643 {
1644 EMIT_ASM (amd64_log_not,
1645 "test %rax,%rax\n\t"
1646 "sete %cl\n\t"
1647 "movzbq %cl,%rax");
1648 }
1649
1650 static void
1651 amd64_emit_bit_and (void)
1652 {
1653 EMIT_ASM (amd64_and,
1654 "and (%rsp),%rax\n\t"
1655 "lea 0x8(%rsp),%rsp");
1656 }
1657
1658 static void
1659 amd64_emit_bit_or (void)
1660 {
1661 EMIT_ASM (amd64_or,
1662 "or (%rsp),%rax\n\t"
1663 "lea 0x8(%rsp),%rsp");
1664 }
1665
1666 static void
1667 amd64_emit_bit_xor (void)
1668 {
1669 EMIT_ASM (amd64_xor,
1670 "xor (%rsp),%rax\n\t"
1671 "lea 0x8(%rsp),%rsp");
1672 }
1673
1674 static void
1675 amd64_emit_bit_not (void)
1676 {
1677 EMIT_ASM (amd64_bit_not,
1678 "xorq $0xffffffffffffffff,%rax");
1679 }
1680
1681 static void
1682 amd64_emit_equal (void)
1683 {
1684 EMIT_ASM (amd64_equal,
1685 "cmp %rax,(%rsp)\n\t"
1686 "je .Lamd64_equal_true\n\t"
1687 "xor %rax,%rax\n\t"
1688 "jmp .Lamd64_equal_end\n\t"
1689 ".Lamd64_equal_true:\n\t"
1690 "mov $0x1,%rax\n\t"
1691 ".Lamd64_equal_end:\n\t"
1692 "lea 0x8(%rsp),%rsp");
1693 }
1694
1695 static void
1696 amd64_emit_less_signed (void)
1697 {
1698 EMIT_ASM (amd64_less_signed,
1699 "cmp %rax,(%rsp)\n\t"
1700 "jl .Lamd64_less_signed_true\n\t"
1701 "xor %rax,%rax\n\t"
1702 "jmp .Lamd64_less_signed_end\n\t"
1703 ".Lamd64_less_signed_true:\n\t"
1704 "mov $1,%rax\n\t"
1705 ".Lamd64_less_signed_end:\n\t"
1706 "lea 0x8(%rsp),%rsp");
1707 }
1708
1709 static void
1710 amd64_emit_less_unsigned (void)
1711 {
1712 EMIT_ASM (amd64_less_unsigned,
1713 "cmp %rax,(%rsp)\n\t"
1714 "jb .Lamd64_less_unsigned_true\n\t"
1715 "xor %rax,%rax\n\t"
1716 "jmp .Lamd64_less_unsigned_end\n\t"
1717 ".Lamd64_less_unsigned_true:\n\t"
1718 "mov $1,%rax\n\t"
1719 ".Lamd64_less_unsigned_end:\n\t"
1720 "lea 0x8(%rsp),%rsp");
1721 }
1722
1723 static void
1724 amd64_emit_ref (int size)
1725 {
1726 switch (size)
1727 {
1728 case 1:
1729 EMIT_ASM (amd64_ref1,
1730 "movb (%rax),%al");
1731 break;
1732 case 2:
1733 EMIT_ASM (amd64_ref2,
1734 "movw (%rax),%ax");
1735 break;
1736 case 4:
1737 EMIT_ASM (amd64_ref4,
1738 "movl (%rax),%eax");
1739 break;
1740 case 8:
1741 EMIT_ASM (amd64_ref8,
1742 "movq (%rax),%rax");
1743 break;
1744 }
1745 }
1746
1747 static void
1748 amd64_emit_if_goto (int *offset_p, int *size_p)
1749 {
1750 EMIT_ASM (amd64_if_goto,
1751 "mov %rax,%rcx\n\t"
1752 "pop %rax\n\t"
1753 "cmp $0,%rcx\n\t"
1754 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1755 if (offset_p)
1756 *offset_p = 10;
1757 if (size_p)
1758 *size_p = 4;
1759 }
1760
1761 static void
1762 amd64_emit_goto (int *offset_p, int *size_p)
1763 {
1764 EMIT_ASM (amd64_goto,
1765 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1766 if (offset_p)
1767 *offset_p = 1;
1768 if (size_p)
1769 *size_p = 4;
1770 }
1771
1772 static void
1773 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1774 {
1775 int diff = (to - (from + size));
1776 unsigned char buf[sizeof (int)];
1777
1778 if (size != 4)
1779 {
1780 emit_error = 1;
1781 return;
1782 }
1783
1784 memcpy (buf, &diff, sizeof (int));
1785 target_write_memory (from, buf, sizeof (int));
1786 }
1787
1788 static void
1789 amd64_emit_const (LONGEST num)
1790 {
1791 unsigned char buf[16];
1792 int i;
1793 CORE_ADDR buildaddr = current_insn_ptr;
1794
1795 i = 0;
1796 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1797 memcpy (&buf[i], &num, sizeof (num));
1798 i += 8;
1799 append_insns (&buildaddr, i, buf);
1800 current_insn_ptr = buildaddr;
1801 }
1802
1803 static void
1804 amd64_emit_call (CORE_ADDR fn)
1805 {
1806 unsigned char buf[16];
1807 int i;
1808 CORE_ADDR buildaddr;
1809 LONGEST offset64;
1810
1811 /* The destination function, being in the shared library, may be more
1812 than 31 bits away from the compiled code pad. */
1813
1814 buildaddr = current_insn_ptr;
1815
1816 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1817
1818 i = 0;
1819
1820 if (offset64 > INT_MAX || offset64 < INT_MIN)
1821 {
1822 /* Offset is too large for a direct call, so call indirectly
1823 through a register instead. Use %r10: since it is
1824 call-clobbered, we don't have to push/pop it. */
1825 buf[i++] = 0x48; /* mov $fn,%r10 */
1826 buf[i++] = 0xba;
1827 memcpy (buf + i, &fn, 8);
1828 i += 8;
1829 buf[i++] = 0xff; /* callq *%r10 */
1830 buf[i++] = 0xd2;
1831 }
1832 else
1833 {
1834 int offset32 = offset64; /* we know we can't overflow here. */
1835
1836 buf[i++] = 0xe8; /* call <reladdr> */
1837 memcpy (buf + i, &offset32, 4);
1838 i += 4;
1839 }
1840
1841 append_insns (&buildaddr, i, buf);
1842 current_insn_ptr = buildaddr;
1843 }
1844
1845 static void
1846 amd64_emit_reg (int reg)
1847 {
1848 unsigned char buf[16];
1849 int i;
1850 CORE_ADDR buildaddr;
1851
1852 /* Assume raw_regs is still in %rdi. */
1853 buildaddr = current_insn_ptr;
1854 i = 0;
1855 buf[i++] = 0xbe; /* mov $<n>,%esi */
1856 memcpy (&buf[i], &reg, sizeof (reg));
1857 i += 4;
1858 append_insns (&buildaddr, i, buf);
1859 current_insn_ptr = buildaddr;
1860 amd64_emit_call (get_raw_reg_func_addr ());
1861 }
1862
1863 static void
1864 amd64_emit_pop (void)
1865 {
1866 EMIT_ASM (amd64_pop,
1867 "pop %rax");
1868 }
1869
1870 static void
1871 amd64_emit_stack_flush (void)
1872 {
1873 EMIT_ASM (amd64_stack_flush,
1874 "push %rax");
1875 }
1876
1877 static void
1878 amd64_emit_zero_ext (int arg)
1879 {
1880 switch (arg)
1881 {
1882 case 8:
1883 EMIT_ASM (amd64_zero_ext_8,
1884 "and $0xff,%rax");
1885 break;
1886 case 16:
1887 EMIT_ASM (amd64_zero_ext_16,
1888 "and $0xffff,%rax");
1889 break;
1890 case 32:
1891 EMIT_ASM (amd64_zero_ext_32,
1892 "mov $0xffffffff,%rcx\n\t"
1893 "and %rcx,%rax");
1894 break;
1895 default:
1896 emit_error = 1;
1897 }
1898 }
1899
1900 static void
1901 amd64_emit_swap (void)
1902 {
1903 EMIT_ASM (amd64_swap,
1904 "mov %rax,%rcx\n\t"
1905 "pop %rax\n\t"
1906 "push %rcx");
1907 }
1908
1909 static void
1910 amd64_emit_stack_adjust (int n)
1911 {
1912 unsigned char buf[16];
1913 int i;
1914 CORE_ADDR buildaddr = current_insn_ptr;
1915
1916 i = 0;
1917 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1918 buf[i++] = 0x8d;
1919 buf[i++] = 0x64;
1920 buf[i++] = 0x24;
1921 /* This only handles adjustments up to 16, but we don't expect any more. */
1922 buf[i++] = n * 8;
1923 append_insns (&buildaddr, i, buf);
1924 current_insn_ptr = buildaddr;
1925 }
1926
1927 /* FN's prototype is `LONGEST(*fn)(int)'. */
1928
1929 static void
1930 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1931 {
1932 unsigned char buf[16];
1933 int i;
1934 CORE_ADDR buildaddr;
1935
1936 buildaddr = current_insn_ptr;
1937 i = 0;
1938 buf[i++] = 0xbf; /* movl $<n>,%edi */
1939 memcpy (&buf[i], &arg1, sizeof (arg1));
1940 i += 4;
1941 append_insns (&buildaddr, i, buf);
1942 current_insn_ptr = buildaddr;
1943 amd64_emit_call (fn);
1944 }
1945
1946 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1947
1948 static void
1949 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1950 {
1951 unsigned char buf[16];
1952 int i;
1953 CORE_ADDR buildaddr;
1954
1955 buildaddr = current_insn_ptr;
1956 i = 0;
1957 buf[i++] = 0xbf; /* movl $<n>,%edi */
1958 memcpy (&buf[i], &arg1, sizeof (arg1));
1959 i += 4;
1960 append_insns (&buildaddr, i, buf);
1961 current_insn_ptr = buildaddr;
1962 EMIT_ASM (amd64_void_call_2_a,
1963 /* Save away a copy of the stack top. */
1964 "push %rax\n\t"
1965 /* Also pass top as the second argument. */
1966 "mov %rax,%rsi");
1967 amd64_emit_call (fn);
1968 EMIT_ASM (amd64_void_call_2_b,
1969 /* Restore the stack top, %rax may have been trashed. */
1970 "pop %rax");
1971 }
1972
1973 static void
1974 amd64_emit_eq_goto (int *offset_p, int *size_p)
1975 {
1976 EMIT_ASM (amd64_eq,
1977 "cmp %rax,(%rsp)\n\t"
1978 "jne .Lamd64_eq_fallthru\n\t"
1979 "lea 0x8(%rsp),%rsp\n\t"
1980 "pop %rax\n\t"
1981 /* jmp, but don't trust the assembler to choose the right jump */
1982 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1983 ".Lamd64_eq_fallthru:\n\t"
1984 "lea 0x8(%rsp),%rsp\n\t"
1985 "pop %rax");
1986
1987 if (offset_p)
1988 *offset_p = 13;
1989 if (size_p)
1990 *size_p = 4;
1991 }
1992
1993 static void
1994 amd64_emit_ne_goto (int *offset_p, int *size_p)
1995 {
1996 EMIT_ASM (amd64_ne,
1997 "cmp %rax,(%rsp)\n\t"
1998 "je .Lamd64_ne_fallthru\n\t"
1999 "lea 0x8(%rsp),%rsp\n\t"
2000 "pop %rax\n\t"
2001 /* jmp, but don't trust the assembler to choose the right jump */
2002 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2003 ".Lamd64_ne_fallthru:\n\t"
2004 "lea 0x8(%rsp),%rsp\n\t"
2005 "pop %rax");
2006
2007 if (offset_p)
2008 *offset_p = 13;
2009 if (size_p)
2010 *size_p = 4;
2011 }
2012
2013 static void
2014 amd64_emit_lt_goto (int *offset_p, int *size_p)
2015 {
2016 EMIT_ASM (amd64_lt,
2017 "cmp %rax,(%rsp)\n\t"
2018 "jnl .Lamd64_lt_fallthru\n\t"
2019 "lea 0x8(%rsp),%rsp\n\t"
2020 "pop %rax\n\t"
2021 /* jmp, but don't trust the assembler to choose the right jump */
2022 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2023 ".Lamd64_lt_fallthru:\n\t"
2024 "lea 0x8(%rsp),%rsp\n\t"
2025 "pop %rax");
2026
2027 if (offset_p)
2028 *offset_p = 13;
2029 if (size_p)
2030 *size_p = 4;
2031 }
2032
2033 static void
2034 amd64_emit_le_goto (int *offset_p, int *size_p)
2035 {
2036 EMIT_ASM (amd64_le,
2037 "cmp %rax,(%rsp)\n\t"
2038 "jnle .Lamd64_le_fallthru\n\t"
2039 "lea 0x8(%rsp),%rsp\n\t"
2040 "pop %rax\n\t"
2041 /* jmp, but don't trust the assembler to choose the right jump */
2042 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2043 ".Lamd64_le_fallthru:\n\t"
2044 "lea 0x8(%rsp),%rsp\n\t"
2045 "pop %rax");
2046
2047 if (offset_p)
2048 *offset_p = 13;
2049 if (size_p)
2050 *size_p = 4;
2051 }
2052
2053 static void
2054 amd64_emit_gt_goto (int *offset_p, int *size_p)
2055 {
2056 EMIT_ASM (amd64_gt,
2057 "cmp %rax,(%rsp)\n\t"
2058 "jng .Lamd64_gt_fallthru\n\t"
2059 "lea 0x8(%rsp),%rsp\n\t"
2060 "pop %rax\n\t"
2061 /* jmp, but don't trust the assembler to choose the right jump */
2062 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2063 ".Lamd64_gt_fallthru:\n\t"
2064 "lea 0x8(%rsp),%rsp\n\t"
2065 "pop %rax");
2066
2067 if (offset_p)
2068 *offset_p = 13;
2069 if (size_p)
2070 *size_p = 4;
2071 }
2072
2073 static void
2074 amd64_emit_ge_goto (int *offset_p, int *size_p)
2075 {
2076 EMIT_ASM (amd64_ge,
2077 "cmp %rax,(%rsp)\n\t"
2078 "jnge .Lamd64_ge_fallthru\n\t"
2079 ".Lamd64_ge_jump:\n\t"
2080 "lea 0x8(%rsp),%rsp\n\t"
2081 "pop %rax\n\t"
2082 /* jmp, but don't trust the assembler to choose the right jump */
2083 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2084 ".Lamd64_ge_fallthru:\n\t"
2085 "lea 0x8(%rsp),%rsp\n\t"
2086 "pop %rax");
2087
2088 if (offset_p)
2089 *offset_p = 13;
2090 if (size_p)
2091 *size_p = 4;
2092 }
2093
2094 struct emit_ops amd64_emit_ops =
2095 {
2096 amd64_emit_prologue,
2097 amd64_emit_epilogue,
2098 amd64_emit_add,
2099 amd64_emit_sub,
2100 amd64_emit_mul,
2101 amd64_emit_lsh,
2102 amd64_emit_rsh_signed,
2103 amd64_emit_rsh_unsigned,
2104 amd64_emit_ext,
2105 amd64_emit_log_not,
2106 amd64_emit_bit_and,
2107 amd64_emit_bit_or,
2108 amd64_emit_bit_xor,
2109 amd64_emit_bit_not,
2110 amd64_emit_equal,
2111 amd64_emit_less_signed,
2112 amd64_emit_less_unsigned,
2113 amd64_emit_ref,
2114 amd64_emit_if_goto,
2115 amd64_emit_goto,
2116 amd64_write_goto_address,
2117 amd64_emit_const,
2118 amd64_emit_call,
2119 amd64_emit_reg,
2120 amd64_emit_pop,
2121 amd64_emit_stack_flush,
2122 amd64_emit_zero_ext,
2123 amd64_emit_swap,
2124 amd64_emit_stack_adjust,
2125 amd64_emit_int_call_1,
2126 amd64_emit_void_call_2,
2127 amd64_emit_eq_goto,
2128 amd64_emit_ne_goto,
2129 amd64_emit_lt_goto,
2130 amd64_emit_le_goto,
2131 amd64_emit_gt_goto,
2132 amd64_emit_ge_goto
2133 };
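/* This table is the emit_ops vtable that x86_emit_ops (below) hands to
   gdbserver's generic agent-expression compiler: the compiler walks the
   bytecode of a tracepoint condition and calls these hooks to append
   native code at current_insn_ptr.  A rough caller-side sketch of how the
   conditional-goto hooks cooperate (illustrative pseudocode only; the
   variable names are invented and the real driver lives in the
   architecture-independent tracepoint code):

       int offset, size;
       CORE_ADDR insn_start = current_insn_ptr;

       amd64_emit_eq_goto (&offset, &size);     -- compare + jump stub
       ... later, once the target address is known ...
       amd64_write_goto_address (insn_start + offset, target, size);  */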
2134
2135 #endif /* __x86_64__ */
2136
2137 static void
2138 i386_emit_prologue (void)
2139 {
2140 EMIT_ASM32 (i386_prologue,
2141 "push %ebp\n\t"
2142 "mov %esp,%ebp\n\t"
2143 "push %ebx");
2144 /* At this point, the raw regs base address is at 8(%ebp), and the
2145 value pointer is at 12(%ebp). */
2146 }
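/* In the 32-bit emitters the compiled code keeps the 64-bit top-of-stack
   value split across registers: low word in %eax, high word in %ebx, with
   older entries pushed on the machine stack as 8-byte pairs.  That is why
   the epilogue below stores %eax at (%ecx) and %ebx at 4(%ecx), and why
   the binary operators combine (%esp)/4(%esp) with %eax/%ebx.  */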
2147
2148 static void
2149 i386_emit_epilogue (void)
2150 {
2151 EMIT_ASM32 (i386_epilogue,
2152 "mov 12(%ebp),%ecx\n\t"
2153 "mov %eax,(%ecx)\n\t"
2154 "mov %ebx,0x4(%ecx)\n\t"
2155 "xor %eax,%eax\n\t"
2156 "pop %ebx\n\t"
2157 "pop %ebp\n\t"
2158 "ret");
2159 }
2160
2161 static void
2162 i386_emit_add (void)
2163 {
2164 EMIT_ASM32 (i386_add,
2165 "add (%esp),%eax\n\t"
2166 "adc 0x4(%esp),%ebx\n\t"
2167 "lea 0x8(%esp),%esp");
2168 }
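/* The 64-bit addition is synthesized from two 32-bit instructions: "add"
   produces the low word and sets the carry flag, and "adc" folds that
   carry into the high word.  For example, adding 1 to 0x00000000ffffffff
   wraps the low word to 0 with carry set, and the adc then bumps the high
   word to 1.  The trailing lea discards the consumed 8-byte stack slot.  */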
2169
2170 static void
2171 i386_emit_sub (void)
2172 {
2173 EMIT_ASM32 (i386_sub,
2174 "subl %eax,(%esp)\n\t"
2175 "sbbl %ebx,4(%esp)\n\t"
2176 "pop %eax\n\t"
2177 "pop %ebx\n\t");
2178 }
2179
2180 static void
2181 i386_emit_mul (void)
2182 {
2183 emit_error = 1;
2184 }
2185
2186 static void
2187 i386_emit_lsh (void)
2188 {
2189 emit_error = 1;
2190 }
2191
2192 static void
2193 i386_emit_rsh_signed (void)
2194 {
2195 emit_error = 1;
2196 }
2197
2198 static void
2199 i386_emit_rsh_unsigned (void)
2200 {
2201 emit_error = 1;
2202 }
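/* 64-bit multiply and the shift operations are not implemented in the
   32-bit emitter.  Setting emit_error makes compilation of the current
   agent expression fail, and the expression is then presumably evaluated
   by the regular bytecode interpreter instead of by compiled code.  */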
2203
2204 static void
2205 i386_emit_ext (int arg)
2206 {
2207 switch (arg)
2208 {
2209 case 8:
2210 EMIT_ASM32 (i386_ext_8,
2211 "cbtw\n\t"
2212 "cwtl\n\t"
2213 "movl %eax,%ebx\n\t"
2214 "sarl $31,%ebx");
2215 break;
2216 case 16:
2217 EMIT_ASM32 (i386_ext_16,
2218 "cwtl\n\t"
2219 "movl %eax,%ebx\n\t"
2220 "sarl $31,%ebx");
2221 break;
2222 case 32:
2223 EMIT_ASM32 (i386_ext_32,
2224 "movl %eax,%ebx\n\t"
2225 "sarl $31,%ebx");
2226 break;
2227 default:
2228 emit_error = 1;
2229 }
2230 }
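/* The 8-bit case widens %al twice (cbtw: byte to word, cwtl: word to
   long) and then replicates the sign bit through %ebx with an arithmetic
   shift by 31, so the %ebx:%eax pair ends up properly sign-extended.
   E.g. an 8-bit 0xff becomes %eax = 0xffffffff, %ebx = 0xffffffff (-1),
   while 0x7f becomes %eax = 0x0000007f, %ebx = 0.  */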
2231
2232 static void
2233 i386_emit_log_not (void)
2234 {
2235 EMIT_ASM32 (i386_log_not,
2236 "or %ebx,%eax\n\t"
2237 "test %eax,%eax\n\t"
2238 "sete %cl\n\t"
2239 "xor %ebx,%ebx\n\t"
2240 "movzbl %cl,%eax");
2241 }
2242
2243 static void
2244 i386_emit_bit_and (void)
2245 {
2246 EMIT_ASM32 (i386_and,
2247 "and (%esp),%eax\n\t"
2248 "and 0x4(%esp),%ebx\n\t"
2249 "lea 0x8(%esp),%esp");
2250 }
2251
2252 static void
2253 i386_emit_bit_or (void)
2254 {
2255 EMIT_ASM32 (i386_or,
2256 "or (%esp),%eax\n\t"
2257 "or 0x4(%esp),%ebx\n\t"
2258 "lea 0x8(%esp),%esp");
2259 }
2260
2261 static void
2262 i386_emit_bit_xor (void)
2263 {
2264 EMIT_ASM32 (i386_xor,
2265 "xor (%esp),%eax\n\t"
2266 "xor 0x4(%esp),%ebx\n\t"
2267 "lea 0x8(%esp),%esp");
2268 }
2269
2270 static void
2271 i386_emit_bit_not (void)
2272 {
2273 EMIT_ASM32 (i386_bit_not,
2274 "xor $0xffffffff,%eax\n\t"
2275 "xor $0xffffffff,%ebx\n\t");
2276 }
2277
2278 static void
2279 i386_emit_equal (void)
2280 {
2281 EMIT_ASM32 (i386_equal,
2282 "cmpl %ebx,4(%esp)\n\t"
2283 "jne .Li386_equal_false\n\t"
2284 "cmpl %eax,(%esp)\n\t"
2285 "je .Li386_equal_true\n\t"
2286 ".Li386_equal_false:\n\t"
2287 "xor %eax,%eax\n\t"
2288 "jmp .Li386_equal_end\n\t"
2289 ".Li386_equal_true:\n\t"
2290 "mov $1,%eax\n\t"
2291 ".Li386_equal_end:\n\t"
2292 "xor %ebx,%ebx\n\t"
2293 "lea 0x8(%esp),%esp");
2294 }
2295
2296 static void
2297 i386_emit_less_signed (void)
2298 {
2299 EMIT_ASM32 (i386_less_signed,
2300 "cmpl %ebx,4(%esp)\n\t"
2301 "jl .Li386_less_signed_true\n\t"
2302 "jne .Li386_less_signed_false\n\t"
2303 "cmpl %eax,(%esp)\n\t"
2304 "jl .Li386_less_signed_true\n\t"
2305 ".Li386_less_signed_false:\n\t"
2306 "xor %eax,%eax\n\t"
2307 "jmp .Li386_less_signed_end\n\t"
2308 ".Li386_less_signed_true:\n\t"
2309 "mov $1,%eax\n\t"
2310 ".Li386_less_signed_end:\n\t"
2311 "xor %ebx,%ebx\n\t"
2312 "lea 0x8(%esp),%esp");
2313 }
2314
2315 static void
2316 i386_emit_less_unsigned (void)
2317 {
2318 EMIT_ASM32 (i386_less_unsigned,
2319 "cmpl %ebx,4(%esp)\n\t"
2320 "jb .Li386_less_unsigned_true\n\t"
2321 "jne .Li386_less_unsigned_false\n\t"
2322 "cmpl %eax,(%esp)\n\t"
2323 "jb .Li386_less_unsigned_true\n\t"
2324 ".Li386_less_unsigned_false:\n\t"
2325 "xor %eax,%eax\n\t"
2326 "jmp .Li386_less_unsigned_end\n\t"
2327 ".Li386_less_unsigned_true:\n\t"
2328 "mov $1,%eax\n\t"
2329 ".Li386_less_unsigned_end:\n\t"
2330 "xor %ebx,%ebx\n\t"
2331 "lea 0x8(%esp),%esp");
2332 }
2333
2334 static void
2335 i386_emit_ref (int size)
2336 {
2337 switch (size)
2338 {
2339 case 1:
2340 EMIT_ASM32 (i386_ref1,
2341 "movb (%eax),%al");
2342 break;
2343 case 2:
2344 EMIT_ASM32 (i386_ref2,
2345 "movw (%eax),%ax");
2346 break;
2347 case 4:
2348 EMIT_ASM32 (i386_ref4,
2349 "movl (%eax),%eax");
2350 break;
2351 case 8:
2352 EMIT_ASM32 (i386_ref8,
2353 "movl 4(%eax),%ebx\n\t"
2354 "movl (%eax),%eax");
2355 break;
2356 }
2357 }
2358
2359 static void
2360 i386_emit_if_goto (int *offset_p, int *size_p)
2361 {
2362 EMIT_ASM32 (i386_if_goto,
2363 "mov %eax,%ecx\n\t"
2364 "or %ebx,%ecx\n\t"
2365 "pop %eax\n\t"
2366 "pop %ebx\n\t"
2367 "cmpl $0,%ecx\n\t"
2368 /* Don't trust the assembler to choose the right jump */
2369 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2370
2371 if (offset_p)
2372 *offset_p = 11; /* be sure that this matches the sequence above */
2373 if (size_p)
2374 *size_p = 4;
2375 }
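/* The hard-coded 11 can be checked against the byte encodings of the
   sequence above:

       mov  %eax,%ecx      89 c1       2 bytes
       or   %ebx,%ecx      09 d9       2 bytes
       pop  %eax           58          1 byte
       pop  %ebx           5b          1 byte
       cmpl $0,%ecx        83 f9 00    3 bytes
       .byte 0x0f, 0x85    0f 85       2 bytes (jne rel32 opcode)

   which places the 4-byte displacement 11 bytes into the sequence.  */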
2376
2377 static void
2378 i386_emit_goto (int *offset_p, int *size_p)
2379 {
2380 EMIT_ASM32 (i386_goto,
2381 /* Don't trust the assembler to choose the right jump */
2382 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2383 if (offset_p)
2384 *offset_p = 1;
2385 if (size_p)
2386 *size_p = 4;
2387 }
2388
2389 static void
2390 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2391 {
2392 int diff = (to - (from + size));
2393 unsigned char buf[sizeof (int)];
2394
2395 /* We're only doing 4-byte sizes at the moment. */
2396 if (size != 4)
2397 {
2398 emit_error = 1;
2399 return;
2400 }
2401
2402 memcpy (buf, &diff, sizeof (int));
2403 target_write_memory (from, buf, sizeof (int));
2404 }
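/* FROM is the address of the displacement field itself, and an x86
   relative branch is taken relative to the first byte after that field,
   hence the "from + size".  For example, with a 4-byte displacement at
   0x1000 and a target of 0x1020, diff = 0x1020 - (0x1000 + 4) = 0x1c.
   A displacement of zero therefore means "fall through".  */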
2405
2406 static void
2407 i386_emit_const (LONGEST num)
2408 {
2409 unsigned char buf[16];
2410 int i, hi, lo;
2411 CORE_ADDR buildaddr = current_insn_ptr;
2412
2413 i = 0;
2414 buf[i++] = 0xb8; /* mov $<n>,%eax */
2415 lo = num & 0xffffffff;
2416 memcpy (&buf[i], &lo, sizeof (lo));
2417 i += 4;
2418 hi = ((num >> 32) & 0xffffffff);
2419 if (hi)
2420 {
2421 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2422 memcpy (&buf[i], &hi, sizeof (hi));
2423 i += 4;
2424 }
2425 else
2426 {
2427 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2428 }
2429 append_insns (&buildaddr, i, buf);
2430 current_insn_ptr = buildaddr;
2431 }
2432
2433 static void
2434 i386_emit_call (CORE_ADDR fn)
2435 {
2436 unsigned char buf[16];
2437 int i, offset;
2438 CORE_ADDR buildaddr;
2439
2440 buildaddr = current_insn_ptr;
2441 i = 0;
2442 buf[i++] = 0xe8; /* call <reladdr> */
2443 offset = ((int) fn) - (buildaddr + 5);
2444 memcpy (buf + 1, &offset, 4);
2445 append_insns (&buildaddr, 5, buf);
2446 current_insn_ptr = buildaddr;
2447 }
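/* 0xe8 is the "call rel32" opcode; the whole instruction is 5 bytes, so
   the displacement is computed relative to buildaddr + 5, the address of
   the instruction following the call.  The cast of FN to int presumes the
   callee sits within the 32-bit address space of an i386 inferior, so a
   rel32 displacement always reaches.  */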
2448
2449 static void
2450 i386_emit_reg (int reg)
2451 {
2452 unsigned char buf[16];
2453 int i;
2454 CORE_ADDR buildaddr;
2455
2456 EMIT_ASM32 (i386_reg_a,
2457 "sub $0x8,%esp");
2458 buildaddr = current_insn_ptr;
2459 i = 0;
2460 buf[i++] = 0xb8; /* mov $<n>,%eax */
2461 memcpy (&buf[i], &reg, sizeof (reg));
2462 i += 4;
2463 append_insns (&buildaddr, i, buf);
2464 current_insn_ptr = buildaddr;
2465 EMIT_ASM32 (i386_reg_b,
2466 "mov %eax,4(%esp)\n\t"
2467 "mov 8(%ebp),%eax\n\t"
2468 "mov %eax,(%esp)");
2469 i386_emit_call (get_raw_reg_func_addr ());
2470 EMIT_ASM32 (i386_reg_c,
2471 "xor %ebx,%ebx\n\t"
2472 "lea 0x8(%esp),%esp");
2473 }
2474
2475 static void
2476 i386_emit_pop (void)
2477 {
2478 EMIT_ASM32 (i386_pop,
2479 "pop %eax\n\t"
2480 "pop %ebx");
2481 }
2482
2483 static void
2484 i386_emit_stack_flush (void)
2485 {
2486 EMIT_ASM32 (i386_stack_flush,
2487 "push %ebx\n\t"
2488 "push %eax");
2489 }
2490
2491 static void
2492 i386_emit_zero_ext (int arg)
2493 {
2494 switch (arg)
2495 {
2496 case 8:
2497 EMIT_ASM32 (i386_zero_ext_8,
2498 "and $0xff,%eax\n\t"
2499 "xor %ebx,%ebx");
2500 break;
2501 case 16:
2502 EMIT_ASM32 (i386_zero_ext_16,
2503 "and $0xffff,%eax\n\t"
2504 "xor %ebx,%ebx");
2505 break;
2506 case 32:
2507 EMIT_ASM32 (i386_zero_ext_32,
2508 "xor %ebx,%ebx");
2509 break;
2510 default:
2511 emit_error = 1;
2512 }
2513 }
2514
2515 static void
2516 i386_emit_swap (void)
2517 {
2518 EMIT_ASM32 (i386_swap,
2519 "mov %eax,%ecx\n\t"
2520 "mov %ebx,%edx\n\t"
2521 "pop %eax\n\t"
2522 "pop %ebx\n\t"
2523 "push %edx\n\t"
2524 "push %ecx");
2525 }
2526
2527 static void
2528 i386_emit_stack_adjust (int n)
2529 {
2530 unsigned char buf[16];
2531 int i;
2532 CORE_ADDR buildaddr = current_insn_ptr;
2533
2534 i = 0;
2535 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2536 buf[i++] = 0x64;
2537 buf[i++] = 0x24;
2538 buf[i++] = n * 8;
2539 append_insns (&buildaddr, i, buf);
2540 current_insn_ptr = buildaddr;
2541 }
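/* Each logical stack slot holds a 64-bit value as two 32-bit words, so
   dropping N slots moves %esp by N * 8 bytes.  The hand-assembled form
   (8d 64 24 disp8, i.e. "lea disp8(%esp),%esp") only carries an 8-bit
   displacement, which is presumed sufficient because the expression
   compiler never adjusts by more than a few slots at a time.  */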
2542
2543 /* FN's prototype is `LONGEST(*fn)(int)'. */
2544
2545 static void
2546 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2547 {
2548 unsigned char buf[16];
2549 int i;
2550 CORE_ADDR buildaddr;
2551
2552 EMIT_ASM32 (i386_int_call_1_a,
2553 /* Reserve a bit of stack space. */
2554 "sub $0x8,%esp");
2555 /* Put the one argument on the stack. */
2556 buildaddr = current_insn_ptr;
2557 i = 0;
2558 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2559 buf[i++] = 0x04;
2560 buf[i++] = 0x24;
2561 memcpy (&buf[i], &arg1, sizeof (arg1));
2562 i += 4;
2563 append_insns (&buildaddr, i, buf);
2564 current_insn_ptr = buildaddr;
2565 i386_emit_call (fn);
2566 EMIT_ASM32 (i386_int_call_1_c,
2567 "mov %edx,%ebx\n\t"
2568 "lea 0x8(%esp),%esp");
2569 }
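/* With the i386 calling convention a 64-bit (LONGEST) return value comes
   back in %edx:%eax, so the "mov %edx,%ebx" above moves the high word
   into the %ebx slot that the compiled code uses for the upper half of
   the stack top; %eax already holds the low word.  The final lea releases
   the 8 bytes reserved for the argument.  */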
2570
2571 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2572
2573 static void
2574 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2575 {
2576 unsigned char buf[16];
2577 int i;
2578 CORE_ADDR buildaddr;
2579
2580 EMIT_ASM32 (i386_void_call_2_a,
2581 /* Preserve %eax only; we don't have to worry about %ebx. */
2582 "push %eax\n\t"
2583 /* Reserve a bit of stack space for arguments. */
2584 "sub $0x10,%esp\n\t"
2585 /* Copy "top" to the second argument position. (Note that
2586 we can't assume the function won't scribble on its
2587 arguments, so don't try to restore from this.) */
2588 "mov %eax,4(%esp)\n\t"
2589 "mov %ebx,8(%esp)");
2590 /* Put the first argument on the stack. */
2591 buildaddr = current_insn_ptr;
2592 i = 0;
2593 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2594 buf[i++] = 0x04;
2595 buf[i++] = 0x24;
2596 memcpy (&buf[i], &arg1, sizeof (arg1));
2597 i += 4;
2598 append_insns (&buildaddr, i, buf);
2599 current_insn_ptr = buildaddr;
2600 i386_emit_call (fn);
2601 EMIT_ASM32 (i386_void_call_2_b,
2602 "lea 0x10(%esp),%esp\n\t"
2603 /* Restore original stack top. */
2604 "pop %eax");
2605 }
2606
2607
2608 static void
2609 i386_emit_eq_goto (int *offset_p, int *size_p)
2610 {
2611 EMIT_ASM32 (eq,
2612 /* Check the low half first, since it is more likely to be the decider.  */
2613 "cmpl %eax,(%esp)\n\t"
2614 "jne .Leq_fallthru\n\t"
2615 "cmpl %ebx,4(%esp)\n\t"
2616 "jne .Leq_fallthru\n\t"
2617 "lea 0x8(%esp),%esp\n\t"
2618 "pop %eax\n\t"
2619 "pop %ebx\n\t"
2620 /* jmp, but don't trust the assembler to choose the right jump */
2621 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2622 ".Leq_fallthru:\n\t"
2623 "lea 0x8(%esp),%esp\n\t"
2624 "pop %eax\n\t"
2625 "pop %ebx");
2626
2627 if (offset_p)
2628 *offset_p = 18;
2629 if (size_p)
2630 *size_p = 4;
2631 }
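/* As in the amd64 variant, *offset_p locates the rel32 hole of the
   ".byte 0xe9" jump.  Summing the encodings above -- cmpl %eax,(%esp) (3)
   + jne (2) + cmpl %ebx,4(%esp) (4) + jne (2) + lea (4) + two pops (2)
   + the 0xe9 opcode (1) -- gives 18 bytes before the displacement,
   matching *offset_p = 18.  The lt/le/gt/ge emitters below report 20
   because they need one extra 2-byte conditional branch.  */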
2632
2633 static void
2634 i386_emit_ne_goto (int *offset_p, int *size_p)
2635 {
2636 EMIT_ASM32 (ne,
2637 /* Check the low half first, since it is more likely to be the decider.  */
2638 "cmpl %eax,(%esp)\n\t"
2639 "jne .Lne_jump\n\t"
2640 "cmpl %ebx,4(%esp)\n\t"
2641 "je .Lne_fallthru\n\t"
2642 ".Lne_jump:\n\t"
2643 "lea 0x8(%esp),%esp\n\t"
2644 "pop %eax\n\t"
2645 "pop %ebx\n\t"
2646 /* jmp, but don't trust the assembler to choose the right jump */
2647 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2648 ".Lne_fallthru:\n\t"
2649 "lea 0x8(%esp),%esp\n\t"
2650 "pop %eax\n\t"
2651 "pop %ebx");
2652
2653 if (offset_p)
2654 *offset_p = 18;
2655 if (size_p)
2656 *size_p = 4;
2657 }
2658
2659 static void
2660 i386_emit_lt_goto (int *offset_p, int *size_p)
2661 {
2662 EMIT_ASM32 (lt,
2663 "cmpl %ebx,4(%esp)\n\t"
2664 "jl .Llt_jump\n\t"
2665 "jne .Llt_fallthru\n\t"
2666 "cmpl %eax,(%esp)\n\t"
2667 "jnl .Llt_fallthru\n\t"
2668 ".Llt_jump:\n\t"
2669 "lea 0x8(%esp),%esp\n\t"
2670 "pop %eax\n\t"
2671 "pop %ebx\n\t"
2672 /* jmp, but don't trust the assembler to choose the right jump */
2673 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2674 ".Llt_fallthru:\n\t"
2675 "lea 0x8(%esp),%esp\n\t"
2676 "pop %eax\n\t"
2677 "pop %ebx");
2678
2679 if (offset_p)
2680 *offset_p = 20;
2681 if (size_p)
2682 *size_p = 4;
2683 }
2684
2685 static void
2686 i386_emit_le_goto (int *offset_p, int *size_p)
2687 {
2688 EMIT_ASM32 (le,
2689 "cmpl %ebx,4(%esp)\n\t"
2690 "jle .Lle_jump\n\t"
2691 "jne .Lle_fallthru\n\t"
2692 "cmpl %eax,(%esp)\n\t"
2693 "jnle .Lle_fallthru\n\t"
2694 ".Lle_jump:\n\t"
2695 "lea 0x8(%esp),%esp\n\t"
2696 "pop %eax\n\t"
2697 "pop %ebx\n\t"
2698 /* jmp, but don't trust the assembler to choose the right jump */
2699 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2700 ".Lle_fallthru:\n\t"
2701 "lea 0x8(%esp),%esp\n\t"
2702 "pop %eax\n\t"
2703 "pop %ebx");
2704
2705 if (offset_p)
2706 *offset_p = 20;
2707 if (size_p)
2708 *size_p = 4;
2709 }
2710
2711 static void
2712 i386_emit_gt_goto (int *offset_p, int *size_p)
2713 {
2714 EMIT_ASM32 (gt,
2715 "cmpl %ebx,4(%esp)\n\t"
2716 "jg .Lgt_jump\n\t"
2717 "jne .Lgt_fallthru\n\t"
2718 "cmpl %eax,(%esp)\n\t"
2719 "jng .Lgt_fallthru\n\t"
2720 ".Lgt_jump:\n\t"
2721 "lea 0x8(%esp),%esp\n\t"
2722 "pop %eax\n\t"
2723 "pop %ebx\n\t"
2724 /* jmp, but don't trust the assembler to choose the right jump */
2725 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2726 ".Lgt_fallthru:\n\t"
2727 "lea 0x8(%esp),%esp\n\t"
2728 "pop %eax\n\t"
2729 "pop %ebx");
2730
2731 if (offset_p)
2732 *offset_p = 20;
2733 if (size_p)
2734 *size_p = 4;
2735 }
2736
2737 static void
2738 i386_emit_ge_goto (int *offset_p, int *size_p)
2739 {
2740 EMIT_ASM32 (ge,
2741 "cmpl %ebx,4(%esp)\n\t"
2742 "jge .Lge_jump\n\t"
2743 "jne .Lge_fallthru\n\t"
2744 "cmpl %eax,(%esp)\n\t"
2745 "jnge .Lge_fallthru\n\t"
2746 ".Lge_jump:\n\t"
2747 "lea 0x8(%esp),%esp\n\t"
2748 "pop %eax\n\t"
2749 "pop %ebx\n\t"
2750 /* jmp, but don't trust the assembler to choose the right jump */
2751 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2752 ".Lge_fallthru:\n\t"
2753 "lea 0x8(%esp),%esp\n\t"
2754 "pop %eax\n\t"
2755 "pop %ebx");
2756
2757 if (offset_p)
2758 *offset_p = 20;
2759 if (size_p)
2760 *size_p = 4;
2761 }
2762
2763 struct emit_ops i386_emit_ops =
2764 {
2765 i386_emit_prologue,
2766 i386_emit_epilogue,
2767 i386_emit_add,
2768 i386_emit_sub,
2769 i386_emit_mul,
2770 i386_emit_lsh,
2771 i386_emit_rsh_signed,
2772 i386_emit_rsh_unsigned,
2773 i386_emit_ext,
2774 i386_emit_log_not,
2775 i386_emit_bit_and,
2776 i386_emit_bit_or,
2777 i386_emit_bit_xor,
2778 i386_emit_bit_not,
2779 i386_emit_equal,
2780 i386_emit_less_signed,
2781 i386_emit_less_unsigned,
2782 i386_emit_ref,
2783 i386_emit_if_goto,
2784 i386_emit_goto,
2785 i386_write_goto_address,
2786 i386_emit_const,
2787 i386_emit_call,
2788 i386_emit_reg,
2789 i386_emit_pop,
2790 i386_emit_stack_flush,
2791 i386_emit_zero_ext,
2792 i386_emit_swap,
2793 i386_emit_stack_adjust,
2794 i386_emit_int_call_1,
2795 i386_emit_void_call_2,
2796 i386_emit_eq_goto,
2797 i386_emit_ne_goto,
2798 i386_emit_lt_goto,
2799 i386_emit_le_goto,
2800 i386_emit_gt_goto,
2801 i386_emit_ge_goto
2802 };
2803
2804
2805 static struct emit_ops *
2806 x86_emit_ops (void)
2807 {
2808 #ifdef __x86_64__
2809 if (is_64bit_tdesc ())
2810 return &amd64_emit_ops;
2811 else
2812 #endif
2813 return &i386_emit_ops;
2814 }
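/* In a biarch (x86-64) gdbserver the choice between the two emit_ops
   tables is made per inferior, depending on whether the current target
   description is 64-bit; a 32-bit build compiles only the i386 table.  */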
2815
2816 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2817
2818 static const gdb_byte *
2819 x86_sw_breakpoint_from_kind (int kind, int *size)
2820 {
2821 *size = x86_breakpoint_len;
2822 return x86_breakpoint;
2823 }
2824
2825 static int
2826 x86_supports_range_stepping (void)
2827 {
2828 return 1;
2829 }
2830
2831 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2832 */
2833
2834 static int
2835 x86_supports_hardware_single_step (void)
2836 {
2837 return 1;
2838 }
2839
2840 static int
2841 x86_get_ipa_tdesc_idx (void)
2842 {
2843 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2844 const struct target_desc *tdesc = regcache->tdesc;
2845
2846 #ifdef __x86_64__
2847 return amd64_get_ipa_tdesc_idx (tdesc);
2848 #endif
2849
2850 if (tdesc == tdesc_i386_linux_no_xml)
2851 return X86_TDESC_SSE;
2852
2853 return i386_get_ipa_tdesc_idx (tdesc);
2854 }
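/* When gdbserver itself is built for x86-64, the function above returns
   from inside the #ifdef, so the i386-specific fallback below is only
   reached in a 32-bit build.  */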
2855
2856 /* This is initialized assuming an amd64 target.
2857 x86_arch_setup will correct it for i386 or amd64 targets. */
2858
2859 struct linux_target_ops the_low_target =
2860 {
2861 x86_arch_setup,
2862 x86_linux_regs_info,
2863 x86_cannot_fetch_register,
2864 x86_cannot_store_register,
2865 NULL, /* fetch_register */
2866 x86_get_pc,
2867 x86_set_pc,
2868 NULL, /* breakpoint_kind_from_pc */
2869 x86_sw_breakpoint_from_kind,
2870 NULL,
2871 1,
2872 x86_breakpoint_at,
2873 x86_supports_z_point_type,
2874 x86_insert_point,
2875 x86_remove_point,
2876 x86_stopped_by_watchpoint,
2877 x86_stopped_data_address,
2878 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2879 native i386 case (no registers smaller than an xfer unit), and are not
2880 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2881 NULL,
2882 NULL,
2883 /* Need to fix up i386 siginfo if the host is amd64.  */
2884 x86_siginfo_fixup,
2885 x86_linux_new_process,
2886 x86_linux_delete_process,
2887 x86_linux_new_thread,
2888 x86_linux_delete_thread,
2889 x86_linux_new_fork,
2890 x86_linux_prepare_to_resume,
2891 x86_linux_process_qsupported,
2892 x86_supports_tracepoints,
2893 x86_get_thread_area,
2894 x86_install_fast_tracepoint_jump_pad,
2895 x86_emit_ops,
2896 x86_get_min_fast_tracepoint_insn_len,
2897 x86_supports_range_stepping,
2898 NULL, /* breakpoint_kind_from_current_state */
2899 x86_supports_hardware_single_step,
2900 x86_get_syscall_trapinfo,
2901 x86_get_ipa_tdesc_idx,
2902 };
2903
2904 void
2905 initialize_low_arch (void)
2906 {
2907 /* Initialize the Linux target descriptions. */
2908 #ifdef __x86_64__
2909 tdesc_amd64_linux_no_xml = allocate_target_description ();
2910 copy_target_description (tdesc_amd64_linux_no_xml,
2911 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2912 false));
2913 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2914 #endif
2915
2916 tdesc_i386_linux_no_xml = allocate_target_description ();
2917 copy_target_description (tdesc_i386_linux_no_xml,
2918 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2919 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2920
2921 initialize_regsets_info (&x86_regsets_info);
2922 }