gdbserver/linux-low: turn 'insert_point' and 'remove_point' into methods
gdbserver/linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
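/* Illustrative sketch only (not part of the compiled code): the codes
   above are passed as the ptrace "data" argument together with
   PTRACE_ARCH_PRCTL, with the "addr" argument pointing at the value to
   read or write.  E.g., reading the 64-bit fs base of an LWP looks like:

     unsigned long fs_base;
     if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &fs_base, ARCH_GET_FS) == 0)
       ... fs_base now holds the thread's fs base ...

   The same pattern is used below in ps_get_thread_area,
   x86_get_thread_area and x86_fill_gregset/x86_store_gregset.  */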
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 /* Update the target description of all processes; a new GDB has
104 connected, and it may or may not support xml target descriptions. */
105 void update_xmltarget ();
106
107 const regs_info *get_regs_info () override;
108
109 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
110
111 bool supports_z_point_type (char z_type) override;
112
113 protected:
114
115 void low_arch_setup () override;
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
126
127 int low_decr_pc_after_break () override;
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
130
131 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
132 int size, raw_breakpoint *bp) override;
133
134 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
135 int size, raw_breakpoint *bp) override;
136 };
137
138 /* The singleton target ops object. */
139
140 static x86_target the_x86_target;
141
142 /* Per-process arch-specific data we want to keep. */
143
144 struct arch_process_info
145 {
146 struct x86_debug_reg_state debug_reg_state;
147 };
148
149 #ifdef __x86_64__
150
151 /* Mapping between the general-purpose registers in `struct user'
152 format and GDB's register array layout.
153 Note that the transfer layout uses 64-bit regs. */
154 static /*const*/ int i386_regmap[] =
155 {
156 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
157 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
158 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
159 DS * 8, ES * 8, FS * 8, GS * 8
160 };
161
162 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
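/* For example, i386_regmap[0] is RAX * 8: GDB register 0 of the 32-bit
   tdesc (%eax) is transferred from byte offset RAX * 8 of the `struct
   user' register block, because on a 64-bit host every slot in that
   block is 8 bytes wide, as noted above.  */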
163
164 /* So the code below doesn't have to care whether it's i386 or amd64. */
165 #define ORIG_EAX ORIG_RAX
166 #define REGSIZE 8
167
168 static const int x86_64_regmap[] =
169 {
170 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
171 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
172 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
173 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
174 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
175 DS * 8, ES * 8, FS * 8, GS * 8,
176 -1, -1, -1, -1, -1, -1, -1, -1,
177 -1, -1, -1, -1, -1, -1, -1, -1,
178 -1, -1, -1, -1, -1, -1, -1, -1,
179 -1,
180 -1, -1, -1, -1, -1, -1, -1, -1,
181 ORIG_RAX * 8,
182 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
183 21 * 8, 22 * 8,
184 #else
185 -1, -1,
186 #endif
187 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
188 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
189 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
195 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, -1, -1, -1, -1,
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1 /* pkru */
199 };
200
201 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
202 #define X86_64_USER_REGS (GS + 1)
203
204 #else /* ! __x86_64__ */
205
206 /* Mapping between the general-purpose registers in `struct user'
207 format and GDB's register array layout. */
208 static /*const*/ int i386_regmap[] =
209 {
210 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
211 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
212 EIP * 4, EFL * 4, CS * 4, SS * 4,
213 DS * 4, ES * 4, FS * 4, GS * 4
214 };
215
216 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
217
218 #define REGSIZE 4
219
220 #endif
221
222 #ifdef __x86_64__
223
224 /* Returns true if the current inferior belongs to an x86-64 process,
225 per the tdesc. */
226
227 static int
228 is_64bit_tdesc (void)
229 {
230 struct regcache *regcache = get_thread_regcache (current_thread, 0);
231
232 return register_size (regcache->tdesc, 0) == 8;
233 }
234
235 #endif
236
237 \f
238 /* Called by libthread_db. */
239
240 ps_err_e
241 ps_get_thread_area (struct ps_prochandle *ph,
242 lwpid_t lwpid, int idx, void **base)
243 {
244 #ifdef __x86_64__
245 int use_64bit = is_64bit_tdesc ();
246
247 if (use_64bit)
248 {
249 switch (idx)
250 {
251 case FS:
252 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
253 return PS_OK;
254 break;
255 case GS:
256 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
257 return PS_OK;
258 break;
259 default:
260 return PS_BADADDR;
261 }
262 return PS_ERR;
263 }
264 #endif
265
266 {
267 unsigned int desc[4];
268
269 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
270 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
271 return PS_ERR;
272
273 /* Ensure we properly extend the value to 64-bits for x86_64. */
274 *base = (void *) (uintptr_t) desc[1];
275 return PS_OK;
276 }
277 }
278
279 /* Get the thread area address. This is used to recognize which
280 thread is which when tracing with the in-process agent library. We
281 don't read anything from the address, and treat it as opaque; it's
282 the address itself that we assume is unique per-thread. */
283
284 static int
285 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
286 {
287 #ifdef __x86_64__
288 int use_64bit = is_64bit_tdesc ();
289
290 if (use_64bit)
291 {
292 void *base;
293 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
294 {
295 *addr = (CORE_ADDR) (uintptr_t) base;
296 return 0;
297 }
298
299 return -1;
300 }
301 #endif
302
303 {
304 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
305 struct thread_info *thr = get_lwp_thread (lwp);
306 struct regcache *regcache = get_thread_regcache (thr, 1);
307 unsigned int desc[4];
308 ULONGEST gs = 0;
309 const int reg_thread_area = 3; /* Shift to turn the GS selector value into the GDT entry index expected by PTRACE_GET_THREAD_AREA. */
310 int idx;
311
312 collect_register_by_name (regcache, "gs", &gs);
313
314 idx = gs >> reg_thread_area;
315
316 if (ptrace (PTRACE_GET_THREAD_AREA,
317 lwpid_of (thr),
318 (void *) (long) idx, (unsigned long) &desc) < 0)
319 return -1;
320
321 *addr = desc[1];
322 return 0;
323 }
324 }
325
326
327 \f
328 bool
329 x86_target::low_cannot_store_register (int regno)
330 {
331 #ifdef __x86_64__
332 if (is_64bit_tdesc ())
333 return false;
334 #endif
335
336 return regno >= I386_NUM_REGS;
337 }
338
339 bool
340 x86_target::low_cannot_fetch_register (int regno)
341 {
342 #ifdef __x86_64__
343 if (is_64bit_tdesc ())
344 return false;
345 #endif
346
347 return regno >= I386_NUM_REGS;
348 }
349
350 static void
351 x86_fill_gregset (struct regcache *regcache, void *buf)
352 {
353 int i;
354
355 #ifdef __x86_64__
356 if (register_size (regcache->tdesc, 0) == 8)
357 {
358 for (i = 0; i < X86_64_NUM_REGS; i++)
359 if (x86_64_regmap[i] != -1)
360 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
361
362 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
363 {
364 unsigned long base;
365 int lwpid = lwpid_of (current_thread);
366
367 collect_register_by_name (regcache, "fs_base", &base);
368 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
369
370 collect_register_by_name (regcache, "gs_base", &base);
371 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
372 }
373 #endif
374
375 return;
376 }
377
378 /* 32-bit inferior registers need to be zero-extended.
379 Callers would read uninitialized memory otherwise. */
380 memset (buf, 0x00, X86_64_USER_REGS * 8);
381 #endif
382
383 for (i = 0; i < I386_NUM_REGS; i++)
384 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
385
386 collect_register_by_name (regcache, "orig_eax",
387 ((char *) buf) + ORIG_EAX * REGSIZE);
388
389 #ifdef __x86_64__
390 /* Sign extend EAX value to avoid potential syscall restart
391 problems.
392
393 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
394 for a detailed explanation. */
395 if (register_size (regcache->tdesc, 0) == 4)
396 {
397 void *ptr = ((gdb_byte *) buf
398 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
399
400 *(int64_t *) ptr = *(int32_t *) ptr;
401 }
402 #endif
403 }
404
405 static void
406 x86_store_gregset (struct regcache *regcache, const void *buf)
407 {
408 int i;
409
410 #ifdef __x86_64__
411 if (register_size (regcache->tdesc, 0) == 8)
412 {
413 for (i = 0; i < X86_64_NUM_REGS; i++)
414 if (x86_64_regmap[i] != -1)
415 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
416
417 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
418 {
419 unsigned long base;
420 int lwpid = lwpid_of (current_thread);
421
422 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
423 supply_register_by_name (regcache, "fs_base", &base);
424
425 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
426 supply_register_by_name (regcache, "gs_base", &base);
427 }
428 #endif
429 return;
430 }
431 #endif
432
433 for (i = 0; i < I386_NUM_REGS; i++)
434 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
435
436 supply_register_by_name (regcache, "orig_eax",
437 ((char *) buf) + ORIG_EAX * REGSIZE);
438 }
439
440 static void
441 x86_fill_fpregset (struct regcache *regcache, void *buf)
442 {
443 #ifdef __x86_64__
444 i387_cache_to_fxsave (regcache, buf);
445 #else
446 i387_cache_to_fsave (regcache, buf);
447 #endif
448 }
449
450 static void
451 x86_store_fpregset (struct regcache *regcache, const void *buf)
452 {
453 #ifdef __x86_64__
454 i387_fxsave_to_cache (regcache, buf);
455 #else
456 i387_fsave_to_cache (regcache, buf);
457 #endif
458 }
459
460 #ifndef __x86_64__
461
462 static void
463 x86_fill_fpxregset (struct regcache *regcache, void *buf)
464 {
465 i387_cache_to_fxsave (regcache, buf);
466 }
467
468 static void
469 x86_store_fpxregset (struct regcache *regcache, const void *buf)
470 {
471 i387_fxsave_to_cache (regcache, buf);
472 }
473
474 #endif
475
476 static void
477 x86_fill_xstateregset (struct regcache *regcache, void *buf)
478 {
479 i387_cache_to_xsave (regcache, buf);
480 }
481
482 static void
483 x86_store_xstateregset (struct regcache *regcache, const void *buf)
484 {
485 i387_xsave_to_cache (regcache, buf);
486 }
487
488 /* ??? The non-biarch i386 case stores all the i387 regs twice.
489 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
490 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
491 doesn't work. IWBN to avoid the duplication in the case where it
492 does work. Maybe the arch_setup routine could check whether it works
493 and update the supported regsets accordingly. */
494
495 static struct regset_info x86_regsets[] =
496 {
497 #ifdef HAVE_PTRACE_GETREGS
498 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
499 GENERAL_REGS,
500 x86_fill_gregset, x86_store_gregset },
501 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
502 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
503 # ifndef __x86_64__
504 # ifdef HAVE_PTRACE_GETFPXREGS
505 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
506 EXTENDED_REGS,
507 x86_fill_fpxregset, x86_store_fpxregset },
508 # endif
509 # endif
510 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
511 FP_REGS,
512 x86_fill_fpregset, x86_store_fpregset },
513 #endif /* HAVE_PTRACE_GETREGS */
514 NULL_REGSET
515 };
516
517 bool
518 x86_target::low_supports_breakpoints ()
519 {
520 return true;
521 }
522
523 CORE_ADDR
524 x86_target::low_get_pc (regcache *regcache)
525 {
526 int use_64bit = register_size (regcache->tdesc, 0) == 8;
527
528 if (use_64bit)
529 {
530 uint64_t pc;
531
532 collect_register_by_name (regcache, "rip", &pc);
533 return (CORE_ADDR) pc;
534 }
535 else
536 {
537 uint32_t pc;
538
539 collect_register_by_name (regcache, "eip", &pc);
540 return (CORE_ADDR) pc;
541 }
542 }
543
544 void
545 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
546 {
547 int use_64bit = register_size (regcache->tdesc, 0) == 8;
548
549 if (use_64bit)
550 {
551 uint64_t newpc = pc;
552
553 supply_register_by_name (regcache, "rip", &newpc);
554 }
555 else
556 {
557 uint32_t newpc = pc;
558
559 supply_register_by_name (regcache, "eip", &newpc);
560 }
561 }
562
563 int
564 x86_target::low_decr_pc_after_break ()
565 {
566 return 1;
567 }
568
569 \f
570 static const gdb_byte x86_breakpoint[] = { 0xCC };
571 #define x86_breakpoint_len 1
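/* The int3 opcode above is a single byte, so after the trap the
   reported PC points one byte past the breakpoint address; this is why
   low_decr_pc_after_break above returns 1, letting the core rewind the
   PC back onto the breakpoint.  */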
572
573 bool
574 x86_target::low_breakpoint_at (CORE_ADDR pc)
575 {
576 unsigned char c;
577
578 read_memory (pc, &c, 1);
579 if (c == 0xCC)
580 return true;
581
582 return false;
583 }
584 \f
585 /* Low-level function vector. */
586 struct x86_dr_low_type x86_dr_low =
587 {
588 x86_linux_dr_set_control,
589 x86_linux_dr_set_addr,
590 x86_linux_dr_get_addr,
591 x86_linux_dr_get_status,
592 x86_linux_dr_get_control,
593 sizeof (void *),
594 };
595 \f
596 /* Breakpoint/Watchpoint support. */
597
598 bool
599 x86_target::supports_z_point_type (char z_type)
600 {
601 switch (z_type)
602 {
603 case Z_PACKET_SW_BP:
604 case Z_PACKET_HW_BP:
605 case Z_PACKET_WRITE_WP:
606 case Z_PACKET_ACCESS_WP:
607 return true;
608 default:
609 return false;
610 }
611 }
612
613 int
614 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
615 int size, raw_breakpoint *bp)
616 {
617 struct process_info *proc = current_process ();
618
619 switch (type)
620 {
621 case raw_bkpt_type_hw:
622 case raw_bkpt_type_write_wp:
623 case raw_bkpt_type_access_wp:
624 {
625 enum target_hw_bp_type hw_type
626 = raw_bkpt_type_to_target_hw_bp_type (type);
627 struct x86_debug_reg_state *state
628 = &proc->priv->arch_private->debug_reg_state;
629
630 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
631 }
632
633 default:
634 /* Unsupported. */
635 return 1;
636 }
637 }
638
639 int
640 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
641 int size, raw_breakpoint *bp)
642 {
643 struct process_info *proc = current_process ();
644
645 switch (type)
646 {
647 case raw_bkpt_type_hw:
648 case raw_bkpt_type_write_wp:
649 case raw_bkpt_type_access_wp:
650 {
651 enum target_hw_bp_type hw_type
652 = raw_bkpt_type_to_target_hw_bp_type (type);
653 struct x86_debug_reg_state *state
654 = &proc->priv->arch_private->debug_reg_state;
655
656 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
657 }
658 default:
659 /* Unsupported. */
660 return 1;
661 }
662 }
663
664 static int
665 x86_stopped_by_watchpoint (void)
666 {
667 struct process_info *proc = current_process ();
668 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
669 }
670
671 static CORE_ADDR
672 x86_stopped_data_address (void)
673 {
674 struct process_info *proc = current_process ();
675 CORE_ADDR addr;
676 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
677 &addr))
678 return addr;
679 return 0;
680 }
681 \f
682 /* Called when a new process is created. */
683
684 static struct arch_process_info *
685 x86_linux_new_process (void)
686 {
687 struct arch_process_info *info = XCNEW (struct arch_process_info);
688
689 x86_low_init_dregs (&info->debug_reg_state);
690
691 return info;
692 }
693
694 /* Called when a process is being deleted. */
695
696 static void
697 x86_linux_delete_process (struct arch_process_info *info)
698 {
699 xfree (info);
700 }
701
702 /* Target routine for linux_new_fork. */
703
704 static void
705 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
706 {
707 /* These are allocated by linux_add_process. */
708 gdb_assert (parent->priv != NULL
709 && parent->priv->arch_private != NULL);
710 gdb_assert (child->priv != NULL
711 && child->priv->arch_private != NULL);
712
713 /* Linux kernels before 2.6.33 commit
714 72f674d203cd230426437cdcf7dd6f681dad8b0d
715 have the child inherit the hardware debug registers from the parent
716 on fork/vfork/clone. Newer Linux kernels create such tasks with
717 zeroed debug registers.
718
719 GDB core assumes the child inherits the watchpoints/hw
720 breakpoints of the parent, and will remove them all from the
721 forked-off process. Copy the debug register mirrors into the
722 new process so that all breakpoints and watchpoints can be
723 removed together. The debug register mirrors will be zeroed
724 in the end before detaching the forked-off process, thus making
725 this compatible with older Linux kernels too. */
726
727 *child->priv->arch_private = *parent->priv->arch_private;
728 }
729
730 /* See nat/x86-dregs.h. */
731
732 struct x86_debug_reg_state *
733 x86_debug_reg_state (pid_t pid)
734 {
735 struct process_info *proc = find_process_pid (pid);
736
737 return &proc->priv->arch_private->debug_reg_state;
738 }
739 \f
740 /* When GDBSERVER is built as a 64-bit application on linux, the
741 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
742 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
743 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
744 conversion in-place ourselves. */
745
746 /* Convert a ptrace/host siginfo object into/from the siginfo in the
747 layout of the inferior's architecture. Returns true if any
748 conversion was done; false otherwise. If DIRECTION is 1, then copy
749 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
750 INF. */
751
752 static int
753 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
754 {
755 #ifdef __x86_64__
756 unsigned int machine;
757 int tid = lwpid_of (current_thread);
758 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
759
760 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
761 if (!is_64bit_tdesc ())
762 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
763 FIXUP_32);
764 /* No fixup for native x32 GDB. */
765 else if (!is_elf64 && sizeof (void *) == 8)
766 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
767 FIXUP_X32);
768 #endif
769
770 return 0;
771 }
772 \f
773 static int use_xml;
774
775 /* Format of XSAVE extended state is:
776 struct
777 {
778 fxsave_bytes[0..463]
779 sw_usable_bytes[464..511]
780 xstate_hdr_bytes[512..575]
781 avx_bytes[576..831]
782 future_state etc
783 };
784
785 Same memory layout will be used for the coredump NT_X86_XSTATE
786 representing the XSAVE extended state registers.
787
788 The first 8 bytes of the sw_usable_bytes[464..471] are the OS enabled
789 extended state mask, which is the same as the extended control register
790 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
791 together with the mask saved in the xstate_hdr_bytes to determine what
792 states the processor/OS supports and what state, used or initialized,
793 the process/thread is in. */
794 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
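/* As a sketch of how this offset is used (see x86_linux_read_description
   below): once the XSAVE area has been fetched into a uint64_t array
   with PTRACE_GETREGSET/NT_X86_XSTATE, the enabled-features mask is

     xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   i.e. the 64-bit word at byte offset 464 of the buffer.  */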
795
796 /* Does the current host support the GETFPXREGS request? The header
797 file may or may not define it, and even if it is defined, the
798 kernel will return EIO if it's running on a pre-SSE processor. */
799 int have_ptrace_getfpxregs =
800 #ifdef HAVE_PTRACE_GETFPXREGS
801 -1
802 #else
803 0
804 #endif
805 ;
806
807 /* Get Linux/x86 target description from running target. */
808
809 static const struct target_desc *
810 x86_linux_read_description (void)
811 {
812 unsigned int machine;
813 int is_elf64;
814 int xcr0_features;
815 int tid;
816 static uint64_t xcr0;
817 struct regset_info *regset;
818
819 tid = lwpid_of (current_thread);
820
821 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
822
823 if (sizeof (void *) == 4)
824 {
825 if (is_elf64 > 0)
826 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
827 #ifndef __x86_64__
828 else if (machine == EM_X86_64)
829 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
830 #endif
831 }
832
833 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
834 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
835 {
836 elf_fpxregset_t fpxregs;
837
838 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
839 {
840 have_ptrace_getfpxregs = 0;
841 have_ptrace_getregset = 0;
842 return i386_linux_read_description (X86_XSTATE_X87);
843 }
844 else
845 have_ptrace_getfpxregs = 1;
846 }
847 #endif
848
849 if (!use_xml)
850 {
851 x86_xcr0 = X86_XSTATE_SSE_MASK;
852
853 /* Don't use XML. */
854 #ifdef __x86_64__
855 if (machine == EM_X86_64)
856 return tdesc_amd64_linux_no_xml;
857 else
858 #endif
859 return tdesc_i386_linux_no_xml;
860 }
861
862 if (have_ptrace_getregset == -1)
863 {
864 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
865 struct iovec iov;
866
867 iov.iov_base = xstateregs;
868 iov.iov_len = sizeof (xstateregs);
869
870 /* Check if PTRACE_GETREGSET works. */
871 if (ptrace (PTRACE_GETREGSET, tid,
872 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
873 have_ptrace_getregset = 0;
874 else
875 {
876 have_ptrace_getregset = 1;
877
878 /* Get XCR0 from XSAVE extended state. */
879 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
880 / sizeof (uint64_t))];
881
882 /* Use PTRACE_GETREGSET if it is available. */
883 for (regset = x86_regsets;
884 regset->fill_function != NULL; regset++)
885 if (regset->get_request == PTRACE_GETREGSET)
886 regset->size = X86_XSTATE_SIZE (xcr0);
887 else if (regset->type != GENERAL_REGS)
888 regset->size = 0;
889 }
890 }
891
892 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
893 xcr0_features = (have_ptrace_getregset
894 && (xcr0 & X86_XSTATE_ALL_MASK));
895
896 if (xcr0_features)
897 x86_xcr0 = xcr0;
898
899 if (machine == EM_X86_64)
900 {
901 #ifdef __x86_64__
902 const target_desc *tdesc = NULL;
903
904 if (xcr0_features)
905 {
906 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
907 !is_elf64);
908 }
909
910 if (tdesc == NULL)
911 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
912 return tdesc;
913 #endif
914 }
915 else
916 {
917 const target_desc *tdesc = NULL;
918
919 if (xcr0_features)
920 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
921
922 if (tdesc == NULL)
923 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
924
925 return tdesc;
926 }
927
928 gdb_assert_not_reached ("failed to return tdesc");
929 }
930
931 /* Update the target description of all processes; a new GDB has
932 connected, and it may or may not support xml target descriptions. */
933
934 void
935 x86_target::update_xmltarget ()
936 {
937 struct thread_info *saved_thread = current_thread;
938
939 /* Before changing the register cache's internal layout, flush the
940 contents of the current valid caches back to the threads, and
941 release the current regcache objects. */
942 regcache_release ();
943
944 for_each_process ([this] (process_info *proc) {
945 int pid = proc->pid;
946
947 /* Look up any thread of this process. */
948 current_thread = find_any_thread_of_pid (pid);
949
950 low_arch_setup ();
951 });
952
953 current_thread = saved_thread;
954 }
955
956 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
957 PTRACE_GETREGSET. */
958
959 static void
960 x86_linux_process_qsupported (char **features, int count)
961 {
962 int i;
963
964 /* Presume gdb doesn't support XML. If gdb sends "xmlRegisters="
965 with "i386" in the qSupported query, it supports x86 XML target
966 descriptions. */
967 use_xml = 0;
968 for (i = 0; i < count; i++)
969 {
970 const char *feature = features[i];
971
972 if (startswith (feature, "xmlRegisters="))
973 {
974 char *copy = xstrdup (feature + 13);
975
976 char *saveptr;
977 for (char *p = strtok_r (copy, ",", &saveptr);
978 p != NULL;
979 p = strtok_r (NULL, ",", &saveptr))
980 {
981 if (strcmp (p, "i386") == 0)
982 {
983 use_xml = 1;
984 break;
985 }
986 }
987
988 free (copy);
989 }
990 }
991 the_x86_target.update_xmltarget ();
992 }
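/* For example (illustrative feature string, not a fixed GDB value): a
   GDB that sends "xmlRegisters=i386,arm" in its qSupported packet makes
   the loop above find "i386" and set use_xml to 1, and the
   update_xmltarget call then re-reads each process's target description
   with XML support enabled.  */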
993
994 /* Common for x86/x86-64. */
995
996 static struct regsets_info x86_regsets_info =
997 {
998 x86_regsets, /* regsets */
999 0, /* num_regsets */
1000 NULL, /* disabled_regsets */
1001 };
1002
1003 #ifdef __x86_64__
1004 static struct regs_info amd64_linux_regs_info =
1005 {
1006 NULL, /* regset_bitmap */
1007 NULL, /* usrregs_info */
1008 &x86_regsets_info
1009 };
1010 #endif
1011 static struct usrregs_info i386_linux_usrregs_info =
1012 {
1013 I386_NUM_REGS,
1014 i386_regmap,
1015 };
1016
1017 static struct regs_info i386_linux_regs_info =
1018 {
1019 NULL, /* regset_bitmap */
1020 &i386_linux_usrregs_info,
1021 &x86_regsets_info
1022 };
1023
1024 const regs_info *
1025 x86_target::get_regs_info ()
1026 {
1027 #ifdef __x86_64__
1028 if (is_64bit_tdesc ())
1029 return &amd64_linux_regs_info;
1030 else
1031 #endif
1032 return &i386_linux_regs_info;
1033 }
1034
1035 /* Initialize the target description for the architecture of the
1036 inferior. */
1037
1038 void
1039 x86_target::low_arch_setup ()
1040 {
1041 current_process ()->tdesc = x86_linux_read_description ();
1042 }
1043
1044 /* Fill *SYSNO with the number of the syscall that was trapped. This
1045 should only be called if LWP got a SYSCALL_SIGTRAP. */
1046
1047 static void
1048 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1049 {
1050 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1051
1052 if (use_64bit)
1053 {
1054 long l_sysno;
1055
1056 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1057 *sysno = (int) l_sysno;
1058 }
1059 else
1060 collect_register_by_name (regcache, "orig_eax", sysno);
1061 }
1062
1063 static int
1064 x86_supports_tracepoints (void)
1065 {
1066 return 1;
1067 }
1068
1069 static void
1070 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1071 {
1072 target_write_memory (*to, buf, len);
1073 *to += len;
1074 }
1075
1076 static int
1077 push_opcode (unsigned char *buf, const char *op)
1078 {
1079 unsigned char *buf_org = buf;
1080
1081 while (1)
1082 {
1083 char *endptr;
1084 unsigned long ul = strtoul (op, &endptr, 16);
1085
1086 if (endptr == op)
1087 break;
1088
1089 *buf++ = ul;
1090 op = endptr;
1091 }
1092
1093 return buf - buf_org;
1094 }
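/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 (mov %rsp,%rsi) into BUF and returns 3.  The jump pad
   builders below use it to splice hand-assembled instruction sequences
   into the buffer they are constructing.  */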
1095
1096 #ifdef __x86_64__
1097
1098 /* Build a jump pad that saves registers and calls a collection
1099 function. Writes the jump instruction that jumps to the jump pad into
1100 JJUMPAD_INSN. The caller is responsible for writing it in at the
1101 tracepoint address. */
1102
1103 static int
1104 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1105 CORE_ADDR collector,
1106 CORE_ADDR lockaddr,
1107 ULONGEST orig_size,
1108 CORE_ADDR *jump_entry,
1109 CORE_ADDR *trampoline,
1110 ULONGEST *trampoline_size,
1111 unsigned char *jjump_pad_insn,
1112 ULONGEST *jjump_pad_insn_size,
1113 CORE_ADDR *adjusted_insn_addr,
1114 CORE_ADDR *adjusted_insn_addr_end,
1115 char *err)
1116 {
1117 unsigned char buf[40];
1118 int i, offset;
1119 int64_t loffset;
1120
1121 CORE_ADDR buildaddr = *jump_entry;
1122
1123 /* Build the jump pad. */
1124
1125 /* First, do tracepoint data collection. Save registers. */
1126 i = 0;
1127 /* Need to ensure stack pointer saved first. */
1128 buf[i++] = 0x54; /* push %rsp */
1129 buf[i++] = 0x55; /* push %rbp */
1130 buf[i++] = 0x57; /* push %rdi */
1131 buf[i++] = 0x56; /* push %rsi */
1132 buf[i++] = 0x52; /* push %rdx */
1133 buf[i++] = 0x51; /* push %rcx */
1134 buf[i++] = 0x53; /* push %rbx */
1135 buf[i++] = 0x50; /* push %rax */
1136 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1137 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1138 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1139 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1140 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1141 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1142 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1143 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1144 buf[i++] = 0x9c; /* pushfq */
1145 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1146 buf[i++] = 0xbf;
1147 memcpy (buf + i, &tpaddr, 8);
1148 i += 8;
1149 buf[i++] = 0x57; /* push %rdi */
1150 append_insns (&buildaddr, i, buf);
1151
1152 /* Stack space for the collecting_t object. */
1153 i = 0;
1154 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1155 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1156 memcpy (buf + i, &tpoint, 8);
1157 i += 8;
1158 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1159 i += push_opcode (&buf[i],
1160 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1161 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1162 append_insns (&buildaddr, i, buf);
1163
1164 /* spin-lock. */
1165 i = 0;
1166 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1167 memcpy (&buf[i], (void *) &lockaddr, 8);
1168 i += 8;
1169 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1170 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1171 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1172 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1173 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1174 append_insns (&buildaddr, i, buf);
1175
1176 /* Set up the gdb_collect call. */
1177 /* At this point, (stack pointer + 0x18) is the base of our saved
1178 register block. */
1179
1180 i = 0;
1181 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1182 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1183
1184 /* tpoint address may be 64-bit wide. */
1185 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1186 memcpy (buf + i, &tpoint, 8);
1187 i += 8;
1188 append_insns (&buildaddr, i, buf);
1189
1190 /* The collector function, being in the shared library, may be
1191 more than 31 bits away from the jump pad. */
1192 i = 0;
1193 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1194 memcpy (buf + i, &collector, 8);
1195 i += 8;
1196 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1197 append_insns (&buildaddr, i, buf);
1198
1199 /* Clear the spin-lock. */
1200 i = 0;
1201 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1202 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1203 memcpy (buf + i, &lockaddr, 8);
1204 i += 8;
1205 append_insns (&buildaddr, i, buf);
1206
1207 /* Remove stack that had been used for the collect_t object. */
1208 i = 0;
1209 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1210 append_insns (&buildaddr, i, buf);
1211
1212 /* Restore register state. */
1213 i = 0;
1214 buf[i++] = 0x48; /* add $0x8,%rsp */
1215 buf[i++] = 0x83;
1216 buf[i++] = 0xc4;
1217 buf[i++] = 0x08;
1218 buf[i++] = 0x9d; /* popfq */
1219 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1220 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1221 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1222 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1223 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1224 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1225 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1226 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1227 buf[i++] = 0x58; /* pop %rax */
1228 buf[i++] = 0x5b; /* pop %rbx */
1229 buf[i++] = 0x59; /* pop %rcx */
1230 buf[i++] = 0x5a; /* pop %rdx */
1231 buf[i++] = 0x5e; /* pop %rsi */
1232 buf[i++] = 0x5f; /* pop %rdi */
1233 buf[i++] = 0x5d; /* pop %rbp */
1234 buf[i++] = 0x5c; /* pop %rsp */
1235 append_insns (&buildaddr, i, buf);
1236
1237 /* Now, adjust the original instruction to execute in the jump
1238 pad. */
1239 *adjusted_insn_addr = buildaddr;
1240 relocate_instruction (&buildaddr, tpaddr);
1241 *adjusted_insn_addr_end = buildaddr;
1242
1243 /* Finally, write a jump back to the program. */
1244
1245 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1246 if (loffset > INT_MAX || loffset < INT_MIN)
1247 {
1248 sprintf (err,
1249 "E.Jump back from jump pad too far from tracepoint "
1250 "(offset 0x%" PRIx64 " > int32).", loffset);
1251 return 1;
1252 }
1253
1254 offset = (int) loffset;
1255 memcpy (buf, jump_insn, sizeof (jump_insn));
1256 memcpy (buf + 1, &offset, 4);
1257 append_insns (&buildaddr, sizeof (jump_insn), buf);
1258
1259 /* The jump pad is now built. Wire in a jump to our jump pad. This
1260 is always done last (by our caller actually), so that we can
1261 install fast tracepoints with threads running. This relies on
1262 the agent's atomic write support. */
1263 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1264 if (loffset > INT_MAX || loffset < INT_MIN)
1265 {
1266 sprintf (err,
1267 "E.Jump pad too far from tracepoint "
1268 "(offset 0x%" PRIx64 " > int32).", loffset);
1269 return 1;
1270 }
1271
1272 offset = (int) loffset;
1273
1274 memcpy (buf, jump_insn, sizeof (jump_insn));
1275 memcpy (buf + 1, &offset, 4);
1276 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1277 *jjump_pad_insn_size = sizeof (jump_insn);
1278
1279 /* Return the end address of our pad. */
1280 *jump_entry = buildaddr;
1281
1282 return 0;
1283 }
1284
1285 #endif /* __x86_64__ */
1286
1287 /* Build a jump pad that saves registers and calls a collection
1288 function. Writes the jump instruction that jumps to the jump pad into
1289 JJUMPAD_INSN. The caller is responsible for writing it in at the
1290 tracepoint address. */
1291
1292 static int
1293 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1294 CORE_ADDR collector,
1295 CORE_ADDR lockaddr,
1296 ULONGEST orig_size,
1297 CORE_ADDR *jump_entry,
1298 CORE_ADDR *trampoline,
1299 ULONGEST *trampoline_size,
1300 unsigned char *jjump_pad_insn,
1301 ULONGEST *jjump_pad_insn_size,
1302 CORE_ADDR *adjusted_insn_addr,
1303 CORE_ADDR *adjusted_insn_addr_end,
1304 char *err)
1305 {
1306 unsigned char buf[0x100];
1307 int i, offset;
1308 CORE_ADDR buildaddr = *jump_entry;
1309
1310 /* Build the jump pad. */
1311
1312 /* First, do tracepoint data collection. Save registers. */
1313 i = 0;
1314 buf[i++] = 0x60; /* pushad */
1315 buf[i++] = 0x68; /* push tpaddr aka $pc */
1316 *((int *)(buf + i)) = (int) tpaddr;
1317 i += 4;
1318 buf[i++] = 0x9c; /* pushf */
1319 buf[i++] = 0x1e; /* push %ds */
1320 buf[i++] = 0x06; /* push %es */
1321 buf[i++] = 0x0f; /* push %fs */
1322 buf[i++] = 0xa0;
1323 buf[i++] = 0x0f; /* push %gs */
1324 buf[i++] = 0xa8;
1325 buf[i++] = 0x16; /* push %ss */
1326 buf[i++] = 0x0e; /* push %cs */
1327 append_insns (&buildaddr, i, buf);
1328
1329 /* Stack space for the collecting_t object. */
1330 i = 0;
1331 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1332
1333 /* Build the object. */
1334 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1335 memcpy (buf + i, &tpoint, 4);
1336 i += 4;
1337 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1338
1339 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1340 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1341 append_insns (&buildaddr, i, buf);
1342
1343 /* spin-lock. Note this uses cmpxchg, which is not available on the
1344 original i386; if we cared about that, xchg could be used instead. */
1345
1346 i = 0;
1347 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1348 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1349 %esp,<lockaddr> */
1350 memcpy (&buf[i], (void *) &lockaddr, 4);
1351 i += 4;
1352 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1353 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1354 append_insns (&buildaddr, i, buf);
1355
1356
1357 /* Set up arguments to the gdb_collect call. */
1358 i = 0;
1359 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1360 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1361 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1362 append_insns (&buildaddr, i, buf);
1363
1364 i = 0;
1365 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1366 append_insns (&buildaddr, i, buf);
1367
1368 i = 0;
1369 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1370 memcpy (&buf[i], (void *) &tpoint, 4);
1371 i += 4;
1372 append_insns (&buildaddr, i, buf);
1373
1374 buf[0] = 0xe8; /* call <reladdr> */
1375 offset = collector - (buildaddr + sizeof (jump_insn));
1376 memcpy (buf + 1, &offset, 4);
1377 append_insns (&buildaddr, 5, buf);
1378 /* Clean up after the call. */
1379 buf[0] = 0x83; /* add $0x8,%esp */
1380 buf[1] = 0xc4;
1381 buf[2] = 0x08;
1382 append_insns (&buildaddr, 3, buf);
1383
1384
1385 /* Clear the spin-lock. This would need the LOCK prefix on older
1386 broken archs. */
1387 i = 0;
1388 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1389 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1390 memcpy (buf + i, &lockaddr, 4);
1391 i += 4;
1392 append_insns (&buildaddr, i, buf);
1393
1394
1395 /* Remove stack that had been used for the collect_t object. */
1396 i = 0;
1397 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1398 append_insns (&buildaddr, i, buf);
1399
1400 i = 0;
1401 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1402 buf[i++] = 0xc4;
1403 buf[i++] = 0x04;
1404 buf[i++] = 0x17; /* pop %ss */
1405 buf[i++] = 0x0f; /* pop %gs */
1406 buf[i++] = 0xa9;
1407 buf[i++] = 0x0f; /* pop %fs */
1408 buf[i++] = 0xa1;
1409 buf[i++] = 0x07; /* pop %es */
1410 buf[i++] = 0x1f; /* pop %ds */
1411 buf[i++] = 0x9d; /* popf */
1412 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1413 buf[i++] = 0xc4;
1414 buf[i++] = 0x04;
1415 buf[i++] = 0x61; /* popad */
1416 append_insns (&buildaddr, i, buf);
1417
1418 /* Now, adjust the original instruction to execute in the jump
1419 pad. */
1420 *adjusted_insn_addr = buildaddr;
1421 relocate_instruction (&buildaddr, tpaddr);
1422 *adjusted_insn_addr_end = buildaddr;
1423
1424 /* Write the jump back to the program. */
1425 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1426 memcpy (buf, jump_insn, sizeof (jump_insn));
1427 memcpy (buf + 1, &offset, 4);
1428 append_insns (&buildaddr, sizeof (jump_insn), buf);
1429
1430 /* The jump pad is now built. Wire in a jump to our jump pad. This
1431 is always done last (by our caller actually), so that we can
1432 install fast tracepoints with threads running. This relies on
1433 the agent's atomic write support. */
1434 if (orig_size == 4)
1435 {
1436 /* Create a trampoline. */
1437 *trampoline_size = sizeof (jump_insn);
1438 if (!claim_trampoline_space (*trampoline_size, trampoline))
1439 {
1440 /* No trampoline space available. */
1441 strcpy (err,
1442 "E.Cannot allocate trampoline space needed for fast "
1443 "tracepoints on 4-byte instructions.");
1444 return 1;
1445 }
1446
1447 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1448 memcpy (buf, jump_insn, sizeof (jump_insn));
1449 memcpy (buf + 1, &offset, 4);
1450 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1451
1452 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1453 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1454 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1455 memcpy (buf + 2, &offset, 2);
1456 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1457 *jjump_pad_insn_size = sizeof (small_jump_insn);
1458 }
1459 else
1460 {
1461 /* Else use a 32-bit relative jump instruction. */
1462 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1463 memcpy (buf, jump_insn, sizeof (jump_insn));
1464 memcpy (buf + 1, &offset, 4);
1465 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1466 *jjump_pad_insn_size = sizeof (jump_insn);
1467 }
1468
1469 /* Return the end address of our pad. */
1470 *jump_entry = buildaddr;
1471
1472 return 0;
1473 }
1474
1475 static int
1476 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1477 CORE_ADDR collector,
1478 CORE_ADDR lockaddr,
1479 ULONGEST orig_size,
1480 CORE_ADDR *jump_entry,
1481 CORE_ADDR *trampoline,
1482 ULONGEST *trampoline_size,
1483 unsigned char *jjump_pad_insn,
1484 ULONGEST *jjump_pad_insn_size,
1485 CORE_ADDR *adjusted_insn_addr,
1486 CORE_ADDR *adjusted_insn_addr_end,
1487 char *err)
1488 {
1489 #ifdef __x86_64__
1490 if (is_64bit_tdesc ())
1491 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1492 collector, lockaddr,
1493 orig_size, jump_entry,
1494 trampoline, trampoline_size,
1495 jjump_pad_insn,
1496 jjump_pad_insn_size,
1497 adjusted_insn_addr,
1498 adjusted_insn_addr_end,
1499 err);
1500 #endif
1501
1502 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1503 collector, lockaddr,
1504 orig_size, jump_entry,
1505 trampoline, trampoline_size,
1506 jjump_pad_insn,
1507 jjump_pad_insn_size,
1508 adjusted_insn_addr,
1509 adjusted_insn_addr_end,
1510 err);
1511 }
1512
1513 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1514 architectures. */
1515
1516 static int
1517 x86_get_min_fast_tracepoint_insn_len (void)
1518 {
1519 static int warned_about_fast_tracepoints = 0;
1520
1521 #ifdef __x86_64__
1522 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1523 used for fast tracepoints. */
1524 if (is_64bit_tdesc ())
1525 return 5;
1526 #endif
1527
1528 if (agent_loaded_p ())
1529 {
1530 char errbuf[IPA_BUFSIZ];
1531
1532 errbuf[0] = '\0';
1533
1534 /* On x86, if trampolines are available, then 4-byte jump instructions
1535 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1536 with a 4-byte offset are used instead. */
1537 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1538 return 4;
1539 else
1540 {
1541 /* GDB has no channel to explain to the user why a shorter fast
1542 tracepoint is not possible, but at least make GDBserver
1543 mention that something has gone awry. */
1544 if (!warned_about_fast_tracepoints)
1545 {
1546 warning ("4-byte fast tracepoints not available; %s", errbuf);
1547 warned_about_fast_tracepoints = 1;
1548 }
1549 return 5;
1550 }
1551 }
1552 else
1553 {
1554 /* Indicate that the minimum length is currently unknown since the IPA
1555 has not loaded yet. */
1556 return 0;
1557 }
1558 }
1559
1560 static void
1561 add_insns (unsigned char *start, int len)
1562 {
1563 CORE_ADDR buildaddr = current_insn_ptr;
1564
1565 if (debug_threads)
1566 debug_printf ("Adding %d bytes of insn at %s\n",
1567 len, paddress (buildaddr));
1568
1569 append_insns (&buildaddr, len, start);
1570 current_insn_ptr = buildaddr;
1571 }
1572
1573 /* Our general strategy for emitting code is to avoid specifying raw
1574 bytes whenever possible, and instead copy a block of inline asm
1575 that is embedded in the function. This is a little messy, because
1576 we need to keep the compiler from discarding what looks like dead
1577 code, plus suppress various warnings. */
1578
1579 #define EMIT_ASM(NAME, INSNS) \
1580 do \
1581 { \
1582 extern unsigned char start_ ## NAME, end_ ## NAME; \
1583 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1584 __asm__ ("jmp end_" #NAME "\n" \
1585 "\t" "start_" #NAME ":" \
1586 "\t" INSNS "\n" \
1587 "\t" "end_" #NAME ":"); \
1588 } while (0)
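/* As an illustration, EMIT_ASM (amd64_pop, "pop %rax") (used by
   amd64_emit_pop below) declares the symbols start_amd64_pop and
   end_amd64_pop, plants the instruction between those two labels inside
   this function's text (skipped at run time by the leading
   "jmp end_amd64_pop"), and copies the bytes found between the labels
   into the compiled bytecode buffer via add_insns.  */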
1589
1590 #ifdef __x86_64__
1591
1592 #define EMIT_ASM32(NAME,INSNS) \
1593 do \
1594 { \
1595 extern unsigned char start_ ## NAME, end_ ## NAME; \
1596 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1597 __asm__ (".code32\n" \
1598 "\t" "jmp end_" #NAME "\n" \
1599 "\t" "start_" #NAME ":\n" \
1600 "\t" INSNS "\n" \
1601 "\t" "end_" #NAME ":\n" \
1602 ".code64\n"); \
1603 } while (0)
1604
1605 #else
1606
1607 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1608
1609 #endif
1610
1611 #ifdef __x86_64__
1612
1613 static void
1614 amd64_emit_prologue (void)
1615 {
1616 EMIT_ASM (amd64_prologue,
1617 "pushq %rbp\n\t"
1618 "movq %rsp,%rbp\n\t"
1619 "sub $0x20,%rsp\n\t"
1620 "movq %rdi,-8(%rbp)\n\t"
1621 "movq %rsi,-16(%rbp)");
1622 }
1623
1624
1625 static void
1626 amd64_emit_epilogue (void)
1627 {
1628 EMIT_ASM (amd64_epilogue,
1629 "movq -16(%rbp),%rdi\n\t"
1630 "movq %rax,(%rdi)\n\t"
1631 "xor %rax,%rax\n\t"
1632 "leave\n\t"
1633 "ret");
1634 }
1635
1636 static void
1637 amd64_emit_add (void)
1638 {
1639 EMIT_ASM (amd64_add,
1640 "add (%rsp),%rax\n\t"
1641 "lea 0x8(%rsp),%rsp");
1642 }
1643
1644 static void
1645 amd64_emit_sub (void)
1646 {
1647 EMIT_ASM (amd64_sub,
1648 "sub %rax,(%rsp)\n\t"
1649 "pop %rax");
1650 }
1651
1652 static void
1653 amd64_emit_mul (void)
1654 {
1655 emit_error = 1;
1656 }
1657
1658 static void
1659 amd64_emit_lsh (void)
1660 {
1661 emit_error = 1;
1662 }
1663
1664 static void
1665 amd64_emit_rsh_signed (void)
1666 {
1667 emit_error = 1;
1668 }
1669
1670 static void
1671 amd64_emit_rsh_unsigned (void)
1672 {
1673 emit_error = 1;
1674 }
1675
1676 static void
1677 amd64_emit_ext (int arg)
1678 {
1679 switch (arg)
1680 {
1681 case 8:
1682 EMIT_ASM (amd64_ext_8,
1683 "cbtw\n\t"
1684 "cwtl\n\t"
1685 "cltq");
1686 break;
1687 case 16:
1688 EMIT_ASM (amd64_ext_16,
1689 "cwtl\n\t"
1690 "cltq");
1691 break;
1692 case 32:
1693 EMIT_ASM (amd64_ext_32,
1694 "cltq");
1695 break;
1696 default:
1697 emit_error = 1;
1698 }
1699 }
1700
1701 static void
1702 amd64_emit_log_not (void)
1703 {
1704 EMIT_ASM (amd64_log_not,
1705 "test %rax,%rax\n\t"
1706 "sete %cl\n\t"
1707 "movzbq %cl,%rax");
1708 }
1709
1710 static void
1711 amd64_emit_bit_and (void)
1712 {
1713 EMIT_ASM (amd64_and,
1714 "and (%rsp),%rax\n\t"
1715 "lea 0x8(%rsp),%rsp");
1716 }
1717
1718 static void
1719 amd64_emit_bit_or (void)
1720 {
1721 EMIT_ASM (amd64_or,
1722 "or (%rsp),%rax\n\t"
1723 "lea 0x8(%rsp),%rsp");
1724 }
1725
1726 static void
1727 amd64_emit_bit_xor (void)
1728 {
1729 EMIT_ASM (amd64_xor,
1730 "xor (%rsp),%rax\n\t"
1731 "lea 0x8(%rsp),%rsp");
1732 }
1733
1734 static void
1735 amd64_emit_bit_not (void)
1736 {
1737 EMIT_ASM (amd64_bit_not,
1738 "xorq $0xffffffffffffffff,%rax");
1739 }
1740
1741 static void
1742 amd64_emit_equal (void)
1743 {
1744 EMIT_ASM (amd64_equal,
1745 "cmp %rax,(%rsp)\n\t"
1746 "je .Lamd64_equal_true\n\t"
1747 "xor %rax,%rax\n\t"
1748 "jmp .Lamd64_equal_end\n\t"
1749 ".Lamd64_equal_true:\n\t"
1750 "mov $0x1,%rax\n\t"
1751 ".Lamd64_equal_end:\n\t"
1752 "lea 0x8(%rsp),%rsp");
1753 }
1754
1755 static void
1756 amd64_emit_less_signed (void)
1757 {
1758 EMIT_ASM (amd64_less_signed,
1759 "cmp %rax,(%rsp)\n\t"
1760 "jl .Lamd64_less_signed_true\n\t"
1761 "xor %rax,%rax\n\t"
1762 "jmp .Lamd64_less_signed_end\n\t"
1763 ".Lamd64_less_signed_true:\n\t"
1764 "mov $1,%rax\n\t"
1765 ".Lamd64_less_signed_end:\n\t"
1766 "lea 0x8(%rsp),%rsp");
1767 }
1768
1769 static void
1770 amd64_emit_less_unsigned (void)
1771 {
1772 EMIT_ASM (amd64_less_unsigned,
1773 "cmp %rax,(%rsp)\n\t"
1774 "jb .Lamd64_less_unsigned_true\n\t"
1775 "xor %rax,%rax\n\t"
1776 "jmp .Lamd64_less_unsigned_end\n\t"
1777 ".Lamd64_less_unsigned_true:\n\t"
1778 "mov $1,%rax\n\t"
1779 ".Lamd64_less_unsigned_end:\n\t"
1780 "lea 0x8(%rsp),%rsp");
1781 }
1782
1783 static void
1784 amd64_emit_ref (int size)
1785 {
1786 switch (size)
1787 {
1788 case 1:
1789 EMIT_ASM (amd64_ref1,
1790 "movb (%rax),%al");
1791 break;
1792 case 2:
1793 EMIT_ASM (amd64_ref2,
1794 "movw (%rax),%ax");
1795 break;
1796 case 4:
1797 EMIT_ASM (amd64_ref4,
1798 "movl (%rax),%eax");
1799 break;
1800 case 8:
1801 EMIT_ASM (amd64_ref8,
1802 "movq (%rax),%rax");
1803 break;
1804 }
1805 }
1806
1807 static void
1808 amd64_emit_if_goto (int *offset_p, int *size_p)
1809 {
1810 EMIT_ASM (amd64_if_goto,
1811 "mov %rax,%rcx\n\t"
1812 "pop %rax\n\t"
1813 "cmp $0,%rcx\n\t"
1814 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1815 if (offset_p)
1816 *offset_p = 10;
1817 if (size_p)
1818 *size_p = 4;
1819 }
1820
1821 static void
1822 amd64_emit_goto (int *offset_p, int *size_p)
1823 {
1824 EMIT_ASM (amd64_goto,
1825 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1826 if (offset_p)
1827 *offset_p = 1;
1828 if (size_p)
1829 *size_p = 4;
1830 }
1831
1832 static void
1833 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1834 {
1835 int diff = (to - (from + size));
1836 unsigned char buf[sizeof (int)];
1837
1838 if (size != 4)
1839 {
1840 emit_error = 1;
1841 return;
1842 }
1843
1844 memcpy (buf, &diff, sizeof (int));
1845 target_write_memory (from, buf, sizeof (int));
1846 }
1847
1848 static void
1849 amd64_emit_const (LONGEST num)
1850 {
1851 unsigned char buf[16];
1852 int i;
1853 CORE_ADDR buildaddr = current_insn_ptr;
1854
1855 i = 0;
1856 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1857 memcpy (&buf[i], &num, sizeof (num));
1858 i += 8;
1859 append_insns (&buildaddr, i, buf);
1860 current_insn_ptr = buildaddr;
1861 }
1862
1863 static void
1864 amd64_emit_call (CORE_ADDR fn)
1865 {
1866 unsigned char buf[16];
1867 int i;
1868 CORE_ADDR buildaddr;
1869 LONGEST offset64;
1870
1871 /* The destination function, being in the shared library, may be
1872 more than 31 bits away from the compiled code pad. */
1873
1874 buildaddr = current_insn_ptr;
1875
1876 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1877
1878 i = 0;
1879
1880 if (offset64 > INT_MAX || offset64 < INT_MIN)
1881 {
1882 /* Offset is too large for a call. Use callq, but that requires
1883 a register, so avoid it if possible. Use r10, since it is
1884 call-clobbered, we don't have to push/pop it. */
1885 buf[i++] = 0x48; /* mov $fn,%r10 */
1886 buf[i++] = 0xba;
1887 memcpy (buf + i, &fn, 8);
1888 i += 8;
1889 buf[i++] = 0xff; /* callq *%r10 */
1890 buf[i++] = 0xd2;
1891 }
1892 else
1893 {
1894 int offset32 = offset64; /* we know we can't overflow here. */
1895
1896 buf[i++] = 0xe8; /* call <reladdr> */
1897 memcpy (buf + i, &offset32, 4);
1898 i += 4;
1899 }
1900
1901 append_insns (&buildaddr, i, buf);
1902 current_insn_ptr = buildaddr;
1903 }
1904
1905 static void
1906 amd64_emit_reg (int reg)
1907 {
1908 unsigned char buf[16];
1909 int i;
1910 CORE_ADDR buildaddr;
1911
1912 /* Assume raw_regs is still in %rdi. */
1913 buildaddr = current_insn_ptr;
1914 i = 0;
1915 buf[i++] = 0xbe; /* mov $<n>,%esi */
1916 memcpy (&buf[i], &reg, sizeof (reg));
1917 i += 4;
1918 append_insns (&buildaddr, i, buf);
1919 current_insn_ptr = buildaddr;
1920 amd64_emit_call (get_raw_reg_func_addr ());
1921 }
1922
1923 static void
1924 amd64_emit_pop (void)
1925 {
1926 EMIT_ASM (amd64_pop,
1927 "pop %rax");
1928 }
1929
1930 static void
1931 amd64_emit_stack_flush (void)
1932 {
1933 EMIT_ASM (amd64_stack_flush,
1934 "push %rax");
1935 }
1936
1937 static void
1938 amd64_emit_zero_ext (int arg)
1939 {
1940 switch (arg)
1941 {
1942 case 8:
1943 EMIT_ASM (amd64_zero_ext_8,
1944 "and $0xff,%rax");
1945 break;
1946 case 16:
1947 EMIT_ASM (amd64_zero_ext_16,
1948 "and $0xffff,%rax");
1949 break;
1950 case 32:
1951 EMIT_ASM (amd64_zero_ext_32,
1952 "mov $0xffffffff,%rcx\n\t"
1953 "and %rcx,%rax");
1954 break;
1955 default:
1956 emit_error = 1;
1957 }
1958 }
1959
1960 static void
1961 amd64_emit_swap (void)
1962 {
1963 EMIT_ASM (amd64_swap,
1964 "mov %rax,%rcx\n\t"
1965 "pop %rax\n\t"
1966 "push %rcx");
1967 }
1968
1969 static void
1970 amd64_emit_stack_adjust (int n)
1971 {
1972 unsigned char buf[16];
1973 int i;
1974 CORE_ADDR buildaddr = current_insn_ptr;
1975
1976 i = 0;
1977 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1978 buf[i++] = 0x8d;
1979 buf[i++] = 0x64;
1980 buf[i++] = 0x24;
1981 /* This only handles adjustments up to 16, but we don't expect any more. */
1982 buf[i++] = n * 8;
1983 append_insns (&buildaddr, i, buf);
1984 current_insn_ptr = buildaddr;
1985 }
1986
1987 /* FN's prototype is `LONGEST(*fn)(int)'. */
1988
1989 static void
1990 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1991 {
1992 unsigned char buf[16];
1993 int i;
1994 CORE_ADDR buildaddr;
1995
1996 buildaddr = current_insn_ptr;
1997 i = 0;
1998 buf[i++] = 0xbf; /* movl $<n>,%edi */
1999 memcpy (&buf[i], &arg1, sizeof (arg1));
2000 i += 4;
2001 append_insns (&buildaddr, i, buf);
2002 current_insn_ptr = buildaddr;
2003 amd64_emit_call (fn);
2004 }
2005
2006 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2007
2008 static void
2009 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2010 {
2011 unsigned char buf[16];
2012 int i;
2013 CORE_ADDR buildaddr;
2014
2015 buildaddr = current_insn_ptr;
2016 i = 0;
2017 buf[i++] = 0xbf; /* movl $<n>,%edi */
2018 memcpy (&buf[i], &arg1, sizeof (arg1));
2019 i += 4;
2020 append_insns (&buildaddr, i, buf);
2021 current_insn_ptr = buildaddr;
2022 EMIT_ASM (amd64_void_call_2_a,
2023 /* Save away a copy of the stack top. */
2024 "push %rax\n\t"
2025 /* Also pass top as the second argument. */
2026 "mov %rax,%rsi");
2027 amd64_emit_call (fn);
2028 EMIT_ASM (amd64_void_call_2_b,
2029 /* Restore the stack top, %rax may have been trashed. */
2030 "pop %rax");
2031 }
2032
2033 static void
2034 amd64_emit_eq_goto (int *offset_p, int *size_p)
2035 {
2036 EMIT_ASM (amd64_eq,
2037 "cmp %rax,(%rsp)\n\t"
2038 "jne .Lamd64_eq_fallthru\n\t"
2039 "lea 0x8(%rsp),%rsp\n\t"
2040 "pop %rax\n\t"
2041 /* jmp, but don't trust the assembler to choose the right jump */
2042 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2043 ".Lamd64_eq_fallthru:\n\t"
2044 "lea 0x8(%rsp),%rsp\n\t"
2045 "pop %rax");
2046
2047 if (offset_p)
2048 *offset_p = 13;
2049 if (size_p)
2050 *size_p = 4;
2051 }
2052
2053 static void
2054 amd64_emit_ne_goto (int *offset_p, int *size_p)
2055 {
2056 EMIT_ASM (amd64_ne,
2057 "cmp %rax,(%rsp)\n\t"
2058 "je .Lamd64_ne_fallthru\n\t"
2059 "lea 0x8(%rsp),%rsp\n\t"
2060 "pop %rax\n\t"
2061 /* jmp, but don't trust the assembler to choose the right jump */
2062 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2063 ".Lamd64_ne_fallthru:\n\t"
2064 "lea 0x8(%rsp),%rsp\n\t"
2065 "pop %rax");
2066
2067 if (offset_p)
2068 *offset_p = 13;
2069 if (size_p)
2070 *size_p = 4;
2071 }
2072
2073 static void
2074 amd64_emit_lt_goto (int *offset_p, int *size_p)
2075 {
2076 EMIT_ASM (amd64_lt,
2077 "cmp %rax,(%rsp)\n\t"
2078 "jnl .Lamd64_lt_fallthru\n\t"
2079 "lea 0x8(%rsp),%rsp\n\t"
2080 "pop %rax\n\t"
2081 /* jmp, but don't trust the assembler to choose the right jump */
2082 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2083 ".Lamd64_lt_fallthru:\n\t"
2084 "lea 0x8(%rsp),%rsp\n\t"
2085 "pop %rax");
2086
2087 if (offset_p)
2088 *offset_p = 13;
2089 if (size_p)
2090 *size_p = 4;
2091 }
2092
2093 static void
2094 amd64_emit_le_goto (int *offset_p, int *size_p)
2095 {
2096 EMIT_ASM (amd64_le,
2097 "cmp %rax,(%rsp)\n\t"
2098 "jnle .Lamd64_le_fallthru\n\t"
2099 "lea 0x8(%rsp),%rsp\n\t"
2100 "pop %rax\n\t"
2101 /* jmp, but don't trust the assembler to choose the right jump */
2102 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2103 ".Lamd64_le_fallthru:\n\t"
2104 "lea 0x8(%rsp),%rsp\n\t"
2105 "pop %rax");
2106
2107 if (offset_p)
2108 *offset_p = 13;
2109 if (size_p)
2110 *size_p = 4;
2111 }
2112
2113 static void
2114 amd64_emit_gt_goto (int *offset_p, int *size_p)
2115 {
2116 EMIT_ASM (amd64_gt,
2117 "cmp %rax,(%rsp)\n\t"
2118 "jng .Lamd64_gt_fallthru\n\t"
2119 "lea 0x8(%rsp),%rsp\n\t"
2120 "pop %rax\n\t"
2121 /* jmp, but don't trust the assembler to choose the right jump */
2122 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2123 ".Lamd64_gt_fallthru:\n\t"
2124 "lea 0x8(%rsp),%rsp\n\t"
2125 "pop %rax");
2126
2127 if (offset_p)
2128 *offset_p = 13;
2129 if (size_p)
2130 *size_p = 4;
2131 }
2132
2133 static void
2134 amd64_emit_ge_goto (int *offset_p, int *size_p)
2135 {
2136 EMIT_ASM (amd64_ge,
2137 "cmp %rax,(%rsp)\n\t"
2138 "jnge .Lamd64_ge_fallthru\n\t"
2139 ".Lamd64_ge_jump:\n\t"
2140 "lea 0x8(%rsp),%rsp\n\t"
2141 "pop %rax\n\t"
2142 /* jmp, but don't trust the assembler to choose the right jump */
2143 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2144 ".Lamd64_ge_fallthru:\n\t"
2145 "lea 0x8(%rsp),%rsp\n\t"
2146 "pop %rax");
2147
2148 if (offset_p)
2149 *offset_p = 13;
2150 if (size_p)
2151 *size_p = 4;
2152 }
2153
2154 struct emit_ops amd64_emit_ops =
2155 {
2156 amd64_emit_prologue,
2157 amd64_emit_epilogue,
2158 amd64_emit_add,
2159 amd64_emit_sub,
2160 amd64_emit_mul,
2161 amd64_emit_lsh,
2162 amd64_emit_rsh_signed,
2163 amd64_emit_rsh_unsigned,
2164 amd64_emit_ext,
2165 amd64_emit_log_not,
2166 amd64_emit_bit_and,
2167 amd64_emit_bit_or,
2168 amd64_emit_bit_xor,
2169 amd64_emit_bit_not,
2170 amd64_emit_equal,
2171 amd64_emit_less_signed,
2172 amd64_emit_less_unsigned,
2173 amd64_emit_ref,
2174 amd64_emit_if_goto,
2175 amd64_emit_goto,
2176 amd64_write_goto_address,
2177 amd64_emit_const,
2178 amd64_emit_call,
2179 amd64_emit_reg,
2180 amd64_emit_pop,
2181 amd64_emit_stack_flush,
2182 amd64_emit_zero_ext,
2183 amd64_emit_swap,
2184 amd64_emit_stack_adjust,
2185 amd64_emit_int_call_1,
2186 amd64_emit_void_call_2,
2187 amd64_emit_eq_goto,
2188 amd64_emit_ne_goto,
2189 amd64_emit_lt_goto,
2190 amd64_emit_le_goto,
2191 amd64_emit_gt_goto,
2192 amd64_emit_ge_goto
2193 };
2194
2195 #endif /* __x86_64__ */
2196
2197 static void
2198 i386_emit_prologue (void)
2199 {
2200 EMIT_ASM32 (i386_prologue,
2201 "push %ebp\n\t"
2202 "mov %esp,%ebp\n\t"
2203 "push %ebx");
2204 /* At this point, the raw regs base address is at 8(%ebp), and the
2205 value pointer is at 12(%ebp). */
2206 }
2207
2208 static void
2209 i386_emit_epilogue (void)
2210 {
2211 EMIT_ASM32 (i386_epilogue,
2212 "mov 12(%ebp),%ecx\n\t"
2213 "mov %eax,(%ecx)\n\t"
2214 "mov %ebx,0x4(%ecx)\n\t"
2215 "xor %eax,%eax\n\t"
2216 "pop %ebx\n\t"
2217 "pop %ebp\n\t"
2218 "ret");
2219 }
2220
2221 static void
2222 i386_emit_add (void)
2223 {
2224 EMIT_ASM32 (i386_add,
2225 "add (%esp),%eax\n\t"
2226 "adc 0x4(%esp),%ebx\n\t"
2227 "lea 0x8(%esp),%esp");
2228 }
2229
2230 static void
2231 i386_emit_sub (void)
2232 {
2233 EMIT_ASM32 (i386_sub,
2234 "subl %eax,(%esp)\n\t"
2235 "sbbl %ebx,4(%esp)\n\t"
2236 "pop %eax\n\t"
2237 "pop %ebx\n\t");
2238 }
2239
2240 static void
2241 i386_emit_mul (void)
2242 {
2243 emit_error = 1;
2244 }
2245
2246 static void
2247 i386_emit_lsh (void)
2248 {
2249 emit_error = 1;
2250 }
2251
2252 static void
2253 i386_emit_rsh_signed (void)
2254 {
2255 emit_error = 1;
2256 }
2257
2258 static void
2259 i386_emit_rsh_unsigned (void)
2260 {
2261 emit_error = 1;
2262 }
2263
2264 static void
2265 i386_emit_ext (int arg)
2266 {
2267 switch (arg)
2268 {
2269 case 8:
2270 EMIT_ASM32 (i386_ext_8,
2271 "cbtw\n\t"
2272 "cwtl\n\t"
2273 "movl %eax,%ebx\n\t"
2274 "sarl $31,%ebx");
2275 break;
2276 case 16:
2277 EMIT_ASM32 (i386_ext_16,
2278 "cwtl\n\t"
2279 "movl %eax,%ebx\n\t"
2280 "sarl $31,%ebx");
2281 break;
2282 case 32:
2283 EMIT_ASM32 (i386_ext_32,
2284 "movl %eax,%ebx\n\t"
2285 "sarl $31,%ebx");
2286 break;
2287 default:
2288 emit_error = 1;
2289 }
2290 }
2291
2292 static void
2293 i386_emit_log_not (void)
2294 {
2295 EMIT_ASM32 (i386_log_not,
2296 "or %ebx,%eax\n\t"
2297 "test %eax,%eax\n\t"
2298 "sete %cl\n\t"
2299 "xor %ebx,%ebx\n\t"
2300 "movzbl %cl,%eax");
2301 }
2302
2303 static void
2304 i386_emit_bit_and (void)
2305 {
2306 EMIT_ASM32 (i386_and,
2307 "and (%esp),%eax\n\t"
2308 "and 0x4(%esp),%ebx\n\t"
2309 "lea 0x8(%esp),%esp");
2310 }
2311
2312 static void
2313 i386_emit_bit_or (void)
2314 {
2315 EMIT_ASM32 (i386_or,
2316 "or (%esp),%eax\n\t"
2317 "or 0x4(%esp),%ebx\n\t"
2318 "lea 0x8(%esp),%esp");
2319 }
2320
2321 static void
2322 i386_emit_bit_xor (void)
2323 {
2324 EMIT_ASM32 (i386_xor,
2325 "xor (%esp),%eax\n\t"
2326 "xor 0x4(%esp),%ebx\n\t"
2327 "lea 0x8(%esp),%esp");
2328 }
2329
2330 static void
2331 i386_emit_bit_not (void)
2332 {
2333 EMIT_ASM32 (i386_bit_not,
2334 "xor $0xffffffff,%eax\n\t"
2335 "xor $0xffffffff,%ebx\n\t");
2336 }
2337
2338 static void
2339 i386_emit_equal (void)
2340 {
2341 EMIT_ASM32 (i386_equal,
2342 "cmpl %ebx,4(%esp)\n\t"
2343 "jne .Li386_equal_false\n\t"
2344 "cmpl %eax,(%esp)\n\t"
2345 "je .Li386_equal_true\n\t"
2346 ".Li386_equal_false:\n\t"
2347 "xor %eax,%eax\n\t"
2348 "jmp .Li386_equal_end\n\t"
2349 ".Li386_equal_true:\n\t"
2350 "mov $1,%eax\n\t"
2351 ".Li386_equal_end:\n\t"
2352 "xor %ebx,%ebx\n\t"
2353 "lea 0x8(%esp),%esp");
2354 }
2355
2356 static void
2357 i386_emit_less_signed (void)
2358 {
2359 EMIT_ASM32 (i386_less_signed,
2360 "cmpl %ebx,4(%esp)\n\t"
2361 "jl .Li386_less_signed_true\n\t"
2362 "jne .Li386_less_signed_false\n\t"
2363 "cmpl %eax,(%esp)\n\t"
2364 "jl .Li386_less_signed_true\n\t"
2365 ".Li386_less_signed_false:\n\t"
2366 "xor %eax,%eax\n\t"
2367 "jmp .Li386_less_signed_end\n\t"
2368 ".Li386_less_signed_true:\n\t"
2369 "mov $1,%eax\n\t"
2370 ".Li386_less_signed_end:\n\t"
2371 "xor %ebx,%ebx\n\t"
2372 "lea 0x8(%esp),%esp");
2373 }
2374
2375 static void
2376 i386_emit_less_unsigned (void)
2377 {
2378 EMIT_ASM32 (i386_less_unsigned,
2379 "cmpl %ebx,4(%esp)\n\t"
2380 "jb .Li386_less_unsigned_true\n\t"
2381 "jne .Li386_less_unsigned_false\n\t"
2382 "cmpl %eax,(%esp)\n\t"
2383 "jb .Li386_less_unsigned_true\n\t"
2384 ".Li386_less_unsigned_false:\n\t"
2385 "xor %eax,%eax\n\t"
2386 "jmp .Li386_less_unsigned_end\n\t"
2387 ".Li386_less_unsigned_true:\n\t"
2388 "mov $1,%eax\n\t"
2389 ".Li386_less_unsigned_end:\n\t"
2390 "xor %ebx,%ebx\n\t"
2391 "lea 0x8(%esp),%esp");
2392 }
2393
2394 static void
2395 i386_emit_ref (int size)
2396 {
2397 switch (size)
2398 {
2399 case 1:
2400 EMIT_ASM32 (i386_ref1,
2401 "movb (%eax),%al");
2402 break;
2403 case 2:
2404 EMIT_ASM32 (i386_ref2,
2405 "movw (%eax),%ax");
2406 break;
2407 case 4:
2408 EMIT_ASM32 (i386_ref4,
2409 "movl (%eax),%eax");
2410 break;
2411 case 8:
2412 EMIT_ASM32 (i386_ref8,
2413 "movl 4(%eax),%ebx\n\t"
2414 "movl (%eax),%eax");
2415 break;
2416 }
2417 }
2418
2419 static void
2420 i386_emit_if_goto (int *offset_p, int *size_p)
2421 {
2422 EMIT_ASM32 (i386_if_goto,
2423 "mov %eax,%ecx\n\t"
2424 "or %ebx,%ecx\n\t"
2425 "pop %eax\n\t"
2426 "pop %ebx\n\t"
2427 "cmpl $0,%ecx\n\t"
2428 /* Don't trust the assembler to choose the right jump */
2429 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2430
2431 if (offset_p)
2432 *offset_p = 11; /* Be sure that this matches the sequence above.  */
2433 if (size_p)
2434 *size_p = 4;
2435 }
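
/* Checking the 11 against the emitted bytes (assuming GAS uses the
   sign-extended imm8 form of cmp):

       89 c1             mov  %eax,%ecx          2 bytes
       09 d9             or   %ebx,%ecx          2 bytes
       58                pop  %eax               1 byte
       5b                pop  %ebx               1 byte
       83 f9 00          cmpl $0,%ecx            3 bytes
       0f 85             jne opcode (rel32)      2 bytes

   which places the 4-byte displacement at offset 2+2+1+1+3+2 = 11.  */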
2436
2437 static void
2438 i386_emit_goto (int *offset_p, int *size_p)
2439 {
2440 EMIT_ASM32 (i386_goto,
2441 /* Don't trust the assembler to choose the right jump */
2442 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2443 if (offset_p)
2444 *offset_p = 1;
2445 if (size_p)
2446 *size_p = 4;
2447 }
2448
2449 static void
2450 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2451 {
2452 int diff = (to - (from + size));
2453 unsigned char buf[sizeof (int)];
2454
2455 /* We're only doing 4-byte sizes at the moment. */
2456 if (size != 4)
2457 {
2458 emit_error = 1;
2459 return;
2460 }
2461
2462 memcpy (buf, &diff, sizeof (int));
2463 target_write_memory (from, buf, sizeof (int));
2464 }
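
/* For example, if the displacement field of an emitted jump sits at
   FROM = 0x1000 with SIZE = 4 and the branch target is TO = 0x1020,
   the value stored is 0x1020 - (0x1000 + 4) = 0x1c: a rel32 is taken
   relative to the address of the instruction that follows the jump,
   which is exactly FROM + SIZE here.  */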
2465
2466 static void
2467 i386_emit_const (LONGEST num)
2468 {
2469 unsigned char buf[16];
2470 int i, hi, lo;
2471 CORE_ADDR buildaddr = current_insn_ptr;
2472
2473 i = 0;
2474 buf[i++] = 0xb8; /* mov $<n>,%eax */
2475 lo = num & 0xffffffff;
2476 memcpy (&buf[i], &lo, sizeof (lo));
2477 i += 4;
2478 hi = ((num >> 32) & 0xffffffff);
2479 if (hi)
2480 {
2481 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2482 memcpy (&buf[i], &hi, sizeof (hi));
2483 i += 4;
2484 }
2485 else
2486 {
2487 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2488 }
2489 append_insns (&buildaddr, i, buf);
2490 current_insn_ptr = buildaddr;
2491 }
2492
2493 static void
2494 i386_emit_call (CORE_ADDR fn)
2495 {
2496 unsigned char buf[16];
2497 int i, offset;
2498 CORE_ADDR buildaddr;
2499
2500 buildaddr = current_insn_ptr;
2501 i = 0;
2502 buf[i++] = 0xe8; /* call <reladdr> */
2503 offset = ((int) fn) - (buildaddr + 5);
2504 memcpy (buf + 1, &offset, 4);
2505 append_insns (&buildaddr, 5, buf);
2506 current_insn_ptr = buildaddr;
2507 }
2508
2509 static void
2510 i386_emit_reg (int reg)
2511 {
2512 unsigned char buf[16];
2513 int i;
2514 CORE_ADDR buildaddr;
2515
2516 EMIT_ASM32 (i386_reg_a,
2517 "sub $0x8,%esp");
2518 buildaddr = current_insn_ptr;
2519 i = 0;
2520 buf[i++] = 0xb8; /* mov $<n>,%eax */
2521 memcpy (&buf[i], &reg, sizeof (reg));
2522 i += 4;
2523 append_insns (&buildaddr, i, buf);
2524 current_insn_ptr = buildaddr;
2525 EMIT_ASM32 (i386_reg_b,
2526 "mov %eax,4(%esp)\n\t"
2527 "mov 8(%ebp),%eax\n\t"
2528 "mov %eax,(%esp)");
2529 i386_emit_call (get_raw_reg_func_addr ());
2530 EMIT_ASM32 (i386_reg_c,
2531 "xor %ebx,%ebx\n\t"
2532 "lea 0x8(%esp),%esp");
2533 }
2534
2535 static void
2536 i386_emit_pop (void)
2537 {
2538 EMIT_ASM32 (i386_pop,
2539 "pop %eax\n\t"
2540 "pop %ebx");
2541 }
2542
2543 static void
2544 i386_emit_stack_flush (void)
2545 {
2546 EMIT_ASM32 (i386_stack_flush,
2547 "push %ebx\n\t"
2548 "push %eax");
2549 }
2550
2551 static void
2552 i386_emit_zero_ext (int arg)
2553 {
2554 switch (arg)
2555 {
2556 case 8:
2557 EMIT_ASM32 (i386_zero_ext_8,
2558 "and $0xff,%eax\n\t"
2559 "xor %ebx,%ebx");
2560 break;
2561 case 16:
2562 EMIT_ASM32 (i386_zero_ext_16,
2563 "and $0xffff,%eax\n\t"
2564 "xor %ebx,%ebx");
2565 break;
2566 case 32:
2567 EMIT_ASM32 (i386_zero_ext_32,
2568 "xor %ebx,%ebx");
2569 break;
2570 default:
2571 emit_error = 1;
2572 }
2573 }
2574
2575 static void
2576 i386_emit_swap (void)
2577 {
2578 EMIT_ASM32 (i386_swap,
2579 "mov %eax,%ecx\n\t"
2580 "mov %ebx,%edx\n\t"
2581 "pop %eax\n\t"
2582 "pop %ebx\n\t"
2583 "push %edx\n\t"
2584 "push %ecx");
2585 }
2586
2587 static void
2588 i386_emit_stack_adjust (int n)
2589 {
2590 unsigned char buf[16];
2591 int i;
2592 CORE_ADDR buildaddr = current_insn_ptr;
2593
2594 i = 0;
2595 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2596 buf[i++] = 0x64;
2597 buf[i++] = 0x24;
2598 buf[i++] = n * 8;
2599 append_insns (&buildaddr, i, buf);
2600 current_insn_ptr = buildaddr;
2601 }
2602
2603 /* FN's prototype is `LONGEST(*fn)(int)'. */
2604
2605 static void
2606 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2607 {
2608 unsigned char buf[16];
2609 int i;
2610 CORE_ADDR buildaddr;
2611
2612 EMIT_ASM32 (i386_int_call_1_a,
2613 /* Reserve a bit of stack space. */
2614 "sub $0x8,%esp");
2615 /* Put the one argument on the stack. */
2616 buildaddr = current_insn_ptr;
2617 i = 0;
2618 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2619 buf[i++] = 0x04;
2620 buf[i++] = 0x24;
2621 memcpy (&buf[i], &arg1, sizeof (arg1));
2622 i += 4;
2623 append_insns (&buildaddr, i, buf);
2624 current_insn_ptr = buildaddr;
2625 i386_emit_call (fn);
2626 EMIT_ASM32 (i386_int_call_1_c,
2627 "mov %edx,%ebx\n\t"
2628 "lea 0x8(%esp),%esp");
2629 }
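
/* FN returns a LONGEST, which the i386 calling convention hands back
   in the %edx:%eax pair.  The "mov %edx,%ebx" above therefore moves
   the high half into %ebx, leaving the result in the %ebx:%eax
   top-of-stack pair that the 32-bit emitters use throughout.  */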
2630
2631 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2632
2633 static void
2634 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2635 {
2636 unsigned char buf[16];
2637 int i;
2638 CORE_ADDR buildaddr;
2639
2640 EMIT_ASM32 (i386_void_call_2_a,
2641 /* Preserve %eax only; we don't have to worry about %ebx. */
2642 "push %eax\n\t"
2643 /* Reserve a bit of stack space for arguments. */
2644 "sub $0x10,%esp\n\t"
2645 /* Copy "top" to the second argument position.  (Note that
2646 we can't assume the function won't scribble on its
2647 arguments, so don't try to restore from this.) */
2648 "mov %eax,4(%esp)\n\t"
2649 "mov %ebx,8(%esp)");
2650 /* Put the first argument on the stack. */
2651 buildaddr = current_insn_ptr;
2652 i = 0;
2653 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2654 buf[i++] = 0x04;
2655 buf[i++] = 0x24;
2656 memcpy (&buf[i], &arg1, sizeof (arg1));
2657 i += 4;
2658 append_insns (&buildaddr, i, buf);
2659 current_insn_ptr = buildaddr;
2660 i386_emit_call (fn);
2661 EMIT_ASM32 (i386_void_call_2_b,
2662 "lea 0x10(%esp),%esp\n\t"
2663 /* Restore original stack top. */
2664 "pop %eax");
2665 }
2666
2667
2668 static void
2669 i386_emit_eq_goto (int *offset_p, int *size_p)
2670 {
2671 EMIT_ASM32 (eq,
2672 /* Check the low half first; it is more likely to be the decider.  */
2673 "cmpl %eax,(%esp)\n\t"
2674 "jne .Leq_fallthru\n\t"
2675 "cmpl %ebx,4(%esp)\n\t"
2676 "jne .Leq_fallthru\n\t"
2677 "lea 0x8(%esp),%esp\n\t"
2678 "pop %eax\n\t"
2679 "pop %ebx\n\t"
2680 /* jmp, but don't trust the assembler to choose the right jump */
2681 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2682 ".Leq_fallthru:\n\t"
2683 "lea 0x8(%esp),%esp\n\t"
2684 "pop %eax\n\t"
2685 "pop %ebx");
2686
2687 if (offset_p)
2688 *offset_p = 18;
2689 if (size_p)
2690 *size_p = 4;
2691 }
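
/* As with the amd64 variants above, the 18 can be checked by adding
   up the emitted bytes (assuming GAS picks the short rel8 forms for
   the conditional jumps): cmpl %eax,(%esp) is 3 bytes, jne is 2,
   cmpl %ebx,4(%esp) is 4, the second jne is 2, lea 0x8(%esp),%esp is
   4, and each pop is 1, so the 0xe9 opcode lands at byte 17 and its
   displacement at offset 18.  The lt/le/gt/ge variants below compare
   the high halves first and emit three conditional jumps in total,
   which pushes the displacement to offset 20.  */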
2692
2693 static void
2694 i386_emit_ne_goto (int *offset_p, int *size_p)
2695 {
2696 EMIT_ASM32 (ne,
2697 /* Check the low half first; it is more likely to be the decider.  */
2698 "cmpl %eax,(%esp)\n\t"
2699 "jne .Lne_jump\n\t"
2700 "cmpl %ebx,4(%esp)\n\t"
2701 "je .Lne_fallthru\n\t"
2702 ".Lne_jump:\n\t"
2703 "lea 0x8(%esp),%esp\n\t"
2704 "pop %eax\n\t"
2705 "pop %ebx\n\t"
2706 /* jmp, but don't trust the assembler to choose the right jump */
2707 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2708 ".Lne_fallthru:\n\t"
2709 "lea 0x8(%esp),%esp\n\t"
2710 "pop %eax\n\t"
2711 "pop %ebx");
2712
2713 if (offset_p)
2714 *offset_p = 18;
2715 if (size_p)
2716 *size_p = 4;
2717 }
2718
2719 static void
2720 i386_emit_lt_goto (int *offset_p, int *size_p)
2721 {
2722 EMIT_ASM32 (lt,
2723 "cmpl %ebx,4(%esp)\n\t"
2724 "jl .Llt_jump\n\t"
2725 "jne .Llt_fallthru\n\t"
2726 "cmpl %eax,(%esp)\n\t"
2727 "jnl .Llt_fallthru\n\t"
2728 ".Llt_jump:\n\t"
2729 "lea 0x8(%esp),%esp\n\t"
2730 "pop %eax\n\t"
2731 "pop %ebx\n\t"
2732 /* jmp, but don't trust the assembler to choose the right jump */
2733 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2734 ".Llt_fallthru:\n\t"
2735 "lea 0x8(%esp),%esp\n\t"
2736 "pop %eax\n\t"
2737 "pop %ebx");
2738
2739 if (offset_p)
2740 *offset_p = 20;
2741 if (size_p)
2742 *size_p = 4;
2743 }
2744
2745 static void
2746 i386_emit_le_goto (int *offset_p, int *size_p)
2747 {
2748 EMIT_ASM32 (le,
2749 "cmpl %ebx,4(%esp)\n\t"
2750 "jle .Lle_jump\n\t"
2751 "jne .Lle_fallthru\n\t"
2752 "cmpl %eax,(%esp)\n\t"
2753 "jnle .Lle_fallthru\n\t"
2754 ".Lle_jump:\n\t"
2755 "lea 0x8(%esp),%esp\n\t"
2756 "pop %eax\n\t"
2757 "pop %ebx\n\t"
2758 /* jmp, but don't trust the assembler to choose the right jump */
2759 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2760 ".Lle_fallthru:\n\t"
2761 "lea 0x8(%esp),%esp\n\t"
2762 "pop %eax\n\t"
2763 "pop %ebx");
2764
2765 if (offset_p)
2766 *offset_p = 20;
2767 if (size_p)
2768 *size_p = 4;
2769 }
2770
2771 static void
2772 i386_emit_gt_goto (int *offset_p, int *size_p)
2773 {
2774 EMIT_ASM32 (gt,
2775 "cmpl %ebx,4(%esp)\n\t"
2776 "jg .Lgt_jump\n\t"
2777 "jne .Lgt_fallthru\n\t"
2778 "cmpl %eax,(%esp)\n\t"
2779 "jng .Lgt_fallthru\n\t"
2780 ".Lgt_jump:\n\t"
2781 "lea 0x8(%esp),%esp\n\t"
2782 "pop %eax\n\t"
2783 "pop %ebx\n\t"
2784 /* jmp, but don't trust the assembler to choose the right jump */
2785 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2786 ".Lgt_fallthru:\n\t"
2787 "lea 0x8(%esp),%esp\n\t"
2788 "pop %eax\n\t"
2789 "pop %ebx");
2790
2791 if (offset_p)
2792 *offset_p = 20;
2793 if (size_p)
2794 *size_p = 4;
2795 }
2796
2797 static void
2798 i386_emit_ge_goto (int *offset_p, int *size_p)
2799 {
2800 EMIT_ASM32 (ge,
2801 "cmpl %ebx,4(%esp)\n\t"
2802 "jge .Lge_jump\n\t"
2803 "jne .Lge_fallthru\n\t"
2804 "cmpl %eax,(%esp)\n\t"
2805 "jnge .Lge_fallthru\n\t"
2806 ".Lge_jump:\n\t"
2807 "lea 0x8(%esp),%esp\n\t"
2808 "pop %eax\n\t"
2809 "pop %ebx\n\t"
2810 /* jmp, but don't trust the assembler to choose the right jump */
2811 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2812 ".Lge_fallthru:\n\t"
2813 "lea 0x8(%esp),%esp\n\t"
2814 "pop %eax\n\t"
2815 "pop %ebx");
2816
2817 if (offset_p)
2818 *offset_p = 20;
2819 if (size_p)
2820 *size_p = 4;
2821 }
2822
2823 struct emit_ops i386_emit_ops =
2824 {
2825 i386_emit_prologue,
2826 i386_emit_epilogue,
2827 i386_emit_add,
2828 i386_emit_sub,
2829 i386_emit_mul,
2830 i386_emit_lsh,
2831 i386_emit_rsh_signed,
2832 i386_emit_rsh_unsigned,
2833 i386_emit_ext,
2834 i386_emit_log_not,
2835 i386_emit_bit_and,
2836 i386_emit_bit_or,
2837 i386_emit_bit_xor,
2838 i386_emit_bit_not,
2839 i386_emit_equal,
2840 i386_emit_less_signed,
2841 i386_emit_less_unsigned,
2842 i386_emit_ref,
2843 i386_emit_if_goto,
2844 i386_emit_goto,
2845 i386_write_goto_address,
2846 i386_emit_const,
2847 i386_emit_call,
2848 i386_emit_reg,
2849 i386_emit_pop,
2850 i386_emit_stack_flush,
2851 i386_emit_zero_ext,
2852 i386_emit_swap,
2853 i386_emit_stack_adjust,
2854 i386_emit_int_call_1,
2855 i386_emit_void_call_2,
2856 i386_emit_eq_goto,
2857 i386_emit_ne_goto,
2858 i386_emit_lt_goto,
2859 i386_emit_le_goto,
2860 i386_emit_gt_goto,
2861 i386_emit_ge_goto
2862 };
2863
2864
2865 static struct emit_ops *
2866 x86_emit_ops (void)
2867 {
2868 #ifdef __x86_64__
2869 if (is_64bit_tdesc ())
2870 return &amd64_emit_ops;
2871 else
2872 #endif
2873 return &i386_emit_ops;
2874 }
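
/* A rough sketch of how the table above is consumed.  The driver loop
   lives in gdbserver's tracepoint/agent-expression machinery; the
   loop shown here is a simplification, and the field names are
   assumed to mirror the emitter names.  Compiling an expression such
   as "raw register 0 plus 42" amounts to roughly:

       struct emit_ops *ops = x86_emit_ops ();
       ops->emit_prologue ();
       ops->emit_reg (0);         -- leave raw register 0 in the top-of-stack regs
       ops->emit_stack_flush ();  -- push it onto the value stack
       ops->emit_const (42);      -- new top-of-stack value
       ops->emit_add ();          -- pop one operand and add
       ops->emit_epilogue ();     -- store the result and return

   with emit_error checked after each step, and current_insn_ptr
   advancing through the jump pad as instructions are appended.  */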
2875
2876 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2877
2878 const gdb_byte *
2879 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2880 {
2881 *size = x86_breakpoint_len;
2882 return x86_breakpoint;
2883 }
2884
2885 static int
2886 x86_supports_range_stepping (void)
2887 {
2888 return 1;
2889 }
2890
2891 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2892 */
2893
2894 static int
2895 x86_supports_hardware_single_step (void)
2896 {
2897 return 1;
2898 }
2899
2900 static int
2901 x86_get_ipa_tdesc_idx (void)
2902 {
2903 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2904 const struct target_desc *tdesc = regcache->tdesc;
2905
2906 #ifdef __x86_64__
2907 return amd64_get_ipa_tdesc_idx (tdesc);
2908 #endif
2909
2910 if (tdesc == tdesc_i386_linux_no_xml)
2911 return X86_TDESC_SSE;
2912
2913 return i386_get_ipa_tdesc_idx (tdesc);
2914 }
2915
2916 /* This is initialized assuming an amd64 target.
2917 x86_arch_setup will correct it for i386 or amd64 targets. */
2918
2919 struct linux_target_ops the_low_target =
2920 {
2921 x86_stopped_by_watchpoint,
2922 x86_stopped_data_address,
2923 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2924 native i386 case (no registers smaller than an xfer unit), and are not
2925 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2926 NULL,
2927 NULL,
2928 /* need to fix up i386 siginfo if host is amd64 */
2929 x86_siginfo_fixup,
2930 x86_linux_new_process,
2931 x86_linux_delete_process,
2932 x86_linux_new_thread,
2933 x86_linux_delete_thread,
2934 x86_linux_new_fork,
2935 x86_linux_prepare_to_resume,
2936 x86_linux_process_qsupported,
2937 x86_supports_tracepoints,
2938 x86_get_thread_area,
2939 x86_install_fast_tracepoint_jump_pad,
2940 x86_emit_ops,
2941 x86_get_min_fast_tracepoint_insn_len,
2942 x86_supports_range_stepping,
2943 x86_supports_hardware_single_step,
2944 x86_get_syscall_trapinfo,
2945 x86_get_ipa_tdesc_idx,
2946 };
2947
2948 /* The linux target ops object. */
2949
2950 linux_process_target *the_linux_target = &the_x86_target;
2951
2952 void
2953 initialize_low_arch (void)
2954 {
2955 /* Initialize the Linux target descriptions. */
2956 #ifdef __x86_64__
2957 tdesc_amd64_linux_no_xml = allocate_target_description ();
2958 copy_target_description (tdesc_amd64_linux_no_xml,
2959 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2960 false));
2961 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2962 #endif
2963
2964 tdesc_i386_linux_no_xml = allocate_target_description ();
2965 copy_target_description (tdesc_i386_linux_no_xml,
2966 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2967 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2968
2969 initialize_regsets_info (&x86_regsets_info);
2970 }