gdbserver/linux-low: turn 'get_thread_area' into a method
gdbserver/linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
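/* Instruction templates used when building fast tracepoint jump pads
   below: JUMP_INSN is a 5-byte "jmp rel32" (opcode 0xe9) and
   SMALL_JUMP_INSN is a 4-byte "jmp rel16" (0x66 operand-size prefix
   plus 0xe9), used when only four bytes are available at the
   tracepoint site and a trampoline is employed.  The zeroed
   displacement bytes are patched in with memcpy when each jump is
   emitted.  */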
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
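/* These constants select which segment base of a 64-bit inferior the
   PTRACE_ARCH_PRCTL request below reads (ARCH_GET_*) or writes
   (ARCH_SET_*); they are passed as the fourth ptrace argument.  */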
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (char **features, int count) override;
110
111 bool supports_tracepoints () override;
112
113 protected:
114
115 void low_arch_setup () override;
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
126
127 int low_decr_pc_after_break () override;
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
130
131 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
132 int size, raw_breakpoint *bp) override;
133
134 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
135 int size, raw_breakpoint *bp) override;
136
137 bool low_stopped_by_watchpoint () override;
138
139 CORE_ADDR low_stopped_data_address () override;
140
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
144
145 /* Need to fix up i386 siginfo if host is amd64. */
146 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
147 int direction) override;
148
149 arch_process_info *low_new_process () override;
150
151 void low_delete_process (arch_process_info *info) override;
152
153 void low_new_thread (lwp_info *) override;
154
155 void low_delete_thread (arch_lwp_info *) override;
156
157 void low_new_fork (process_info *parent, process_info *child) override;
158
159 void low_prepare_to_resume (lwp_info *lwp) override;
160
161 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
162
163 private:
164
 165   /* Update the target descriptions of all processes; a new GDB has
 166      connected, and it may or may not support xml target descriptions.  */
167 void update_xmltarget ();
168 };
169
170 /* The singleton target ops object. */
171
172 static x86_target the_x86_target;
173
174 /* Per-process arch-specific data we want to keep. */
175
176 struct arch_process_info
177 {
178 struct x86_debug_reg_state debug_reg_state;
179 };
180
181 #ifdef __x86_64__
182
183 /* Mapping between the general-purpose registers in `struct user'
184 format and GDB's register array layout.
185 Note that the transfer layout uses 64-bit regs. */
186 static /*const*/ int i386_regmap[] =
187 {
188 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
189 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
190 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
191 DS * 8, ES * 8, FS * 8, GS * 8
192 };
193
194 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
195
196 /* So code below doesn't have to care, i386 or amd64. */
197 #define ORIG_EAX ORIG_RAX
198 #define REGSIZE 8
199
200 static const int x86_64_regmap[] =
201 {
202 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
203 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
204 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
205 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
206 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
207 DS * 8, ES * 8, FS * 8, GS * 8,
208 -1, -1, -1, -1, -1, -1, -1, -1,
209 -1, -1, -1, -1, -1, -1, -1, -1,
210 -1, -1, -1, -1, -1, -1, -1, -1,
211 -1,
212 -1, -1, -1, -1, -1, -1, -1, -1,
213 ORIG_RAX * 8,
214 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
215 21 * 8, 22 * 8,
216 #else
217 -1, -1,
218 #endif
219 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
220 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
221 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
222 -1, -1, -1, -1, -1, -1, -1, -1,
223 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
224 -1, -1, -1, -1, -1, -1, -1, -1,
225 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
226 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
227 -1, -1, -1, -1, -1, -1, -1, -1,
228 -1, -1, -1, -1, -1, -1, -1, -1,
229 -1, -1, -1, -1, -1, -1, -1, -1,
230 -1 /* pkru */
231 };
232
233 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
234 #define X86_64_USER_REGS (GS + 1)
235
236 #else /* ! __x86_64__ */
237
238 /* Mapping between the general-purpose registers in `struct user'
239 format and GDB's register array layout. */
240 static /*const*/ int i386_regmap[] =
241 {
242 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
243 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
244 EIP * 4, EFL * 4, CS * 4, SS * 4,
245 DS * 4, ES * 4, FS * 4, GS * 4
246 };
247
248 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
249
250 #define REGSIZE 4
251
252 #endif
253
254 #ifdef __x86_64__
255
 256 /* Returns true if the current inferior belongs to an x86-64 process,
257 per the tdesc. */
258
259 static int
260 is_64bit_tdesc (void)
261 {
262 struct regcache *regcache = get_thread_regcache (current_thread, 0);
263
264 return register_size (regcache->tdesc, 0) == 8;
265 }
266
267 #endif
268
269 \f
270 /* Called by libthread_db. */
271
272 ps_err_e
273 ps_get_thread_area (struct ps_prochandle *ph,
274 lwpid_t lwpid, int idx, void **base)
275 {
276 #ifdef __x86_64__
277 int use_64bit = is_64bit_tdesc ();
278
279 if (use_64bit)
280 {
281 switch (idx)
282 {
283 case FS:
284 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
285 return PS_OK;
286 break;
287 case GS:
288 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
289 return PS_OK;
290 break;
291 default:
292 return PS_BADADDR;
293 }
294 return PS_ERR;
295 }
296 #endif
297
298 {
299 unsigned int desc[4];
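    /* For a 32-bit inferior, PTRACE_GET_THREAD_AREA fills DESC with a
       struct user_desc describing GDT entry IDX; desc[1] is its
       base_addr field, which is the thread area address we want.  */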
300
301 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
302 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
303 return PS_ERR;
304
305 /* Ensure we properly extend the value to 64-bits for x86_64. */
306 *base = (void *) (uintptr_t) desc[1];
307 return PS_OK;
308 }
309 }
310
311 /* Get the thread area address. This is used to recognize which
312 thread is which when tracing with the in-process agent library. We
313 don't read anything from the address, and treat it as opaque; it's
314 the address itself that we assume is unique per-thread. */
315
316 int
317 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
318 {
319 #ifdef __x86_64__
320 int use_64bit = is_64bit_tdesc ();
321
322 if (use_64bit)
323 {
324 void *base;
325 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
326 {
327 *addr = (CORE_ADDR) (uintptr_t) base;
328 return 0;
329 }
330
331 return -1;
332 }
333 #endif
334
335 {
336 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
337 struct thread_info *thr = get_lwp_thread (lwp);
338 struct regcache *regcache = get_thread_regcache (thr, 1);
339 unsigned int desc[4];
340 ULONGEST gs = 0;
341 const int reg_thread_area = 3; /* bits to scale down register value. */
342 int idx;
343
344 collect_register_by_name (regcache, "gs", &gs);
345
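    /* The %gs value is a segment selector; its low 3 bits are the RPL
       and table-indicator bits, so shifting them off leaves the GDT
       entry number that PTRACE_GET_THREAD_AREA expects.  */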
346 idx = gs >> reg_thread_area;
347
348 if (ptrace (PTRACE_GET_THREAD_AREA,
349 lwpid_of (thr),
350 (void *) (long) idx, (unsigned long) &desc) < 0)
351 return -1;
352
353 *addr = desc[1];
354 return 0;
355 }
356 }
357
358
359 \f
360 bool
361 x86_target::low_cannot_store_register (int regno)
362 {
363 #ifdef __x86_64__
364 if (is_64bit_tdesc ())
365 return false;
366 #endif
367
368 return regno >= I386_NUM_REGS;
369 }
370
371 bool
372 x86_target::low_cannot_fetch_register (int regno)
373 {
374 #ifdef __x86_64__
375 if (is_64bit_tdesc ())
376 return false;
377 #endif
378
379 return regno >= I386_NUM_REGS;
380 }
381
382 static void
383 x86_fill_gregset (struct regcache *regcache, void *buf)
384 {
385 int i;
386
387 #ifdef __x86_64__
388 if (register_size (regcache->tdesc, 0) == 8)
389 {
390 for (i = 0; i < X86_64_NUM_REGS; i++)
391 if (x86_64_regmap[i] != -1)
392 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
393
394 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
395 {
396 unsigned long base;
397 int lwpid = lwpid_of (current_thread);
398
399 collect_register_by_name (regcache, "fs_base", &base);
400 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
401
402 collect_register_by_name (regcache, "gs_base", &base);
403 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
404 }
405 #endif
406
407 return;
408 }
409
410 /* 32-bit inferior registers need to be zero-extended.
411 Callers would read uninitialized memory otherwise. */
412 memset (buf, 0x00, X86_64_USER_REGS * 8);
413 #endif
414
415 for (i = 0; i < I386_NUM_REGS; i++)
416 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
417
418 collect_register_by_name (regcache, "orig_eax",
419 ((char *) buf) + ORIG_EAX * REGSIZE);
420
421 #ifdef __x86_64__
422 /* Sign extend EAX value to avoid potential syscall restart
423 problems.
424
425 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
426 for a detailed explanation. */
427 if (register_size (regcache->tdesc, 0) == 4)
428 {
429 void *ptr = ((gdb_byte *) buf
430 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
431
432 *(int64_t *) ptr = *(int32_t *) ptr;
433 }
434 #endif
435 }
436
437 static void
438 x86_store_gregset (struct regcache *regcache, const void *buf)
439 {
440 int i;
441
442 #ifdef __x86_64__
443 if (register_size (regcache->tdesc, 0) == 8)
444 {
445 for (i = 0; i < X86_64_NUM_REGS; i++)
446 if (x86_64_regmap[i] != -1)
447 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
448
449 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
450 {
451 unsigned long base;
452 int lwpid = lwpid_of (current_thread);
453
454 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
455 supply_register_by_name (regcache, "fs_base", &base);
456
457 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
458 supply_register_by_name (regcache, "gs_base", &base);
459 }
460 #endif
461 return;
462 }
463 #endif
464
465 for (i = 0; i < I386_NUM_REGS; i++)
466 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
467
468 supply_register_by_name (regcache, "orig_eax",
469 ((char *) buf) + ORIG_EAX * REGSIZE);
470 }
471
472 static void
473 x86_fill_fpregset (struct regcache *regcache, void *buf)
474 {
475 #ifdef __x86_64__
476 i387_cache_to_fxsave (regcache, buf);
477 #else
478 i387_cache_to_fsave (regcache, buf);
479 #endif
480 }
481
482 static void
483 x86_store_fpregset (struct regcache *regcache, const void *buf)
484 {
485 #ifdef __x86_64__
486 i387_fxsave_to_cache (regcache, buf);
487 #else
488 i387_fsave_to_cache (regcache, buf);
489 #endif
490 }
491
492 #ifndef __x86_64__
493
494 static void
495 x86_fill_fpxregset (struct regcache *regcache, void *buf)
496 {
497 i387_cache_to_fxsave (regcache, buf);
498 }
499
500 static void
501 x86_store_fpxregset (struct regcache *regcache, const void *buf)
502 {
503 i387_fxsave_to_cache (regcache, buf);
504 }
505
506 #endif
507
508 static void
509 x86_fill_xstateregset (struct regcache *regcache, void *buf)
510 {
511 i387_cache_to_xsave (regcache, buf);
512 }
513
514 static void
515 x86_store_xstateregset (struct regcache *regcache, const void *buf)
516 {
517 i387_xsave_to_cache (regcache, buf);
518 }
519
520 /* ??? The non-biarch i386 case stores all the i387 regs twice.
521 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
522 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
 523    doesn't work.  It would be nice to avoid the duplication when it
524 does work. Maybe the arch_setup routine could check whether it works
525 and update the supported regsets accordingly. */
526
527 static struct regset_info x86_regsets[] =
528 {
529 #ifdef HAVE_PTRACE_GETREGS
530 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
531 GENERAL_REGS,
532 x86_fill_gregset, x86_store_gregset },
533 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
534 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
535 # ifndef __x86_64__
536 # ifdef HAVE_PTRACE_GETFPXREGS
537 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
538 EXTENDED_REGS,
539 x86_fill_fpxregset, x86_store_fpxregset },
540 # endif
541 # endif
542 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
543 FP_REGS,
544 x86_fill_fpregset, x86_store_fpregset },
545 #endif /* HAVE_PTRACE_GETREGS */
546 NULL_REGSET
547 };
548
549 bool
550 x86_target::low_supports_breakpoints ()
551 {
552 return true;
553 }
554
555 CORE_ADDR
556 x86_target::low_get_pc (regcache *regcache)
557 {
558 int use_64bit = register_size (regcache->tdesc, 0) == 8;
559
560 if (use_64bit)
561 {
562 uint64_t pc;
563
564 collect_register_by_name (regcache, "rip", &pc);
565 return (CORE_ADDR) pc;
566 }
567 else
568 {
569 uint32_t pc;
570
571 collect_register_by_name (regcache, "eip", &pc);
572 return (CORE_ADDR) pc;
573 }
574 }
575
576 void
577 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
578 {
579 int use_64bit = register_size (regcache->tdesc, 0) == 8;
580
581 if (use_64bit)
582 {
583 uint64_t newpc = pc;
584
585 supply_register_by_name (regcache, "rip", &newpc);
586 }
587 else
588 {
589 uint32_t newpc = pc;
590
591 supply_register_by_name (regcache, "eip", &newpc);
592 }
593 }
594
595 int
596 x86_target::low_decr_pc_after_break ()
597 {
598 return 1;
599 }
600
601 \f
602 static const gdb_byte x86_breakpoint[] = { 0xCC };
603 #define x86_breakpoint_len 1
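/* The software breakpoint is the one-byte "int3" instruction (0xCC).
   After it traps, the saved PC points one byte past it, which is why
   low_decr_pc_after_break above returns 1.  */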
604
605 bool
606 x86_target::low_breakpoint_at (CORE_ADDR pc)
607 {
608 unsigned char c;
609
610 read_memory (pc, &c, 1);
611 if (c == 0xCC)
612 return true;
613
614 return false;
615 }
616 \f
617 /* Low-level function vector. */
618 struct x86_dr_low_type x86_dr_low =
619 {
620 x86_linux_dr_set_control,
621 x86_linux_dr_set_addr,
622 x86_linux_dr_get_addr,
623 x86_linux_dr_get_status,
624 x86_linux_dr_get_control,
625 sizeof (void *),
626 };
627 \f
628 /* Breakpoint/Watchpoint support. */
629
630 bool
631 x86_target::supports_z_point_type (char z_type)
632 {
633 switch (z_type)
634 {
635 case Z_PACKET_SW_BP:
636 case Z_PACKET_HW_BP:
637 case Z_PACKET_WRITE_WP:
638 case Z_PACKET_ACCESS_WP:
639 return true;
640 default:
641 return false;
642 }
643 }
644
645 int
646 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
647 int size, raw_breakpoint *bp)
648 {
649 struct process_info *proc = current_process ();
650
651 switch (type)
652 {
653 case raw_bkpt_type_hw:
654 case raw_bkpt_type_write_wp:
655 case raw_bkpt_type_access_wp:
656 {
657 enum target_hw_bp_type hw_type
658 = raw_bkpt_type_to_target_hw_bp_type (type);
659 struct x86_debug_reg_state *state
660 = &proc->priv->arch_private->debug_reg_state;
661
662 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
663 }
664
665 default:
666 /* Unsupported. */
667 return 1;
668 }
669 }
670
671 int
672 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
673 int size, raw_breakpoint *bp)
674 {
675 struct process_info *proc = current_process ();
676
677 switch (type)
678 {
679 case raw_bkpt_type_hw:
680 case raw_bkpt_type_write_wp:
681 case raw_bkpt_type_access_wp:
682 {
683 enum target_hw_bp_type hw_type
684 = raw_bkpt_type_to_target_hw_bp_type (type);
685 struct x86_debug_reg_state *state
686 = &proc->priv->arch_private->debug_reg_state;
687
688 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
689 }
690 default:
691 /* Unsupported. */
692 return 1;
693 }
694 }
695
696 bool
697 x86_target::low_stopped_by_watchpoint ()
698 {
699 struct process_info *proc = current_process ();
700 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
701 }
702
703 CORE_ADDR
704 x86_target::low_stopped_data_address ()
705 {
706 struct process_info *proc = current_process ();
707 CORE_ADDR addr;
708 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
709 &addr))
710 return addr;
711 return 0;
712 }
713 \f
714 /* Called when a new process is created. */
715
716 arch_process_info *
717 x86_target::low_new_process ()
718 {
719 struct arch_process_info *info = XCNEW (struct arch_process_info);
720
721 x86_low_init_dregs (&info->debug_reg_state);
722
723 return info;
724 }
725
726 /* Called when a process is being deleted. */
727
728 void
729 x86_target::low_delete_process (arch_process_info *info)
730 {
731 xfree (info);
732 }
733
734 void
735 x86_target::low_new_thread (lwp_info *lwp)
736 {
737 /* This comes from nat/. */
738 x86_linux_new_thread (lwp);
739 }
740
741 void
742 x86_target::low_delete_thread (arch_lwp_info *alwp)
743 {
744 /* This comes from nat/. */
745 x86_linux_delete_thread (alwp);
746 }
747
748 /* Target routine for new_fork. */
749
750 void
751 x86_target::low_new_fork (process_info *parent, process_info *child)
752 {
753 /* These are allocated by linux_add_process. */
754 gdb_assert (parent->priv != NULL
755 && parent->priv->arch_private != NULL);
756 gdb_assert (child->priv != NULL
757 && child->priv->arch_private != NULL);
758
 759   /* Linux kernels before 2.6.33 commit
 760      72f674d203cd230426437cdcf7dd6f681dad8b0d
 761      let the child inherit hardware debug registers from the parent
 762      on fork/vfork/clone.  Newer Linux kernels create such tasks with
 763      zeroed debug registers.
 764
 765      GDB core assumes the child inherits the watchpoints/hw
 766      breakpoints of the parent, and will remove them all from the
 767      forked-off process.  Copy the debug register mirrors into the
 768      new process so that all breakpoints and watchpoints can be
 769      removed together.  The debug register mirrors will be zeroed
 770      in the end before detaching the forked-off process, thus making
 771      this compatible with older Linux kernels too.  */
772
773 *child->priv->arch_private = *parent->priv->arch_private;
774 }
775
776 void
777 x86_target::low_prepare_to_resume (lwp_info *lwp)
778 {
779 /* This comes from nat/. */
780 x86_linux_prepare_to_resume (lwp);
781 }
782
783 /* See nat/x86-dregs.h. */
784
785 struct x86_debug_reg_state *
786 x86_debug_reg_state (pid_t pid)
787 {
788 struct process_info *proc = find_process_pid (pid);
789
790 return &proc->priv->arch_private->debug_reg_state;
791 }
792 \f
 793 /* When GDBSERVER is built as a 64-bit application on Linux, the
794 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
795 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
796 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
797 conversion in-place ourselves. */
798
 799 /* Convert a ptrace/host siginfo object into/from the siginfo in the
 800    layout of the inferior's architecture.  Returns true if any
801 conversion was done; false otherwise. If DIRECTION is 1, then copy
802 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
803 INF. */
804
805 bool
806 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
807 {
808 #ifdef __x86_64__
809 unsigned int machine;
810 int tid = lwpid_of (current_thread);
811 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
812
813 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
814 if (!is_64bit_tdesc ())
815 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
816 FIXUP_32);
817 /* No fixup for native x32 GDB. */
818 else if (!is_elf64 && sizeof (void *) == 8)
819 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
820 FIXUP_X32);
821 #endif
822
823 return false;
824 }
825 \f
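/* Nonzero if the connected GDB listed "i386" in its "xmlRegisters="
   qSupported feature, i.e. it understands x86 XML target descriptions.
   Set in process_qsupported below.  */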
826 static int use_xml;
827
828 /* Format of XSAVE extended state is:
829 struct
830 {
831 fxsave_bytes[0..463]
832 sw_usable_bytes[464..511]
833 xstate_hdr_bytes[512..575]
834 avx_bytes[576..831]
835 future_state etc
836 };
837
838 Same memory layout will be used for the coredump NT_X86_XSTATE
839 representing the XSAVE extended state registers.
840
 841    The first 8 bytes of sw_usable_bytes[464..471] hold the OS-enabled
842 extended state mask, which is the same as the extended control register
843 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
844 together with the mask saved in the xstate_hdr_bytes to determine what
845 states the processor/OS supports and what state, used or initialized,
846 the process/thread is in. */
847 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
848
849 /* Does the current host support the GETFPXREGS request? The header
850 file may or may not define it, and even if it is defined, the
851 kernel will return EIO if it's running on a pre-SSE processor. */
852 int have_ptrace_getfpxregs =
853 #ifdef HAVE_PTRACE_GETFPXREGS
854 -1
855 #else
856 0
857 #endif
858 ;
859
860 /* Get Linux/x86 target description from running target. */
861
862 static const struct target_desc *
863 x86_linux_read_description (void)
864 {
865 unsigned int machine;
866 int is_elf64;
867 int xcr0_features;
868 int tid;
869 static uint64_t xcr0;
870 struct regset_info *regset;
871
872 tid = lwpid_of (current_thread);
873
874 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
875
876 if (sizeof (void *) == 4)
877 {
878 if (is_elf64 > 0)
879 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
880 #ifndef __x86_64__
881 else if (machine == EM_X86_64)
882 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
883 #endif
884 }
885
886 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
887 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
888 {
889 elf_fpxregset_t fpxregs;
890
891 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
892 {
893 have_ptrace_getfpxregs = 0;
894 have_ptrace_getregset = 0;
895 return i386_linux_read_description (X86_XSTATE_X87);
896 }
897 else
898 have_ptrace_getfpxregs = 1;
899 }
900 #endif
901
902 if (!use_xml)
903 {
904 x86_xcr0 = X86_XSTATE_SSE_MASK;
905
906 /* Don't use XML. */
907 #ifdef __x86_64__
908 if (machine == EM_X86_64)
909 return tdesc_amd64_linux_no_xml;
910 else
911 #endif
912 return tdesc_i386_linux_no_xml;
913 }
914
915 if (have_ptrace_getregset == -1)
916 {
917 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
918 struct iovec iov;
919
920 iov.iov_base = xstateregs;
921 iov.iov_len = sizeof (xstateregs);
922
923 /* Check if PTRACE_GETREGSET works. */
924 if (ptrace (PTRACE_GETREGSET, tid,
925 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
926 have_ptrace_getregset = 0;
927 else
928 {
929 have_ptrace_getregset = 1;
930
931 /* Get XCR0 from XSAVE extended state. */
932 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
933 / sizeof (uint64_t))];
934
935 /* Use PTRACE_GETREGSET if it is available. */
936 for (regset = x86_regsets;
937 regset->fill_function != NULL; regset++)
938 if (regset->get_request == PTRACE_GETREGSET)
939 regset->size = X86_XSTATE_SIZE (xcr0);
940 else if (regset->type != GENERAL_REGS)
941 regset->size = 0;
942 }
943 }
944
945 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
946 xcr0_features = (have_ptrace_getregset
947 && (xcr0 & X86_XSTATE_ALL_MASK));
948
949 if (xcr0_features)
950 x86_xcr0 = xcr0;
951
952 if (machine == EM_X86_64)
953 {
954 #ifdef __x86_64__
955 const target_desc *tdesc = NULL;
956
957 if (xcr0_features)
958 {
959 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
960 !is_elf64);
961 }
962
963 if (tdesc == NULL)
964 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
965 return tdesc;
966 #endif
967 }
968 else
969 {
970 const target_desc *tdesc = NULL;
971
972 if (xcr0_features)
973 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
974
975 if (tdesc == NULL)
976 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
977
978 return tdesc;
979 }
980
981 gdb_assert_not_reached ("failed to return tdesc");
982 }
983
 984 /* Update the target descriptions of all processes; a new GDB has
 985    connected, and it may or may not support xml target descriptions.  */
986
987 void
988 x86_target::update_xmltarget ()
989 {
990 struct thread_info *saved_thread = current_thread;
991
992 /* Before changing the register cache's internal layout, flush the
993 contents of the current valid caches back to the threads, and
994 release the current regcache objects. */
995 regcache_release ();
996
997 for_each_process ([this] (process_info *proc) {
998 int pid = proc->pid;
999
1000 /* Look up any thread of this process. */
1001 current_thread = find_any_thread_of_pid (pid);
1002
1003 low_arch_setup ();
1004 });
1005
1006 current_thread = saved_thread;
1007 }
1008
1009 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1010 PTRACE_GETREGSET. */
1011
1012 void
1013 x86_target::process_qsupported (char **features, int count)
1014 {
1015 int i;
1016
1017   /* Assume GDB doesn't support XML unless it says otherwise.  If GDB
1018      sends "xmlRegisters=" with "i386" in its qSupported query, it supports
1019      x86 XML target descriptions.  */
1020 use_xml = 0;
1021 for (i = 0; i < count; i++)
1022 {
1023 const char *feature = features[i];
1024
1025 if (startswith (feature, "xmlRegisters="))
1026 {
1027 char *copy = xstrdup (feature + 13);
1028
1029 char *saveptr;
1030 for (char *p = strtok_r (copy, ",", &saveptr);
1031 p != NULL;
1032 p = strtok_r (NULL, ",", &saveptr))
1033 {
1034 if (strcmp (p, "i386") == 0)
1035 {
1036 use_xml = 1;
1037 break;
1038 }
1039 }
1040
1041 free (copy);
1042 }
1043 }
1044 update_xmltarget ();
1045 }
1046
1047 /* Common for x86/x86-64. */
1048
1049 static struct regsets_info x86_regsets_info =
1050 {
1051 x86_regsets, /* regsets */
1052 0, /* num_regsets */
1053 NULL, /* disabled_regsets */
1054 };
1055
1056 #ifdef __x86_64__
1057 static struct regs_info amd64_linux_regs_info =
1058 {
1059 NULL, /* regset_bitmap */
1060 NULL, /* usrregs_info */
1061 &x86_regsets_info
1062 };
1063 #endif
1064 static struct usrregs_info i386_linux_usrregs_info =
1065 {
1066 I386_NUM_REGS,
1067 i386_regmap,
1068 };
1069
1070 static struct regs_info i386_linux_regs_info =
1071 {
1072 NULL, /* regset_bitmap */
1073 &i386_linux_usrregs_info,
1074 &x86_regsets_info
1075 };
1076
1077 const regs_info *
1078 x86_target::get_regs_info ()
1079 {
1080 #ifdef __x86_64__
1081 if (is_64bit_tdesc ())
1082 return &amd64_linux_regs_info;
1083 else
1084 #endif
1085 return &i386_linux_regs_info;
1086 }
1087
1088 /* Initialize the target description for the architecture of the
1089 inferior. */
1090
1091 void
1092 x86_target::low_arch_setup ()
1093 {
1094 current_process ()->tdesc = x86_linux_read_description ();
1095 }
1096
1097 /* Fill *SYSNO with the number of the syscall that the inferior stopped at.
1098    This should only be called if LWP got a SYSCALL_SIGTRAP.  */
1099
1100 static void
1101 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1102 {
1103 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1104
1105 if (use_64bit)
1106 {
1107 long l_sysno;
1108
1109 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1110 *sysno = (int) l_sysno;
1111 }
1112 else
1113 collect_register_by_name (regcache, "orig_eax", sysno);
1114 }
1115
1116 bool
1117 x86_target::supports_tracepoints ()
1118 {
1119 return true;
1120 }
1121
1122 static void
1123 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1124 {
1125 target_write_memory (*to, buf, len);
1126 *to += len;
1127 }
1128
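/* Copy the opcode bytes given as a string of hexadecimal byte values
   in OP into BUF, returning the number of bytes written.  For
   instance, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 (mov %rsp,%rsi) and returns 3.  */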
1129 static int
1130 push_opcode (unsigned char *buf, const char *op)
1131 {
1132 unsigned char *buf_org = buf;
1133
1134 while (1)
1135 {
1136 char *endptr;
1137 unsigned long ul = strtoul (op, &endptr, 16);
1138
1139 if (endptr == op)
1140 break;
1141
1142 *buf++ = ul;
1143 op = endptr;
1144 }
1145
1146 return buf - buf_org;
1147 }
1148
1149 #ifdef __x86_64__
1150
1151 /* Build a jump pad that saves registers and calls a collection
1152    function.  Writes the jump instruction that jumps to the jump pad into
1153    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1154 tracepoint address. */
1155
1156 static int
1157 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1158 CORE_ADDR collector,
1159 CORE_ADDR lockaddr,
1160 ULONGEST orig_size,
1161 CORE_ADDR *jump_entry,
1162 CORE_ADDR *trampoline,
1163 ULONGEST *trampoline_size,
1164 unsigned char *jjump_pad_insn,
1165 ULONGEST *jjump_pad_insn_size,
1166 CORE_ADDR *adjusted_insn_addr,
1167 CORE_ADDR *adjusted_insn_addr_end,
1168 char *err)
1169 {
1170 unsigned char buf[40];
1171 int i, offset;
1172 int64_t loffset;
1173
1174 CORE_ADDR buildaddr = *jump_entry;
1175
1176 /* Build the jump pad. */
1177
1178 /* First, do tracepoint data collection. Save registers. */
1179 i = 0;
1180 /* Need to ensure stack pointer saved first. */
1181 buf[i++] = 0x54; /* push %rsp */
1182 buf[i++] = 0x55; /* push %rbp */
1183 buf[i++] = 0x57; /* push %rdi */
1184 buf[i++] = 0x56; /* push %rsi */
1185 buf[i++] = 0x52; /* push %rdx */
1186 buf[i++] = 0x51; /* push %rcx */
1187 buf[i++] = 0x53; /* push %rbx */
1188 buf[i++] = 0x50; /* push %rax */
1189 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1190 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1191 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1192 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1193 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1194 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1195 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1196 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1197 buf[i++] = 0x9c; /* pushfq */
1198 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1199 buf[i++] = 0xbf;
1200 memcpy (buf + i, &tpaddr, 8);
1201 i += 8;
1202 buf[i++] = 0x57; /* push %rdi */
1203 append_insns (&buildaddr, i, buf);
1204
1205 /* Stack space for the collecting_t object. */
1206 i = 0;
1207 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1208 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1209 memcpy (buf + i, &tpoint, 8);
1210 i += 8;
1211 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1212 i += push_opcode (&buf[i],
1213 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1214 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1215 append_insns (&buildaddr, i, buf);
1216
1217 /* spin-lock. */
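  /* The sequence below loops on "lock cmpxchg": it tries to store the
     address of the collecting_t object just built on the stack (%rsp,
     copied to %rcx) into *lockaddr while that word is still zero, and
     spins until the exchange succeeds.  */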
1218 i = 0;
1219   i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1220 memcpy (&buf[i], (void *) &lockaddr, 8);
1221 i += 8;
1222 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1223 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1224 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1225 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1226 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1227 append_insns (&buildaddr, i, buf);
1228
1229 /* Set up the gdb_collect call. */
1230 /* At this point, (stack pointer + 0x18) is the base of our saved
1231 register block. */
1232
1233 i = 0;
1234 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1235 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1236
1237 /* tpoint address may be 64-bit wide. */
1238   i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1239 memcpy (buf + i, &tpoint, 8);
1240 i += 8;
1241 append_insns (&buildaddr, i, buf);
1242
1243   /* The collector function, being in the shared library, may be more
1244      than 31 bits away from the jump pad.  */
1245 i = 0;
1246 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1247 memcpy (buf + i, &collector, 8);
1248 i += 8;
1249 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1250 append_insns (&buildaddr, i, buf);
1251
1252 /* Clear the spin-lock. */
1253 i = 0;
1254 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1255 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1256 memcpy (buf + i, &lockaddr, 8);
1257 i += 8;
1258 append_insns (&buildaddr, i, buf);
1259
1260 /* Remove stack that had been used for the collect_t object. */
1261 i = 0;
1262 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1263 append_insns (&buildaddr, i, buf);
1264
1265 /* Restore register state. */
1266 i = 0;
1267 buf[i++] = 0x48; /* add $0x8,%rsp */
1268 buf[i++] = 0x83;
1269 buf[i++] = 0xc4;
1270 buf[i++] = 0x08;
1271 buf[i++] = 0x9d; /* popfq */
1272 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1273 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1274 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1275 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1276 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1277 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1278 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1279 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1280 buf[i++] = 0x58; /* pop %rax */
1281 buf[i++] = 0x5b; /* pop %rbx */
1282 buf[i++] = 0x59; /* pop %rcx */
1283 buf[i++] = 0x5a; /* pop %rdx */
1284 buf[i++] = 0x5e; /* pop %rsi */
1285 buf[i++] = 0x5f; /* pop %rdi */
1286 buf[i++] = 0x5d; /* pop %rbp */
1287 buf[i++] = 0x5c; /* pop %rsp */
1288 append_insns (&buildaddr, i, buf);
1289
1290 /* Now, adjust the original instruction to execute in the jump
1291 pad. */
1292 *adjusted_insn_addr = buildaddr;
1293 relocate_instruction (&buildaddr, tpaddr);
1294 *adjusted_insn_addr_end = buildaddr;
1295
1296 /* Finally, write a jump back to the program. */
1297
1298 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1299 if (loffset > INT_MAX || loffset < INT_MIN)
1300 {
1301 sprintf (err,
1302 "E.Jump back from jump pad too far from tracepoint "
1303 "(offset 0x%" PRIx64 " > int32).", loffset);
1304 return 1;
1305 }
1306
1307 offset = (int) loffset;
1308 memcpy (buf, jump_insn, sizeof (jump_insn));
1309 memcpy (buf + 1, &offset, 4);
1310 append_insns (&buildaddr, sizeof (jump_insn), buf);
1311
1312 /* The jump pad is now built. Wire in a jump to our jump pad. This
1313 is always done last (by our caller actually), so that we can
1314 install fast tracepoints with threads running. This relies on
1315 the agent's atomic write support. */
1316 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1317 if (loffset > INT_MAX || loffset < INT_MIN)
1318 {
1319 sprintf (err,
1320 "E.Jump pad too far from tracepoint "
1321 "(offset 0x%" PRIx64 " > int32).", loffset);
1322 return 1;
1323 }
1324
1325 offset = (int) loffset;
1326
1327 memcpy (buf, jump_insn, sizeof (jump_insn));
1328 memcpy (buf + 1, &offset, 4);
1329 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1330 *jjump_pad_insn_size = sizeof (jump_insn);
1331
1332 /* Return the end address of our pad. */
1333 *jump_entry = buildaddr;
1334
1335 return 0;
1336 }
1337
1338 #endif /* __x86_64__ */
1339
1340 /* Build a jump pad that saves registers and calls a collection
1341    function.  Writes the jump instruction that jumps to the jump pad into
1342    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1343 tracepoint address. */
1344
1345 static int
1346 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1347 CORE_ADDR collector,
1348 CORE_ADDR lockaddr,
1349 ULONGEST orig_size,
1350 CORE_ADDR *jump_entry,
1351 CORE_ADDR *trampoline,
1352 ULONGEST *trampoline_size,
1353 unsigned char *jjump_pad_insn,
1354 ULONGEST *jjump_pad_insn_size,
1355 CORE_ADDR *adjusted_insn_addr,
1356 CORE_ADDR *adjusted_insn_addr_end,
1357 char *err)
1358 {
1359 unsigned char buf[0x100];
1360 int i, offset;
1361 CORE_ADDR buildaddr = *jump_entry;
1362
1363 /* Build the jump pad. */
1364
1365 /* First, do tracepoint data collection. Save registers. */
1366 i = 0;
1367 buf[i++] = 0x60; /* pushad */
1368 buf[i++] = 0x68; /* push tpaddr aka $pc */
1369 *((int *)(buf + i)) = (int) tpaddr;
1370 i += 4;
1371 buf[i++] = 0x9c; /* pushf */
1372 buf[i++] = 0x1e; /* push %ds */
1373 buf[i++] = 0x06; /* push %es */
1374 buf[i++] = 0x0f; /* push %fs */
1375 buf[i++] = 0xa0;
1376 buf[i++] = 0x0f; /* push %gs */
1377 buf[i++] = 0xa8;
1378 buf[i++] = 0x16; /* push %ss */
1379 buf[i++] = 0x0e; /* push %cs */
1380 append_insns (&buildaddr, i, buf);
1381
1382 /* Stack space for the collecting_t object. */
1383 i = 0;
1384 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1385
1386 /* Build the object. */
1387 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1388 memcpy (buf + i, &tpoint, 4);
1389 i += 4;
1390 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1391
1392 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1393 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1394 append_insns (&buildaddr, i, buf);
1395
1396   /* spin-lock.  Note this uses cmpxchg, which the original i386 lacks
1397      (it requires i486 or later).  If we cared, this could use xchg instead.  */
1398
1399 i = 0;
1400 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1401 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1402 %esp,<lockaddr> */
1403 memcpy (&buf[i], (void *) &lockaddr, 4);
1404 i += 4;
1405 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1406 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1407 append_insns (&buildaddr, i, buf);
1408
1409
1410 /* Set up arguments to the gdb_collect call. */
1411 i = 0;
1412 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1413 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1414 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1415 append_insns (&buildaddr, i, buf);
1416
1417 i = 0;
1418 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1419 append_insns (&buildaddr, i, buf);
1420
1421 i = 0;
1422 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1423 memcpy (&buf[i], (void *) &tpoint, 4);
1424 i += 4;
1425 append_insns (&buildaddr, i, buf);
1426
1427 buf[0] = 0xe8; /* call <reladdr> */
1428 offset = collector - (buildaddr + sizeof (jump_insn));
1429 memcpy (buf + 1, &offset, 4);
1430 append_insns (&buildaddr, 5, buf);
1431 /* Clean up after the call. */
1432 buf[0] = 0x83; /* add $0x8,%esp */
1433 buf[1] = 0xc4;
1434 buf[2] = 0x08;
1435 append_insns (&buildaddr, 3, buf);
1436
1437
1438 /* Clear the spin-lock. This would need the LOCK prefix on older
1439 broken archs. */
1440 i = 0;
1441 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1442 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1443 memcpy (buf + i, &lockaddr, 4);
1444 i += 4;
1445 append_insns (&buildaddr, i, buf);
1446
1447
1448 /* Remove stack that had been used for the collect_t object. */
1449 i = 0;
1450 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1451 append_insns (&buildaddr, i, buf);
1452
1453 i = 0;
1454 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1455 buf[i++] = 0xc4;
1456 buf[i++] = 0x04;
1457 buf[i++] = 0x17; /* pop %ss */
1458 buf[i++] = 0x0f; /* pop %gs */
1459 buf[i++] = 0xa9;
1460 buf[i++] = 0x0f; /* pop %fs */
1461 buf[i++] = 0xa1;
1462 buf[i++] = 0x07; /* pop %es */
1463 buf[i++] = 0x1f; /* pop %ds */
1464 buf[i++] = 0x9d; /* popf */
1465 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1466 buf[i++] = 0xc4;
1467 buf[i++] = 0x04;
1468 buf[i++] = 0x61; /* popad */
1469 append_insns (&buildaddr, i, buf);
1470
1471 /* Now, adjust the original instruction to execute in the jump
1472 pad. */
1473 *adjusted_insn_addr = buildaddr;
1474 relocate_instruction (&buildaddr, tpaddr);
1475 *adjusted_insn_addr_end = buildaddr;
1476
1477 /* Write the jump back to the program. */
1478 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1479 memcpy (buf, jump_insn, sizeof (jump_insn));
1480 memcpy (buf + 1, &offset, 4);
1481 append_insns (&buildaddr, sizeof (jump_insn), buf);
1482
1483 /* The jump pad is now built. Wire in a jump to our jump pad. This
1484 is always done last (by our caller actually), so that we can
1485 install fast tracepoints with threads running. This relies on
1486 the agent's atomic write support. */
1487 if (orig_size == 4)
1488 {
1489 /* Create a trampoline. */
1490 *trampoline_size = sizeof (jump_insn);
1491 if (!claim_trampoline_space (*trampoline_size, trampoline))
1492 {
1493 /* No trampoline space available. */
1494 strcpy (err,
1495 "E.Cannot allocate trampoline space needed for fast "
1496 "tracepoints on 4-byte instructions.");
1497 return 1;
1498 }
1499
1500 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1501 memcpy (buf, jump_insn, sizeof (jump_insn));
1502 memcpy (buf + 1, &offset, 4);
1503 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1504
1505 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1506 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1507 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1508 memcpy (buf + 2, &offset, 2);
1509 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1510 *jjump_pad_insn_size = sizeof (small_jump_insn);
1511 }
1512 else
1513 {
1514 /* Else use a 32-bit relative jump instruction. */
1515 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1516 memcpy (buf, jump_insn, sizeof (jump_insn));
1517 memcpy (buf + 1, &offset, 4);
1518 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1519 *jjump_pad_insn_size = sizeof (jump_insn);
1520 }
1521
1522 /* Return the end address of our pad. */
1523 *jump_entry = buildaddr;
1524
1525 return 0;
1526 }
1527
1528 static int
1529 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1530 CORE_ADDR collector,
1531 CORE_ADDR lockaddr,
1532 ULONGEST orig_size,
1533 CORE_ADDR *jump_entry,
1534 CORE_ADDR *trampoline,
1535 ULONGEST *trampoline_size,
1536 unsigned char *jjump_pad_insn,
1537 ULONGEST *jjump_pad_insn_size,
1538 CORE_ADDR *adjusted_insn_addr,
1539 CORE_ADDR *adjusted_insn_addr_end,
1540 char *err)
1541 {
1542 #ifdef __x86_64__
1543 if (is_64bit_tdesc ())
1544 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1545 collector, lockaddr,
1546 orig_size, jump_entry,
1547 trampoline, trampoline_size,
1548 jjump_pad_insn,
1549 jjump_pad_insn_size,
1550 adjusted_insn_addr,
1551 adjusted_insn_addr_end,
1552 err);
1553 #endif
1554
1555 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1556 collector, lockaddr,
1557 orig_size, jump_entry,
1558 trampoline, trampoline_size,
1559 jjump_pad_insn,
1560 jjump_pad_insn_size,
1561 adjusted_insn_addr,
1562 adjusted_insn_addr_end,
1563 err);
1564 }
1565
1566 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1567 architectures. */
1568
1569 static int
1570 x86_get_min_fast_tracepoint_insn_len (void)
1571 {
1572 static int warned_about_fast_tracepoints = 0;
1573
1574 #ifdef __x86_64__
1575 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1576 used for fast tracepoints. */
1577 if (is_64bit_tdesc ())
1578 return 5;
1579 #endif
1580
1581 if (agent_loaded_p ())
1582 {
1583 char errbuf[IPA_BUFSIZ];
1584
1585 errbuf[0] = '\0';
1586
1587 /* On x86, if trampolines are available, then 4-byte jump instructions
1588 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1589 with a 4-byte offset are used instead. */
1590 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1591 return 4;
1592 else
1593 {
1594          /* GDB has no channel to explain to the user why a shorter fast
1595 tracepoint is not possible, but at least make GDBserver
1596 mention that something has gone awry. */
1597 if (!warned_about_fast_tracepoints)
1598 {
1599 warning ("4-byte fast tracepoints not available; %s", errbuf);
1600 warned_about_fast_tracepoints = 1;
1601 }
1602 return 5;
1603 }
1604 }
1605 else
1606 {
1607 /* Indicate that the minimum length is currently unknown since the IPA
1608 has not loaded yet. */
1609 return 0;
1610 }
1611 }
1612
1613 static void
1614 add_insns (unsigned char *start, int len)
1615 {
1616 CORE_ADDR buildaddr = current_insn_ptr;
1617
1618 if (debug_threads)
1619 debug_printf ("Adding %d bytes of insn at %s\n",
1620 len, paddress (buildaddr));
1621
1622 append_insns (&buildaddr, len, start);
1623 current_insn_ptr = buildaddr;
1624 }
1625
1626 /* Our general strategy for emitting code is to avoid specifying raw
1627 bytes whenever possible, and instead copy a block of inline asm
1628 that is embedded in the function. This is a little messy, because
1629 we need to keep the compiler from discarding what looks like dead
1630 code, plus suppress various warnings. */
1631
1632 #define EMIT_ASM(NAME, INSNS) \
1633 do \
1634 { \
1635 extern unsigned char start_ ## NAME, end_ ## NAME; \
1636 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1637 __asm__ ("jmp end_" #NAME "\n" \
1638 "\t" "start_" #NAME ":" \
1639 "\t" INSNS "\n" \
1640 "\t" "end_" #NAME ":"); \
1641 } while (0)
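/* How EMIT_ASM works: the inline asm places the INSNS sequence between
   two local symbols, start_NAME and end_NAME, and jumps over it so the
   template is never executed by gdbserver itself; add_insns then copies
   the bytes between those symbols into the compiled bytecode at
   current_insn_ptr.  */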
1642
1643 #ifdef __x86_64__
1644
1645 #define EMIT_ASM32(NAME,INSNS) \
1646 do \
1647 { \
1648 extern unsigned char start_ ## NAME, end_ ## NAME; \
1649 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1650 __asm__ (".code32\n" \
1651 "\t" "jmp end_" #NAME "\n" \
1652 "\t" "start_" #NAME ":\n" \
1653 "\t" INSNS "\n" \
1654 "\t" "end_" #NAME ":\n" \
1655 ".code64\n"); \
1656 } while (0)
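/* In EMIT_ASM32 the .code32/.code64 directives make the assembler
   encode INSNS as 32-bit code, since on a 64-bit build these templates
   are copied into (and executed by) a 32-bit inferior.  */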
1657
1658 #else
1659
1660 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1661
1662 #endif
1663
1664 #ifdef __x86_64__
1665
1666 static void
1667 amd64_emit_prologue (void)
1668 {
1669 EMIT_ASM (amd64_prologue,
1670 "pushq %rbp\n\t"
1671 "movq %rsp,%rbp\n\t"
1672 "sub $0x20,%rsp\n\t"
1673 "movq %rdi,-8(%rbp)\n\t"
1674 "movq %rsi,-16(%rbp)");
1675 }
1676
1677
1678 static void
1679 amd64_emit_epilogue (void)
1680 {
1681 EMIT_ASM (amd64_epilogue,
1682 "movq -16(%rbp),%rdi\n\t"
1683 "movq %rax,(%rdi)\n\t"
1684 "xor %rax,%rax\n\t"
1685 "leave\n\t"
1686 "ret");
1687 }
1688
1689 static void
1690 amd64_emit_add (void)
1691 {
1692 EMIT_ASM (amd64_add,
1693 "add (%rsp),%rax\n\t"
1694 "lea 0x8(%rsp),%rsp");
1695 }
1696
1697 static void
1698 amd64_emit_sub (void)
1699 {
1700 EMIT_ASM (amd64_sub,
1701 "sub %rax,(%rsp)\n\t"
1702 "pop %rax");
1703 }
1704
1705 static void
1706 amd64_emit_mul (void)
1707 {
1708 emit_error = 1;
1709 }
1710
1711 static void
1712 amd64_emit_lsh (void)
1713 {
1714 emit_error = 1;
1715 }
1716
1717 static void
1718 amd64_emit_rsh_signed (void)
1719 {
1720 emit_error = 1;
1721 }
1722
1723 static void
1724 amd64_emit_rsh_unsigned (void)
1725 {
1726 emit_error = 1;
1727 }
1728
1729 static void
1730 amd64_emit_ext (int arg)
1731 {
1732 switch (arg)
1733 {
1734 case 8:
1735 EMIT_ASM (amd64_ext_8,
1736 "cbtw\n\t"
1737 "cwtl\n\t"
1738 "cltq");
1739 break;
1740 case 16:
1741 EMIT_ASM (amd64_ext_16,
1742 "cwtl\n\t"
1743 "cltq");
1744 break;
1745 case 32:
1746 EMIT_ASM (amd64_ext_32,
1747 "cltq");
1748 break;
1749 default:
1750 emit_error = 1;
1751 }
1752 }
1753
1754 static void
1755 amd64_emit_log_not (void)
1756 {
1757 EMIT_ASM (amd64_log_not,
1758 "test %rax,%rax\n\t"
1759 "sete %cl\n\t"
1760 "movzbq %cl,%rax");
1761 }
1762
1763 static void
1764 amd64_emit_bit_and (void)
1765 {
1766 EMIT_ASM (amd64_and,
1767 "and (%rsp),%rax\n\t"
1768 "lea 0x8(%rsp),%rsp");
1769 }
1770
1771 static void
1772 amd64_emit_bit_or (void)
1773 {
1774 EMIT_ASM (amd64_or,
1775 "or (%rsp),%rax\n\t"
1776 "lea 0x8(%rsp),%rsp");
1777 }
1778
1779 static void
1780 amd64_emit_bit_xor (void)
1781 {
1782 EMIT_ASM (amd64_xor,
1783 "xor (%rsp),%rax\n\t"
1784 "lea 0x8(%rsp),%rsp");
1785 }
1786
1787 static void
1788 amd64_emit_bit_not (void)
1789 {
1790 EMIT_ASM (amd64_bit_not,
1791 "xorq $0xffffffffffffffff,%rax");
1792 }
1793
1794 static void
1795 amd64_emit_equal (void)
1796 {
1797 EMIT_ASM (amd64_equal,
1798 "cmp %rax,(%rsp)\n\t"
1799 "je .Lamd64_equal_true\n\t"
1800 "xor %rax,%rax\n\t"
1801 "jmp .Lamd64_equal_end\n\t"
1802 ".Lamd64_equal_true:\n\t"
1803 "mov $0x1,%rax\n\t"
1804 ".Lamd64_equal_end:\n\t"
1805 "lea 0x8(%rsp),%rsp");
1806 }
1807
1808 static void
1809 amd64_emit_less_signed (void)
1810 {
1811 EMIT_ASM (amd64_less_signed,
1812 "cmp %rax,(%rsp)\n\t"
1813 "jl .Lamd64_less_signed_true\n\t"
1814 "xor %rax,%rax\n\t"
1815 "jmp .Lamd64_less_signed_end\n\t"
1816 ".Lamd64_less_signed_true:\n\t"
1817 "mov $1,%rax\n\t"
1818 ".Lamd64_less_signed_end:\n\t"
1819 "lea 0x8(%rsp),%rsp");
1820 }
1821
1822 static void
1823 amd64_emit_less_unsigned (void)
1824 {
1825 EMIT_ASM (amd64_less_unsigned,
1826 "cmp %rax,(%rsp)\n\t"
1827 "jb .Lamd64_less_unsigned_true\n\t"
1828 "xor %rax,%rax\n\t"
1829 "jmp .Lamd64_less_unsigned_end\n\t"
1830 ".Lamd64_less_unsigned_true:\n\t"
1831 "mov $1,%rax\n\t"
1832 ".Lamd64_less_unsigned_end:\n\t"
1833 "lea 0x8(%rsp),%rsp");
1834 }
1835
1836 static void
1837 amd64_emit_ref (int size)
1838 {
1839 switch (size)
1840 {
1841 case 1:
1842 EMIT_ASM (amd64_ref1,
1843 "movb (%rax),%al");
1844 break;
1845 case 2:
1846 EMIT_ASM (amd64_ref2,
1847 "movw (%rax),%ax");
1848 break;
1849 case 4:
1850 EMIT_ASM (amd64_ref4,
1851 "movl (%rax),%eax");
1852 break;
1853 case 8:
1854 EMIT_ASM (amd64_ref8,
1855 "movq (%rax),%rax");
1856 break;
1857 }
1858 }
1859
1860 static void
1861 amd64_emit_if_goto (int *offset_p, int *size_p)
1862 {
1863 EMIT_ASM (amd64_if_goto,
1864 "mov %rax,%rcx\n\t"
1865 "pop %rax\n\t"
1866 "cmp $0,%rcx\n\t"
1867 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1868 if (offset_p)
1869 *offset_p = 10;
1870 if (size_p)
1871 *size_p = 4;
1872 }
1873
1874 static void
1875 amd64_emit_goto (int *offset_p, int *size_p)
1876 {
1877 EMIT_ASM (amd64_goto,
1878 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1879 if (offset_p)
1880 *offset_p = 1;
1881 if (size_p)
1882 *size_p = 4;
1883 }
1884
1885 static void
1886 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1887 {
1888 int diff = (to - (from + size));
1889 unsigned char buf[sizeof (int)];
1890
1891 if (size != 4)
1892 {
1893 emit_error = 1;
1894 return;
1895 }
1896
1897 memcpy (buf, &diff, sizeof (int));
1898 target_write_memory (from, buf, sizeof (int));
1899 }
1900
1901 static void
1902 amd64_emit_const (LONGEST num)
1903 {
1904 unsigned char buf[16];
1905 int i;
1906 CORE_ADDR buildaddr = current_insn_ptr;
1907
1908 i = 0;
1909 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1910 memcpy (&buf[i], &num, sizeof (num));
1911 i += 8;
1912 append_insns (&buildaddr, i, buf);
1913 current_insn_ptr = buildaddr;
1914 }
1915
1916 static void
1917 amd64_emit_call (CORE_ADDR fn)
1918 {
1919 unsigned char buf[16];
1920 int i;
1921 CORE_ADDR buildaddr;
1922 LONGEST offset64;
1923
1924   /* The destination function, being in the shared library, may be more
1925      than 31 bits away from the compiled code pad.  */
1926
1927 buildaddr = current_insn_ptr;
1928
1929 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1930
1931 i = 0;
1932
1933 if (offset64 > INT_MAX || offset64 < INT_MIN)
1934 {
1935       /* Offset is too large for a relative call, so call through a
1936          register instead (avoided when possible).  Use %r10; since it is
1937          call-clobbered, we don't have to push/pop it.  */
1938 buf[i++] = 0x48; /* mov $fn,%r10 */
1939 buf[i++] = 0xba;
1940 memcpy (buf + i, &fn, 8);
1941 i += 8;
1942 buf[i++] = 0xff; /* callq *%r10 */
1943 buf[i++] = 0xd2;
1944 }
1945 else
1946 {
1947 int offset32 = offset64; /* we know we can't overflow here. */
1948
1949 buf[i++] = 0xe8; /* call <reladdr> */
1950 memcpy (buf + i, &offset32, 4);
1951 i += 4;
1952 }
1953
1954 append_insns (&buildaddr, i, buf);
1955 current_insn_ptr = buildaddr;
1956 }
1957
1958 static void
1959 amd64_emit_reg (int reg)
1960 {
1961 unsigned char buf[16];
1962 int i;
1963 CORE_ADDR buildaddr;
1964
1965 /* Assume raw_regs is still in %rdi. */
1966 buildaddr = current_insn_ptr;
1967 i = 0;
1968 buf[i++] = 0xbe; /* mov $<n>,%esi */
1969 memcpy (&buf[i], &reg, sizeof (reg));
1970 i += 4;
1971 append_insns (&buildaddr, i, buf);
1972 current_insn_ptr = buildaddr;
1973 amd64_emit_call (get_raw_reg_func_addr ());
1974 }
1975
1976 static void
1977 amd64_emit_pop (void)
1978 {
1979 EMIT_ASM (amd64_pop,
1980 "pop %rax");
1981 }
1982
1983 static void
1984 amd64_emit_stack_flush (void)
1985 {
1986 EMIT_ASM (amd64_stack_flush,
1987 "push %rax");
1988 }
1989
1990 static void
1991 amd64_emit_zero_ext (int arg)
1992 {
1993 switch (arg)
1994 {
1995 case 8:
1996 EMIT_ASM (amd64_zero_ext_8,
1997 "and $0xff,%rax");
1998 break;
1999 case 16:
2000 EMIT_ASM (amd64_zero_ext_16,
2001 "and $0xffff,%rax");
2002 break;
2003 case 32:
2004 EMIT_ASM (amd64_zero_ext_32,
2005 "mov $0xffffffff,%rcx\n\t"
2006 "and %rcx,%rax");
2007 break;
2008 default:
2009 emit_error = 1;
2010 }
2011 }
2012
2013 static void
2014 amd64_emit_swap (void)
2015 {
2016 EMIT_ASM (amd64_swap,
2017 "mov %rax,%rcx\n\t"
2018 "pop %rax\n\t"
2019 "push %rcx");
2020 }
2021
2022 static void
2023 amd64_emit_stack_adjust (int n)
2024 {
2025 unsigned char buf[16];
2026 int i;
2027 CORE_ADDR buildaddr = current_insn_ptr;
2028
2029 i = 0;
2030 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2031 buf[i++] = 0x8d;
2032 buf[i++] = 0x64;
2033 buf[i++] = 0x24;
2034 /* This only handles adjustments up to 16, but we don't expect any more. */
2035 buf[i++] = n * 8;
2036 append_insns (&buildaddr, i, buf);
2037 current_insn_ptr = buildaddr;
2038 }
2039
2040 /* FN's prototype is `LONGEST(*fn)(int)'. */
2041
2042 static void
2043 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2044 {
2045 unsigned char buf[16];
2046 int i;
2047 CORE_ADDR buildaddr;
2048
2049 buildaddr = current_insn_ptr;
2050 i = 0;
2051 buf[i++] = 0xbf; /* movl $<n>,%edi */
2052 memcpy (&buf[i], &arg1, sizeof (arg1));
2053 i += 4;
2054 append_insns (&buildaddr, i, buf);
2055 current_insn_ptr = buildaddr;
2056 amd64_emit_call (fn);
2057 }
2058
2059 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2060
2061 static void
2062 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2063 {
2064 unsigned char buf[16];
2065 int i;
2066 CORE_ADDR buildaddr;
2067
2068 buildaddr = current_insn_ptr;
2069 i = 0;
2070 buf[i++] = 0xbf; /* movl $<n>,%edi */
2071 memcpy (&buf[i], &arg1, sizeof (arg1));
2072 i += 4;
2073 append_insns (&buildaddr, i, buf);
2074 current_insn_ptr = buildaddr;
2075 EMIT_ASM (amd64_void_call_2_a,
2076 /* Save away a copy of the stack top. */
2077 "push %rax\n\t"
2078 /* Also pass top as the second argument. */
2079 "mov %rax,%rsi");
2080 amd64_emit_call (fn);
2081 EMIT_ASM (amd64_void_call_2_b,
2082 /* Restore the stack top, %rax may have been trashed. */
2083 "pop %rax");
2084 }
2085
2086 static void
2087 amd64_emit_eq_goto (int *offset_p, int *size_p)
2088 {
2089 EMIT_ASM (amd64_eq,
2090 "cmp %rax,(%rsp)\n\t"
2091 "jne .Lamd64_eq_fallthru\n\t"
2092 "lea 0x8(%rsp),%rsp\n\t"
2093 "pop %rax\n\t"
2094 /* jmp, but don't trust the assembler to choose the right jump */
2095 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2096 ".Lamd64_eq_fallthru:\n\t"
2097 "lea 0x8(%rsp),%rsp\n\t"
2098 "pop %rax");
2099
2100 if (offset_p)
2101 *offset_p = 13;
2102 if (size_p)
2103 *size_p = 4;
2104 }
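
/* In these conditional-goto emitters, *OFFSET_P is the byte offset of the
   rel32 field of the placeholder "jmp" (.byte 0xe9, ...) within the
   sequence just emitted, and *SIZE_P is that field's width; the field is
   patched later, once the destination is known, by amd64_write_goto_address.
   A minimal sketch of the patch arithmetic, assuming the field is reachable
   through a plain pointer rather than through target_write_memory.  */

#include <cstdint>
#include <cstring>

static void
patch_goto_rel32 (unsigned char *field, uint64_t field_addr, uint64_t dest)
{
  /* The displacement is relative to the end of the 4-byte field.  */
  int32_t rel32 = (int32_t) (dest - (field_addr + 4));
  memcpy (field, &rel32, sizeof rel32);
}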
2105
2106 static void
2107 amd64_emit_ne_goto (int *offset_p, int *size_p)
2108 {
2109 EMIT_ASM (amd64_ne,
2110 "cmp %rax,(%rsp)\n\t"
2111 "je .Lamd64_ne_fallthru\n\t"
2112 "lea 0x8(%rsp),%rsp\n\t"
2113 "pop %rax\n\t"
2114 /* jmp, but don't trust the assembler to choose the right jump */
2115 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2116 ".Lamd64_ne_fallthru:\n\t"
2117 "lea 0x8(%rsp),%rsp\n\t"
2118 "pop %rax");
2119
2120 if (offset_p)
2121 *offset_p = 13;
2122 if (size_p)
2123 *size_p = 4;
2124 }
2125
2126 static void
2127 amd64_emit_lt_goto (int *offset_p, int *size_p)
2128 {
2129 EMIT_ASM (amd64_lt,
2130 "cmp %rax,(%rsp)\n\t"
2131 "jnl .Lamd64_lt_fallthru\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2133 "pop %rax\n\t"
2134 /* jmp, but don't trust the assembler to choose the right jump */
2135 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2136 ".Lamd64_lt_fallthru:\n\t"
2137 "lea 0x8(%rsp),%rsp\n\t"
2138 "pop %rax");
2139
2140 if (offset_p)
2141 *offset_p = 13;
2142 if (size_p)
2143 *size_p = 4;
2144 }
2145
2146 static void
2147 amd64_emit_le_goto (int *offset_p, int *size_p)
2148 {
2149 EMIT_ASM (amd64_le,
2150 "cmp %rax,(%rsp)\n\t"
2151 "jnle .Lamd64_le_fallthru\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2153 "pop %rax\n\t"
2154 /* jmp, but don't trust the assembler to choose the right jump */
2155 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2156 ".Lamd64_le_fallthru:\n\t"
2157 "lea 0x8(%rsp),%rsp\n\t"
2158 "pop %rax");
2159
2160 if (offset_p)
2161 *offset_p = 13;
2162 if (size_p)
2163 *size_p = 4;
2164 }
2165
2166 static void
2167 amd64_emit_gt_goto (int *offset_p, int *size_p)
2168 {
2169 EMIT_ASM (amd64_gt,
2170 "cmp %rax,(%rsp)\n\t"
2171 "jng .Lamd64_gt_fallthru\n\t"
2172 "lea 0x8(%rsp),%rsp\n\t"
2173 "pop %rax\n\t"
2174 /* jmp, but don't trust the assembler to choose the right jump */
2175 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2176 ".Lamd64_gt_fallthru:\n\t"
2177 "lea 0x8(%rsp),%rsp\n\t"
2178 "pop %rax");
2179
2180 if (offset_p)
2181 *offset_p = 13;
2182 if (size_p)
2183 *size_p = 4;
2184 }
2185
2186 static void
2187 amd64_emit_ge_goto (int *offset_p, int *size_p)
2188 {
2189 EMIT_ASM (amd64_ge,
2190 "cmp %rax,(%rsp)\n\t"
2191 "jnge .Lamd64_ge_fallthru\n\t"
2192 ".Lamd64_ge_jump:\n\t"
2193 "lea 0x8(%rsp),%rsp\n\t"
2194 "pop %rax\n\t"
2195 /* jmp, but don't trust the assembler to choose the right jump */
2196 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2197 ".Lamd64_ge_fallthru:\n\t"
2198 "lea 0x8(%rsp),%rsp\n\t"
2199 "pop %rax");
2200
2201 if (offset_p)
2202 *offset_p = 13;
2203 if (size_p)
2204 *size_p = 4;
2205 }
2206
2207 struct emit_ops amd64_emit_ops =
2208 {
2209 amd64_emit_prologue,
2210 amd64_emit_epilogue,
2211 amd64_emit_add,
2212 amd64_emit_sub,
2213 amd64_emit_mul,
2214 amd64_emit_lsh,
2215 amd64_emit_rsh_signed,
2216 amd64_emit_rsh_unsigned,
2217 amd64_emit_ext,
2218 amd64_emit_log_not,
2219 amd64_emit_bit_and,
2220 amd64_emit_bit_or,
2221 amd64_emit_bit_xor,
2222 amd64_emit_bit_not,
2223 amd64_emit_equal,
2224 amd64_emit_less_signed,
2225 amd64_emit_less_unsigned,
2226 amd64_emit_ref,
2227 amd64_emit_if_goto,
2228 amd64_emit_goto,
2229 amd64_write_goto_address,
2230 amd64_emit_const,
2231 amd64_emit_call,
2232 amd64_emit_reg,
2233 amd64_emit_pop,
2234 amd64_emit_stack_flush,
2235 amd64_emit_zero_ext,
2236 amd64_emit_swap,
2237 amd64_emit_stack_adjust,
2238 amd64_emit_int_call_1,
2239 amd64_emit_void_call_2,
2240 amd64_emit_eq_goto,
2241 amd64_emit_ne_goto,
2242 amd64_emit_lt_goto,
2243 amd64_emit_le_goto,
2244 amd64_emit_gt_goto,
2245 amd64_emit_ge_goto
2246 };
2247
2248 #endif /* __x86_64__ */
2249
2250 static void
2251 i386_emit_prologue (void)
2252 {
2253 EMIT_ASM32 (i386_prologue,
2254 "push %ebp\n\t"
2255 "mov %esp,%ebp\n\t"
2256 "push %ebx");
2257 /* At this point, the raw regs base address is at 8(%ebp), and the
2258 value pointer is at 12(%ebp). */
2259 }
2260
2261 static void
2262 i386_emit_epilogue (void)
2263 {
2264 EMIT_ASM32 (i386_epilogue,
2265 "mov 12(%ebp),%ecx\n\t"
2266 "mov %eax,(%ecx)\n\t"
2267 "mov %ebx,0x4(%ecx)\n\t"
2268 "xor %eax,%eax\n\t"
2269 "pop %ebx\n\t"
2270 "pop %ebp\n\t"
2271 "ret");
2272 }
2273
2274 static void
2275 i386_emit_add (void)
2276 {
2277 EMIT_ASM32 (i386_add,
2278 "add (%esp),%eax\n\t"
2279 "adc 0x4(%esp),%ebx\n\t"
2280 "lea 0x8(%esp),%esp");
2281 }
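
/* On i386 the 64-bit top-of-stack value lives in the %ebx:%eax pair, so the
   add/adc pair above adds two 64-bit values one 32-bit half at a time.
   Equivalent C, as a minimal illustration with a hypothetical helper name.  */

#include <cstdint>

static uint64_t
add64_via_halves (uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi)
{
  uint32_t lo = a_lo + b_lo;
  uint32_t carry = lo < a_lo;        /* Carry out of the low-word add.  */
  uint32_t hi = a_hi + b_hi + carry; /* "adc" folds that carry back in.  */
  return ((uint64_t) hi << 32) | lo;
}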
2282
2283 static void
2284 i386_emit_sub (void)
2285 {
2286 EMIT_ASM32 (i386_sub,
2287 "subl %eax,(%esp)\n\t"
2288 "sbbl %ebx,4(%esp)\n\t"
2289 "pop %eax\n\t"
2290 "pop %ebx\n\t");
2291 }
2292
2293 static void
2294 i386_emit_mul (void)
2295 {
2296 emit_error = 1;
2297 }
2298
2299 static void
2300 i386_emit_lsh (void)
2301 {
2302 emit_error = 1;
2303 }
2304
2305 static void
2306 i386_emit_rsh_signed (void)
2307 {
2308 emit_error = 1;
2309 }
2310
2311 static void
2312 i386_emit_rsh_unsigned (void)
2313 {
2314 emit_error = 1;
2315 }
2316
2317 static void
2318 i386_emit_ext (int arg)
2319 {
2320 switch (arg)
2321 {
2322 case 8:
2323 EMIT_ASM32 (i386_ext_8,
2324 "cbtw\n\t"
2325 "cwtl\n\t"
2326 "movl %eax,%ebx\n\t"
2327 "sarl $31,%ebx");
2328 break;
2329 case 16:
2330 EMIT_ASM32 (i386_ext_16,
2331 "cwtl\n\t"
2332 "movl %eax,%ebx\n\t"
2333 "sarl $31,%ebx");
2334 break;
2335 case 32:
2336 EMIT_ASM32 (i386_ext_32,
2337 "movl %eax,%ebx\n\t"
2338 "sarl $31,%ebx");
2339 break;
2340 default:
2341 emit_error = 1;
2342 }
2343 }
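
/* The sign extension above fills %ebx with copies of the sign bit of %eax
   ("sarl $31,%ebx") after the narrower value has been widened to 32 bits.
   A minimal C illustration of that final step; the helper name is
   hypothetical.  */

#include <cstdint>

static void
sign_extend_to_halves (int32_t low, uint32_t *lo_out, uint32_t *hi_out)
{
  *lo_out = (uint32_t) low;
  *hi_out = low < 0 ? 0xffffffffu : 0u; /* What "sarl $31" leaves in %ebx.  */
}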
2344
2345 static void
2346 i386_emit_log_not (void)
2347 {
2348 EMIT_ASM32 (i386_log_not,
2349 "or %ebx,%eax\n\t"
2350 "test %eax,%eax\n\t"
2351 "sete %cl\n\t"
2352 "xor %ebx,%ebx\n\t"
2353 "movzbl %cl,%eax");
2354 }
2355
2356 static void
2357 i386_emit_bit_and (void)
2358 {
2359 EMIT_ASM32 (i386_and,
2360 "and (%esp),%eax\n\t"
2361 "and 0x4(%esp),%ebx\n\t"
2362 "lea 0x8(%esp),%esp");
2363 }
2364
2365 static void
2366 i386_emit_bit_or (void)
2367 {
2368 EMIT_ASM32 (i386_or,
2369 "or (%esp),%eax\n\t"
2370 "or 0x4(%esp),%ebx\n\t"
2371 "lea 0x8(%esp),%esp");
2372 }
2373
2374 static void
2375 i386_emit_bit_xor (void)
2376 {
2377 EMIT_ASM32 (i386_xor,
2378 "xor (%esp),%eax\n\t"
2379 "xor 0x4(%esp),%ebx\n\t"
2380 "lea 0x8(%esp),%esp");
2381 }
2382
2383 static void
2384 i386_emit_bit_not (void)
2385 {
2386 EMIT_ASM32 (i386_bit_not,
2387 "xor $0xffffffff,%eax\n\t"
2388 "xor $0xffffffff,%ebx\n\t");
2389 }
2390
2391 static void
2392 i386_emit_equal (void)
2393 {
2394 EMIT_ASM32 (i386_equal,
2395 "cmpl %ebx,4(%esp)\n\t"
2396 "jne .Li386_equal_false\n\t"
2397 "cmpl %eax,(%esp)\n\t"
2398 "je .Li386_equal_true\n\t"
2399 ".Li386_equal_false:\n\t"
2400 "xor %eax,%eax\n\t"
2401 "jmp .Li386_equal_end\n\t"
2402 ".Li386_equal_true:\n\t"
2403 "mov $1,%eax\n\t"
2404 ".Li386_equal_end:\n\t"
2405 "xor %ebx,%ebx\n\t"
2406 "lea 0x8(%esp),%esp");
2407 }
2408
2409 static void
2410 i386_emit_less_signed (void)
2411 {
2412 EMIT_ASM32 (i386_less_signed,
2413 "cmpl %ebx,4(%esp)\n\t"
2414 "jl .Li386_less_signed_true\n\t"
2415 "jne .Li386_less_signed_false\n\t"
2416 "cmpl %eax,(%esp)\n\t"
2417 "jl .Li386_less_signed_true\n\t"
2418 ".Li386_less_signed_false:\n\t"
2419 "xor %eax,%eax\n\t"
2420 "jmp .Li386_less_signed_end\n\t"
2421 ".Li386_less_signed_true:\n\t"
2422 "mov $1,%eax\n\t"
2423 ".Li386_less_signed_end:\n\t"
2424 "xor %ebx,%ebx\n\t"
2425 "lea 0x8(%esp),%esp");
2426 }
2427
2428 static void
2429 i386_emit_less_unsigned (void)
2430 {
2431 EMIT_ASM32 (i386_less_unsigned,
2432 "cmpl %ebx,4(%esp)\n\t"
2433 "jb .Li386_less_unsigned_true\n\t"
2434 "jne .Li386_less_unsigned_false\n\t"
2435 "cmpl %eax,(%esp)\n\t"
2436 "jb .Li386_less_unsigned_true\n\t"
2437 ".Li386_less_unsigned_false:\n\t"
2438 "xor %eax,%eax\n\t"
2439 "jmp .Li386_less_unsigned_end\n\t"
2440 ".Li386_less_unsigned_true:\n\t"
2441 "mov $1,%eax\n\t"
2442 ".Li386_less_unsigned_end:\n\t"
2443 "xor %ebx,%ebx\n\t"
2444 "lea 0x8(%esp),%esp");
2445 }
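
/* The two comparisons above implement a 64-bit unsigned "less than" using
   32-bit halves: the high words decide unless they are equal, in which case
   the low words decide.  Equivalent C, for illustration only.  */

#include <cstdint>

static int
less_unsigned_64_via_halves (uint32_t a_lo, uint32_t a_hi,
                             uint32_t b_lo, uint32_t b_hi)
{
  if (a_hi != b_hi)
    return a_hi < b_hi;
  return a_lo < b_lo;
}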
2446
2447 static void
2448 i386_emit_ref (int size)
2449 {
2450 switch (size)
2451 {
2452 case 1:
2453 EMIT_ASM32 (i386_ref1,
2454 "movb (%eax),%al");
2455 break;
2456 case 2:
2457 EMIT_ASM32 (i386_ref2,
2458 "movw (%eax),%ax");
2459 break;
2460 case 4:
2461 EMIT_ASM32 (i386_ref4,
2462 "movl (%eax),%eax");
2463 break;
2464 case 8:
2465 EMIT_ASM32 (i386_ref8,
2466 "movl 4(%eax),%ebx\n\t"
2467 "movl (%eax),%eax");
2468 break;
2469 }
2470 }
2471
2472 static void
2473 i386_emit_if_goto (int *offset_p, int *size_p)
2474 {
2475 EMIT_ASM32 (i386_if_goto,
2476 "mov %eax,%ecx\n\t"
2477 "or %ebx,%ecx\n\t"
2478 "pop %eax\n\t"
2479 "pop %ebx\n\t"
2480 "cmpl $0,%ecx\n\t"
2481 /* Don't trust the assembler to choose the right jump */
2482 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2483
2484 if (offset_p)
2485 *offset_p = 11; /* Be sure that this matches the sequence above.  */
2486 if (size_p)
2487 *size_p = 4;
2488 }
2489
2490 static void
2491 i386_emit_goto (int *offset_p, int *size_p)
2492 {
2493 EMIT_ASM32 (i386_goto,
2494 /* Don't trust the assembler to choose the right jump */
2495 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2496 if (offset_p)
2497 *offset_p = 1;
2498 if (size_p)
2499 *size_p = 4;
2500 }
2501
2502 static void
2503 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2504 {
2505 int diff = (to - (from + size));
2506 unsigned char buf[sizeof (int)];
2507
2508 /* We're only doing 4-byte sizes at the moment. */
2509 if (size != 4)
2510 {
2511 emit_error = 1;
2512 return;
2513 }
2514
2515 memcpy (buf, &diff, sizeof (int));
2516 target_write_memory (from, buf, sizeof (int));
2517 }
2518
2519 static void
2520 i386_emit_const (LONGEST num)
2521 {
2522 unsigned char buf[16];
2523 int i, hi, lo;
2524 CORE_ADDR buildaddr = current_insn_ptr;
2525
2526 i = 0;
2527 buf[i++] = 0xb8; /* mov $<n>,%eax */
2528 lo = num & 0xffffffff;
2529 memcpy (&buf[i], &lo, sizeof (lo));
2530 i += 4;
2531 hi = ((num >> 32) & 0xffffffff);
2532 if (hi)
2533 {
2534 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2535 memcpy (&buf[i], &hi, sizeof (hi));
2536 i += 4;
2537 }
2538 else
2539 {
2540 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2541 }
2542 append_insns (&buildaddr, i, buf);
2543 current_insn_ptr = buildaddr;
2544 }
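
/* The constant is loaded as two 32-bit halves, low into %eax and high into
   %ebx (or "xor %ebx,%ebx" when the high half is zero).  The equivalent C
   split, shown only for illustration; LONGEST is a 64-bit signed type.  */

#include <cstdint>

static void
split_longest (int64_t num, uint32_t *lo, uint32_t *hi)
{
  *lo = (uint32_t) (num & 0xffffffff);
  *hi = (uint32_t) ((num >> 32) & 0xffffffff);
}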
2545
2546 static void
2547 i386_emit_call (CORE_ADDR fn)
2548 {
2549 unsigned char buf[16];
2550 int i, offset;
2551 CORE_ADDR buildaddr;
2552
2553 buildaddr = current_insn_ptr;
2554 i = 0;
2555 buf[i++] = 0xe8; /* call <reladdr> */
2556 offset = ((int) fn) - (buildaddr + 5);
2557 memcpy (buf + 1, &offset, 4);
2558 append_insns (&buildaddr, 5, buf);
2559 current_insn_ptr = buildaddr;
2560 }
2561
2562 static void
2563 i386_emit_reg (int reg)
2564 {
2565 unsigned char buf[16];
2566 int i;
2567 CORE_ADDR buildaddr;
2568
2569 EMIT_ASM32 (i386_reg_a,
2570 "sub $0x8,%esp");
2571 buildaddr = current_insn_ptr;
2572 i = 0;
2573 buf[i++] = 0xb8; /* mov $<n>,%eax */
2574 memcpy (&buf[i], &reg, sizeof (reg));
2575 i += 4;
2576 append_insns (&buildaddr, i, buf);
2577 current_insn_ptr = buildaddr;
2578 EMIT_ASM32 (i386_reg_b,
2579 "mov %eax,4(%esp)\n\t"
2580 "mov 8(%ebp),%eax\n\t"
2581 "mov %eax,(%esp)");
2582 i386_emit_call (get_raw_reg_func_addr ());
2583 EMIT_ASM32 (i386_reg_c,
2584 "xor %ebx,%ebx\n\t"
2585 "lea 0x8(%esp),%esp");
2586 }
2587
2588 static void
2589 i386_emit_pop (void)
2590 {
2591 EMIT_ASM32 (i386_pop,
2592 "pop %eax\n\t"
2593 "pop %ebx");
2594 }
2595
2596 static void
2597 i386_emit_stack_flush (void)
2598 {
2599 EMIT_ASM32 (i386_stack_flush,
2600 "push %ebx\n\t"
2601 "push %eax");
2602 }
2603
2604 static void
2605 i386_emit_zero_ext (int arg)
2606 {
2607 switch (arg)
2608 {
2609 case 8:
2610 EMIT_ASM32 (i386_zero_ext_8,
2611 "and $0xff,%eax\n\t"
2612 "xor %ebx,%ebx");
2613 break;
2614 case 16:
2615 EMIT_ASM32 (i386_zero_ext_16,
2616 "and $0xffff,%eax\n\t"
2617 "xor %ebx,%ebx");
2618 break;
2619 case 32:
2620 EMIT_ASM32 (i386_zero_ext_32,
2621 "xor %ebx,%ebx");
2622 break;
2623 default:
2624 emit_error = 1;
2625 }
2626 }
2627
2628 static void
2629 i386_emit_swap (void)
2630 {
2631 EMIT_ASM32 (i386_swap,
2632 "mov %eax,%ecx\n\t"
2633 "mov %ebx,%edx\n\t"
2634 "pop %eax\n\t"
2635 "pop %ebx\n\t"
2636 "push %edx\n\t"
2637 "push %ecx");
2638 }
2639
2640 static void
2641 i386_emit_stack_adjust (int n)
2642 {
2643 unsigned char buf[16];
2644 int i;
2645 CORE_ADDR buildaddr = current_insn_ptr;
2646
2647 i = 0;
2648 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2649 buf[i++] = 0x64;
2650 buf[i++] = 0x24;
2651 buf[i++] = n * 8;
2652 append_insns (&buildaddr, i, buf);
2653 current_insn_ptr = buildaddr;
2654 }
2655
2656 /* FN's prototype is `LONGEST(*fn)(int)'. */
2657
2658 static void
2659 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2660 {
2661 unsigned char buf[16];
2662 int i;
2663 CORE_ADDR buildaddr;
2664
2665 EMIT_ASM32 (i386_int_call_1_a,
2666 /* Reserve a bit of stack space. */
2667 "sub $0x8,%esp");
2668 /* Put the one argument on the stack. */
2669 buildaddr = current_insn_ptr;
2670 i = 0;
2671 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2672 buf[i++] = 0x04;
2673 buf[i++] = 0x24;
2674 memcpy (&buf[i], &arg1, sizeof (arg1));
2675 i += 4;
2676 append_insns (&buildaddr, i, buf);
2677 current_insn_ptr = buildaddr;
2678 i386_emit_call (fn);
2679 EMIT_ASM32 (i386_int_call_1_c,
2680 "mov %edx,%ebx\n\t"
2681 "lea 0x8(%esp),%esp");
2682 }
2683
2684 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2685
2686 static void
2687 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2688 {
2689 unsigned char buf[16];
2690 int i;
2691 CORE_ADDR buildaddr;
2692
2693 EMIT_ASM32 (i386_void_call_2_a,
2694 /* Preserve %eax only; we don't have to worry about %ebx. */
2695 "push %eax\n\t"
2696 /* Reserve a bit of stack space for arguments. */
2697 "sub $0x10,%esp\n\t"
2698 /* Copy "top" to the second argument position.  (Note that
2699 we can't assume the function won't scribble on its
2700 arguments, so don't try to restore from this.) */
2701 "mov %eax,4(%esp)\n\t"
2702 "mov %ebx,8(%esp)");
2703 /* Put the first argument on the stack. */
2704 buildaddr = current_insn_ptr;
2705 i = 0;
2706 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2707 buf[i++] = 0x04;
2708 buf[i++] = 0x24;
2709 memcpy (&buf[i], &arg1, sizeof (arg1));
2710 i += 4;
2711 append_insns (&buildaddr, i, buf);
2712 current_insn_ptr = buildaddr;
2713 i386_emit_call (fn);
2714 EMIT_ASM32 (i386_void_call_2_b,
2715 "lea 0x10(%esp),%esp\n\t"
2716 /* Restore original stack top. */
2717 "pop %eax");
2718 }
2719
2720
2721 static void
2722 i386_emit_eq_goto (int *offset_p, int *size_p)
2723 {
2724 EMIT_ASM32 (eq,
2725 /* Check the low half first; it is more likely to be the decider. */
2726 "cmpl %eax,(%esp)\n\t"
2727 "jne .Leq_fallthru\n\t"
2728 "cmpl %ebx,4(%esp)\n\t"
2729 "jne .Leq_fallthru\n\t"
2730 "lea 0x8(%esp),%esp\n\t"
2731 "pop %eax\n\t"
2732 "pop %ebx\n\t"
2733 /* jmp, but don't trust the assembler to choose the right jump */
2734 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2735 ".Leq_fallthru:\n\t"
2736 "lea 0x8(%esp),%esp\n\t"
2737 "pop %eax\n\t"
2738 "pop %ebx");
2739
2740 if (offset_p)
2741 *offset_p = 18;
2742 if (size_p)
2743 *size_p = 4;
2744 }
2745
2746 static void
2747 i386_emit_ne_goto (int *offset_p, int *size_p)
2748 {
2749 EMIT_ASM32 (ne,
2750 /* Check the low half first; it is more likely to be the decider. */
2751 "cmpl %eax,(%esp)\n\t"
2752 "jne .Lne_jump\n\t"
2753 "cmpl %ebx,4(%esp)\n\t"
2754 "je .Lne_fallthru\n\t"
2755 ".Lne_jump:\n\t"
2756 "lea 0x8(%esp),%esp\n\t"
2757 "pop %eax\n\t"
2758 "pop %ebx\n\t"
2759 /* jmp, but don't trust the assembler to choose the right jump */
2760 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2761 ".Lne_fallthru:\n\t"
2762 "lea 0x8(%esp),%esp\n\t"
2763 "pop %eax\n\t"
2764 "pop %ebx");
2765
2766 if (offset_p)
2767 *offset_p = 18;
2768 if (size_p)
2769 *size_p = 4;
2770 }
2771
2772 static void
2773 i386_emit_lt_goto (int *offset_p, int *size_p)
2774 {
2775 EMIT_ASM32 (lt,
2776 "cmpl %ebx,4(%esp)\n\t"
2777 "jl .Llt_jump\n\t"
2778 "jne .Llt_fallthru\n\t"
2779 "cmpl %eax,(%esp)\n\t"
2780 "jnl .Llt_fallthru\n\t"
2781 ".Llt_jump:\n\t"
2782 "lea 0x8(%esp),%esp\n\t"
2783 "pop %eax\n\t"
2784 "pop %ebx\n\t"
2785 /* jmp, but don't trust the assembler to choose the right jump */
2786 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2787 ".Llt_fallthru:\n\t"
2788 "lea 0x8(%esp),%esp\n\t"
2789 "pop %eax\n\t"
2790 "pop %ebx");
2791
2792 if (offset_p)
2793 *offset_p = 20;
2794 if (size_p)
2795 *size_p = 4;
2796 }
2797
2798 static void
2799 i386_emit_le_goto (int *offset_p, int *size_p)
2800 {
2801 EMIT_ASM32 (le,
2802 "cmpl %ebx,4(%esp)\n\t"
2803 "jle .Lle_jump\n\t"
2804 "jne .Lle_fallthru\n\t"
2805 "cmpl %eax,(%esp)\n\t"
2806 "jnle .Lle_fallthru\n\t"
2807 ".Lle_jump:\n\t"
2808 "lea 0x8(%esp),%esp\n\t"
2809 "pop %eax\n\t"
2810 "pop %ebx\n\t"
2811 /* jmp, but don't trust the assembler to choose the right jump */
2812 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2813 ".Lle_fallthru:\n\t"
2814 "lea 0x8(%esp),%esp\n\t"
2815 "pop %eax\n\t"
2816 "pop %ebx");
2817
2818 if (offset_p)
2819 *offset_p = 20;
2820 if (size_p)
2821 *size_p = 4;
2822 }
2823
2824 static void
2825 i386_emit_gt_goto (int *offset_p, int *size_p)
2826 {
2827 EMIT_ASM32 (gt,
2828 "cmpl %ebx,4(%esp)\n\t"
2829 "jg .Lgt_jump\n\t"
2830 "jne .Lgt_fallthru\n\t"
2831 "cmpl %eax,(%esp)\n\t"
2832 "jng .Lgt_fallthru\n\t"
2833 ".Lgt_jump:\n\t"
2834 "lea 0x8(%esp),%esp\n\t"
2835 "pop %eax\n\t"
2836 "pop %ebx\n\t"
2837 /* jmp, but don't trust the assembler to choose the right jump */
2838 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2839 ".Lgt_fallthru:\n\t"
2840 "lea 0x8(%esp),%esp\n\t"
2841 "pop %eax\n\t"
2842 "pop %ebx");
2843
2844 if (offset_p)
2845 *offset_p = 20;
2846 if (size_p)
2847 *size_p = 4;
2848 }
2849
2850 static void
2851 i386_emit_ge_goto (int *offset_p, int *size_p)
2852 {
2853 EMIT_ASM32 (ge,
2854 "cmpl %ebx,4(%esp)\n\t"
2855 "jge .Lge_jump\n\t"
2856 "jne .Lge_fallthru\n\t"
2857 "cmpl %eax,(%esp)\n\t"
2858 "jnge .Lge_fallthru\n\t"
2859 ".Lge_jump:\n\t"
2860 "lea 0x8(%esp),%esp\n\t"
2861 "pop %eax\n\t"
2862 "pop %ebx\n\t"
2863 /* jmp, but don't trust the assembler to choose the right jump */
2864 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2865 ".Lge_fallthru:\n\t"
2866 "lea 0x8(%esp),%esp\n\t"
2867 "pop %eax\n\t"
2868 "pop %ebx");
2869
2870 if (offset_p)
2871 *offset_p = 20;
2872 if (size_p)
2873 *size_p = 4;
2874 }
2875
2876 struct emit_ops i386_emit_ops =
2877 {
2878 i386_emit_prologue,
2879 i386_emit_epilogue,
2880 i386_emit_add,
2881 i386_emit_sub,
2882 i386_emit_mul,
2883 i386_emit_lsh,
2884 i386_emit_rsh_signed,
2885 i386_emit_rsh_unsigned,
2886 i386_emit_ext,
2887 i386_emit_log_not,
2888 i386_emit_bit_and,
2889 i386_emit_bit_or,
2890 i386_emit_bit_xor,
2891 i386_emit_bit_not,
2892 i386_emit_equal,
2893 i386_emit_less_signed,
2894 i386_emit_less_unsigned,
2895 i386_emit_ref,
2896 i386_emit_if_goto,
2897 i386_emit_goto,
2898 i386_write_goto_address,
2899 i386_emit_const,
2900 i386_emit_call,
2901 i386_emit_reg,
2902 i386_emit_pop,
2903 i386_emit_stack_flush,
2904 i386_emit_zero_ext,
2905 i386_emit_swap,
2906 i386_emit_stack_adjust,
2907 i386_emit_int_call_1,
2908 i386_emit_void_call_2,
2909 i386_emit_eq_goto,
2910 i386_emit_ne_goto,
2911 i386_emit_lt_goto,
2912 i386_emit_le_goto,
2913 i386_emit_gt_goto,
2914 i386_emit_ge_goto
2915 };
2916
2917
2918 static struct emit_ops *
2919 x86_emit_ops (void)
2920 {
2921 #ifdef __x86_64__
2922 if (is_64bit_tdesc ())
2923 return &amd64_emit_ops;
2924 else
2925 #endif
2926 return &i386_emit_ops;
2927 }
2928
2929 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2930
2931 const gdb_byte *
2932 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2933 {
2934 *size = x86_breakpoint_len;
2935 return x86_breakpoint;
2936 }
2937
2938 static int
2939 x86_supports_range_stepping (void)
2940 {
2941 return 1;
2942 }
2943
2944 /* Implementation of linux_target_ops method
2945 "supports_hardware_single_step".  */
2946
2947 static int
2948 x86_supports_hardware_single_step (void)
2949 {
2950 return 1;
2951 }
2952
2953 static int
2954 x86_get_ipa_tdesc_idx (void)
2955 {
2956 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2957 const struct target_desc *tdesc = regcache->tdesc;
2958
2959 #ifdef __x86_64__
2960 return amd64_get_ipa_tdesc_idx (tdesc);
2961 #endif
2962
2963 if (tdesc == tdesc_i386_linux_no_xml)
2964 return X86_TDESC_SSE;
2965
2966 return i386_get_ipa_tdesc_idx (tdesc);
2967 }
2968
2969 /* This is initialized assuming an amd64 target.
2970 'low_arch_setup' will correct it for i386 or amd64 targets. */
2971
2972 struct linux_target_ops the_low_target =
2973 {
2974 x86_install_fast_tracepoint_jump_pad,
2975 x86_emit_ops,
2976 x86_get_min_fast_tracepoint_insn_len,
2977 x86_supports_range_stepping,
2978 x86_supports_hardware_single_step,
2979 x86_get_syscall_trapinfo,
2980 x86_get_ipa_tdesc_idx,
2981 };
2982
2983 /* The linux target ops object. */
2984
2985 linux_process_target *the_linux_target = &the_x86_target;
2986
2987 void
2988 initialize_low_arch (void)
2989 {
2990 /* Initialize the Linux target descriptions. */
2991 #ifdef __x86_64__
2992 tdesc_amd64_linux_no_xml = allocate_target_description ();
2993 copy_target_description (tdesc_amd64_linux_no_xml,
2994 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2995 false));
2996 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2997 #endif
2998
2999 tdesc_i386_linux_no_xml = allocate_target_description ();
3000 copy_target_description (tdesc_i386_linux_no_xml,
3001 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3002 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3003
3004 initialize_regsets_info (&x86_regsets_info);
3005 }