Linux x86 low-level debug register code synchronization
File: gdb/gdbserver/linux-x86-low.c (deliverable/binutils-gdb.git)
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
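/* Encoding note (standard x86, recorded here for reference): jump_insn
   is "jmp rel32" (opcode 0xe9 plus a 4-byte signed displacement, 5
   bytes total) and small_jump_insn is the operand-size-prefixed
   "jmp rel16" (0x66 0xe9 plus a 2-byte displacement, 4 bytes total).
   The fast tracepoint code below patches the displacement bytes in
   place.  */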

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
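/* Kernel calling convention worth noting: for PTRACE_ARCH_PRCTL the
   usual addr/data roles are swapped.  The "data" argument carries the
   ARCH_* sub-function code and the "addr" argument points at the
   value, so e.g. ptrace (PTRACE_ARCH_PRCTL, tid, &base, ARCH_GET_FS)
   stores the thread's FS segment base into BASE.  */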

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* Number of bits to shift the GS
				      selector right by: the low 3 bits
				      hold the RPL and table indicator;
				      the rest is the GDT index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
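/* The DESC quadruple filled in by PTRACE_GET_THREAD_AREA above has the
   layout of the kernel's struct user_desc, so desc[1] is the segment
   base address; that is the only field consumed here.  */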


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f

/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
	  + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
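/* For example, u_debugreg_offset (DR_CONTROL) resolves to the offset
   of u_debugreg[7] within struct user, DR_CONTROL being debug
   register 7; each slot is one native word wide.  */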


/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  gdb_assert (ptid_lwp_p (ptid));
  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
  if (errno != 0)
    perror_with_name (_("Couldn't read debug register"));

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  gdb_assert (ptid_lwp_p (ptid));
  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
  if (errno != 0)
    perror_with_name (_("Couldn't write debug register"));
}

static int
update_debug_registers_callback (struct lwp_info *lwp, void *arg)
{
  /* The actual update is done later just before resuming the lwp,
     we just mark that the registers need updating.  */
  lwp_set_debug_registers_changed (lwp, 1);

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  return 0;
}

/* Store ADDR in the inferior's debug address register REGNUM.  The
   write itself is deferred: every LWP of the process is flagged (and
   stopped if running) so that x86_linux_prepare_to_resume updates its
   debug registers just before it next resumes.  */

static void
x86_linux_dr_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
x86_linux_dr_get_addr (int regnum)
{
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (current_lwp_ptid (), regnum);
}

/* Set the inferior's DR7 debug control register to CONTROL.  As with
   x86_linux_dr_set_addr, the write is deferred to
   x86_linux_prepare_to_resume.  */

static void
x86_linux_dr_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
x86_linux_dr_get_control (void)
{
  return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
}

/* Return the value of the inferior's DR6 debug status register.  */

static unsigned long
x86_linux_dr_get_status (void)
{
  return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
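/* The trailing sizeof (void *) fills the vector's debug register
   length field (per struct x86_dr_low_type in x86-low.h); the shared
   x86 debug register code consults it, e.g. to decide whether 8-byte
   watchpoint lengths are usable.  */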
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static void
x86_linux_new_thread (struct lwp_info *lwp)
{
  lwp_set_debug_registers_changed (lwp, 1);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of_lwp (lwp);
  int clear_status = 0;

  if (lwp_debug_registers_changed (lwp))
    {
      struct x86_debug_reg_state *state
	= x86_debug_reg_state (ptid_get_pid (ptid));
      int i;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, x86_dr_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      if (state->dr_control_mirror != 0)
	x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp_set_debug_registers_changed (lwp, 0);
    }

  if (clear_status
      || lwp_stop_reason (lwp) == TARGET_STOPPED_BY_WATCHPOINT)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
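/* Ordering note for the routine above: DR7 is zeroed before the
   address registers are rewritten so no stale breakpoint condition
   can fire while DR0..DR3 are in flux, and it is restored last; DR6
   is cleared afterwards so leftover status bits cannot be mistaken
   for a fresh watchpoint hit.  */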
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
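/* The _pad sizing above mirrors the kernel's invariant that a siginfo
   occupies 128 bytes in total: the three leading ints (si_signo,
   si_errno, si_code) leave 128 / sizeof (int) - 3 slots for the
   union.  */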

/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes area [464..471] hold the
  OS-enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We
  can use this mask together with the mask saved in the
  xstate_hdr_bytes to determine what states the processor/OS supports
  and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
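/* Sketch of the lookup this offset enables (it matches the code in
   x86_linux_read_description below): once the XSAVE block has been
   fetched with PTRACE_GETREGSET/NT_X86_XSTATE into BUF, XCR0 is
   simply *(uint64_t *) (buf + I386_LINUX_XSAVE_XCR0_OFFSET).  */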

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

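  /* Per x86-xstate.h, the X86_XSTATE_*_MASK constants build on one
     another (each includes the x87/SSE bits, and the AVX512 mask
     includes the AVX bits), so the exact-match switches below select
     the richest target description the reported XCR0 supports.  */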
  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case (X86_XSTATE_AVX512_MASK):
	      return tdesc_i386_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target descriptions of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Assume GDB doesn't support XML unless it tells us otherwise: if
     GDB sends "xmlRegisters=" with "i386" in the qSupported query, it
     supports x86 XML target descriptions.  */
  use_xml = 0;
  if (query != NULL && startswith (query, "xmlRegisters="))
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
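/* Usage example: push_opcode (buf, "48 89 e6") appends the three
   bytes 0x48 0x89 0xe6 ("mov %rsp,%rsi") to BUF and returns 3;
   parsing stops at the first token that is not a hex number.  */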

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs $<tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs $<lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs $<tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, living in the shared library, may be more
     than 2^31 bytes away from the jump pad, so call it indirectly
     through %rax.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which requires an i486 or
     newer (it leaves the original i386 behind).  If we cared about
     that, this could use xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
2031
2032 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2033 architectures. */
2034
2035 static int
2036 x86_get_min_fast_tracepoint_insn_len (void)
2037 {
2038 static int warned_about_fast_tracepoints = 0;
2039
2040 #ifdef __x86_64__
2041 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2042 used for fast tracepoints. */
2043 if (is_64bit_tdesc ())
2044 return 5;
2045 #endif
2046
2047 if (agent_loaded_p ())
2048 {
2049 char errbuf[IPA_BUFSIZ];
2050
2051 errbuf[0] = '\0';
2052
2053 /* On x86, if trampolines are available, then 4-byte jump instructions
2054 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2055 with a 4-byte offset are used instead. */
2056 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2057 return 4;
2058 else
2059 {
2060 /* GDB has no channel through which to explain to the user why a
2061 shorter fast tracepoint is not possible, but at least make
2062 GDBserver mention that something has gone awry.  */
2063 if (!warned_about_fast_tracepoints)
2064 {
2065 warning ("4-byte fast tracepoints not available; %s", errbuf);
2066 warned_about_fast_tracepoints = 1;
2067 }
2068 return 5;
2069 }
2070 }
2071 else
2072 {
2073 /* Indicate that the minimum length is currently unknown since the IPA
2074 has not loaded yet. */
2075 return 0;
2076 }
2077 }
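
/* For reference (encodings assumed from the jump sequences used
   above): the 5-byte form is a jmp rel32 (0xe9 plus a 4-byte
   displacement), while the 4-byte trampoline form is an
   operand-size-prefixed jmp rel16 (0x66 0xe9 plus a 2-byte
   displacement), which is why a nearby trampoline buffer is needed
   for the short form.  */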
2078
2079 static void
2080 add_insns (unsigned char *start, int len)
2081 {
2082 CORE_ADDR buildaddr = current_insn_ptr;
2083
2084 if (debug_threads)
2085 debug_printf ("Adding %d bytes of insn at %s\n",
2086 len, paddress (buildaddr));
2087
2088 append_insns (&buildaddr, len, start);
2089 current_insn_ptr = buildaddr;
2090 }
2091
2092 /* Our general strategy for emitting code is to avoid specifying raw
2093 bytes whenever possible, and instead copy a block of inline asm
2094 that is embedded in the function. This is a little messy, because
2095 we need to keep the compiler from discarding what looks like dead
2096 code, plus suppress various warnings. */
2097
2098 #define EMIT_ASM(NAME, INSNS) \
2099 do \
2100 { \
2101 extern unsigned char start_ ## NAME, end_ ## NAME; \
2102 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2103 __asm__ ("jmp end_" #NAME "\n" \
2104 "\t" "start_" #NAME ":" \
2105 "\t" INSNS "\n" \
2106 "\t" "end_" #NAME ":"); \
2107 } while (0)
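
/* For instance, a hypothetical use of the macro,

     EMIT_ASM (sample_nop, "nop");

   appends the single bracketed NOP byte to the buffer at
   current_insn_ptr; the leading jmp ensures the host never executes
   the bracketed bytes where the compiler placed them.  */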
2108
2109 #ifdef __x86_64__
2110
2111 #define EMIT_ASM32(NAME,INSNS) \
2112 do \
2113 { \
2114 extern unsigned char start_ ## NAME, end_ ## NAME; \
2115 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2116 __asm__ (".code32\n" \
2117 "\t" "jmp end_" #NAME "\n" \
2118 "\t" "start_" #NAME ":\n" \
2119 "\t" INSNS "\n" \
2120 "\t" "end_" #NAME ":\n" \
2121 ".code64\n"); \
2122 } while (0)
2123
2124 #else
2125
2126 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2127
2128 #endif
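
/* The .code32/.code64 wrapping above asks the assembler to encode the
   bracketed instructions as 32-bit code even inside a 64-bit
   gdbserver, since the copied bytes will execute in a 32-bit
   inferior.  */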
2129
2130 #ifdef __x86_64__
2131
2132 static void
2133 amd64_emit_prologue (void)
2134 {
2135 EMIT_ASM (amd64_prologue,
2136 "pushq %rbp\n\t"
2137 "movq %rsp,%rbp\n\t"
2138 "sub $0x20,%rsp\n\t"
2139 "movq %rdi,-8(%rbp)\n\t"
2140 "movq %rsi,-16(%rbp)");
2141 }
2142
2143
2144 static void
2145 amd64_emit_epilogue (void)
2146 {
2147 EMIT_ASM (amd64_epilogue,
2148 "movq -16(%rbp),%rdi\n\t"
2149 "movq %rax,(%rdi)\n\t"
2150 "xor %rax,%rax\n\t"
2151 "leave\n\t"
2152 "ret");
2153 }
2154
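/* In the emitters below, the agent expression evaluation stack keeps
   its top element in %rax, with older entries on the machine stack;
   binary operations therefore combine %rax with (%rsp) and then drop
   the stack slot.  */
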
2155 static void
2156 amd64_emit_add (void)
2157 {
2158 EMIT_ASM (amd64_add,
2159 "add (%rsp),%rax\n\t"
2160 "lea 0x8(%rsp),%rsp");
2161 }
2162
2163 static void
2164 amd64_emit_sub (void)
2165 {
2166 EMIT_ASM (amd64_sub,
2167 "sub %rax,(%rsp)\n\t"
2168 "pop %rax");
2169 }
2170
2171 static void
2172 amd64_emit_mul (void)
2173 {
2174 emit_error = 1;
2175 }
2176
2177 static void
2178 amd64_emit_lsh (void)
2179 {
2180 emit_error = 1;
2181 }
2182
2183 static void
2184 amd64_emit_rsh_signed (void)
2185 {
2186 emit_error = 1;
2187 }
2188
2189 static void
2190 amd64_emit_rsh_unsigned (void)
2191 {
2192 emit_error = 1;
2193 }
2194
2195 static void
2196 amd64_emit_ext (int arg)
2197 {
2198 switch (arg)
2199 {
2200 case 8:
2201 EMIT_ASM (amd64_ext_8,
2202 "cbtw\n\t"
2203 "cwtl\n\t"
2204 "cltq");
2205 break;
2206 case 16:
2207 EMIT_ASM (amd64_ext_16,
2208 "cwtl\n\t"
2209 "cltq");
2210 break;
2211 case 32:
2212 EMIT_ASM (amd64_ext_32,
2213 "cltq");
2214 break;
2215 default:
2216 emit_error = 1;
2217 }
2218 }
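
/* As an illustration, for arg == 8 the cbtw/cwtl/cltq chain
   sign-extends %al through %rax: an input of 0x80 in %al leaves
   0xffffffffffffff80 in %rax.  */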
2219
2220 static void
2221 amd64_emit_log_not (void)
2222 {
2223 EMIT_ASM (amd64_log_not,
2224 "test %rax,%rax\n\t"
2225 "sete %cl\n\t"
2226 "movzbq %cl,%rax");
2227 }
2228
2229 static void
2230 amd64_emit_bit_and (void)
2231 {
2232 EMIT_ASM (amd64_and,
2233 "and (%rsp),%rax\n\t"
2234 "lea 0x8(%rsp),%rsp");
2235 }
2236
2237 static void
2238 amd64_emit_bit_or (void)
2239 {
2240 EMIT_ASM (amd64_or,
2241 "or (%rsp),%rax\n\t"
2242 "lea 0x8(%rsp),%rsp");
2243 }
2244
2245 static void
2246 amd64_emit_bit_xor (void)
2247 {
2248 EMIT_ASM (amd64_xor,
2249 "xor (%rsp),%rax\n\t"
2250 "lea 0x8(%rsp),%rsp");
2251 }
2252
2253 static void
2254 amd64_emit_bit_not (void)
2255 {
2256 EMIT_ASM (amd64_bit_not,
2257 "xorq $0xffffffffffffffff,%rax");
2258 }
2259
2260 static void
2261 amd64_emit_equal (void)
2262 {
2263 EMIT_ASM (amd64_equal,
2264 "cmp %rax,(%rsp)\n\t"
2265 "je .Lamd64_equal_true\n\t"
2266 "xor %rax,%rax\n\t"
2267 "jmp .Lamd64_equal_end\n\t"
2268 ".Lamd64_equal_true:\n\t"
2269 "mov $0x1,%rax\n\t"
2270 ".Lamd64_equal_end:\n\t"
2271 "lea 0x8(%rsp),%rsp");
2272 }
2273
2274 static void
2275 amd64_emit_less_signed (void)
2276 {
2277 EMIT_ASM (amd64_less_signed,
2278 "cmp %rax,(%rsp)\n\t"
2279 "jl .Lamd64_less_signed_true\n\t"
2280 "xor %rax,%rax\n\t"
2281 "jmp .Lamd64_less_signed_end\n\t"
2282 ".Lamd64_less_signed_true:\n\t"
2283 "mov $1,%rax\n\t"
2284 ".Lamd64_less_signed_end:\n\t"
2285 "lea 0x8(%rsp),%rsp");
2286 }
2287
2288 static void
2289 amd64_emit_less_unsigned (void)
2290 {
2291 EMIT_ASM (amd64_less_unsigned,
2292 "cmp %rax,(%rsp)\n\t"
2293 "jb .Lamd64_less_unsigned_true\n\t"
2294 "xor %rax,%rax\n\t"
2295 "jmp .Lamd64_less_unsigned_end\n\t"
2296 ".Lamd64_less_unsigned_true:\n\t"
2297 "mov $1,%rax\n\t"
2298 ".Lamd64_less_unsigned_end:\n\t"
2299 "lea 0x8(%rsp),%rsp");
2300 }
2301
2302 static void
2303 amd64_emit_ref (int size)
2304 {
2305 switch (size)
2306 {
2307 case 1:
2308 EMIT_ASM (amd64_ref1,
2309 "movb (%rax),%al");
2310 break;
2311 case 2:
2312 EMIT_ASM (amd64_ref2,
2313 "movw (%rax),%ax");
2314 break;
2315 case 4:
2316 EMIT_ASM (amd64_ref4,
2317 "movl (%rax),%eax");
2318 break;
2319 case 8:
2320 EMIT_ASM (amd64_ref8,
2321 "movq (%rax),%rax");
2322 break;
2323 }
2324 }
2325
2326 static void
2327 amd64_emit_if_goto (int *offset_p, int *size_p)
2328 {
2329 EMIT_ASM (amd64_if_goto,
2330 "mov %rax,%rcx\n\t"
2331 "pop %rax\n\t"
2332 "cmp $0,%rcx\n\t"
2333 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2334 if (offset_p)
2335 *offset_p = 10;
2336 if (size_p)
2337 *size_p = 4;
2338 }
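
/* The offset of 10 above counts the bytes emitted before the jne's
   4-byte displacement: mov %rax,%rcx (3) + pop %rax (1) +
   cmp $0,%rcx (4) + the 0x0f 0x85 opcode (2); the byte counts assume
   the usual REX-prefixed encodings the assembler produces here.  */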
2339
2340 static void
2341 amd64_emit_goto (int *offset_p, int *size_p)
2342 {
2343 EMIT_ASM (amd64_goto,
2344 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2345 if (offset_p)
2346 *offset_p = 1;
2347 if (size_p)
2348 *size_p = 4;
2349 }
2350
2351 static void
2352 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2353 {
2354 int diff = (to - (from + size));
2355 unsigned char buf[sizeof (int)];
2356
2357 if (size != 4)
2358 {
2359 emit_error = 1;
2360 return;
2361 }
2362
2363 memcpy (buf, &diff, sizeof (int));
2364 write_inferior_memory (from, buf, sizeof (int));
2365 }
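
/* The displacement written above is relative to the end of the four
   displacement bytes, hence diff = to - (from + size): FROM points at
   the first displacement byte and SIZE (4) spans the field itself.  */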
2366
2367 static void
2368 amd64_emit_const (LONGEST num)
2369 {
2370 unsigned char buf[16];
2371 int i;
2372 CORE_ADDR buildaddr = current_insn_ptr;
2373
2374 i = 0;
2375 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2376 memcpy (&buf[i], &num, sizeof (num));
2377 i += 8;
2378 append_insns (&buildaddr, i, buf);
2379 current_insn_ptr = buildaddr;
2380 }
2381
2382 static void
2383 amd64_emit_call (CORE_ADDR fn)
2384 {
2385 unsigned char buf[16];
2386 int i;
2387 CORE_ADDR buildaddr;
2388 LONGEST offset64;
2389
2390 /* The destination function may live in a shared library, and thus
2391 may be more than 31 bits away from the compiled code pad.  */
2392
2393 buildaddr = current_insn_ptr;
2394
2395 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2396
2397 i = 0;
2398
2399 if (offset64 > INT_MAX || offset64 < INT_MIN)
2400 {
2401 /* Offset is too large for a direct call, so call indirectly through
2402 a register.  Use %rdx (what the bytes below actually encode); it is
2403 call-clobbered and carries no argument here, so no push/pop is needed.  */
2404 buf[i++] = 0x48; /* mov $fn,%rdx */
2405 buf[i++] = 0xba;
2406 memcpy (buf + i, &fn, 8);
2407 i += 8;
2408 buf[i++] = 0xff; /* callq *%rdx */
2409 buf[i++] = 0xd2;
2410 }
2411 else
2412 {
2413 int offset32 = offset64; /* we know we can't overflow here. */
buf[i++] = 0xe8; /* call <reladdr> */
2414 memcpy (buf + i, &offset32, 4);
2415 i += 4;
2416 }
2417
2418 append_insns (&buildaddr, i, buf);
2419 current_insn_ptr = buildaddr;
2420 }
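
/* For example (addresses purely illustrative), with current_insn_ptr
   at 0x7f0000001000 and fn at 0x7f0000002000, offset64 is
   0x7f0000002000 - 0x7f0000001005 = 0xffb, which fits in 32 bits, so
   the short e8 rel32 form is used.  */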
2421
2422 static void
2423 amd64_emit_reg (int reg)
2424 {
2425 unsigned char buf[16];
2426 int i;
2427 CORE_ADDR buildaddr;
2428
2429 /* Assume raw_regs is still in %rdi. */
2430 buildaddr = current_insn_ptr;
2431 i = 0;
2432 buf[i++] = 0xbe; /* mov $<n>,%esi */
2433 memcpy (&buf[i], &reg, sizeof (reg));
2434 i += 4;
2435 append_insns (&buildaddr, i, buf);
2436 current_insn_ptr = buildaddr;
2437 amd64_emit_call (get_raw_reg_func_addr ());
2438 }
2439
2440 static void
2441 amd64_emit_pop (void)
2442 {
2443 EMIT_ASM (amd64_pop,
2444 "pop %rax");
2445 }
2446
2447 static void
2448 amd64_emit_stack_flush (void)
2449 {
2450 EMIT_ASM (amd64_stack_flush,
2451 "push %rax");
2452 }
2453
2454 static void
2455 amd64_emit_zero_ext (int arg)
2456 {
2457 switch (arg)
2458 {
2459 case 8:
2460 EMIT_ASM (amd64_zero_ext_8,
2461 "and $0xff,%rax");
2462 break;
2463 case 16:
2464 EMIT_ASM (amd64_zero_ext_16,
2465 "and $0xffff,%rax");
2466 break;
2467 case 32:
2468 EMIT_ASM (amd64_zero_ext_32,
2469 "mov $0xffffffff,%rcx\n\t"
2470 "and %rcx,%rax");
2471 break;
2472 default:
2473 emit_error = 1;
2474 }
2475 }
2476
2477 static void
2478 amd64_emit_swap (void)
2479 {
2480 EMIT_ASM (amd64_swap,
2481 "mov %rax,%rcx\n\t"
2482 "pop %rax\n\t"
2483 "push %rcx");
2484 }
2485
2486 static void
2487 amd64_emit_stack_adjust (int n)
2488 {
2489 unsigned char buf[16];
2490 int i;
2491 CORE_ADDR buildaddr = current_insn_ptr;
2492
2493 i = 0;
2494 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2495 buf[i++] = 0x8d;
2496 buf[i++] = 0x64;
2497 buf[i++] = 0x24;
2498 /* The displacement is a signed byte, so this only handles adjustments up to 15, but we don't expect any more. */
2499 buf[i++] = n * 8;
2500 append_insns (&buildaddr, i, buf);
2501 current_insn_ptr = buildaddr;
2502 }
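
/* E.g. for n == 2 the emitted bytes are 48 8d 64 24 10, i.e.
   lea 0x10(%rsp),%rsp, dropping two 8-byte stack slots.  */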
2503
2504 /* FN's prototype is `LONGEST(*fn)(int)'. */
2505
2506 static void
2507 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2508 {
2509 unsigned char buf[16];
2510 int i;
2511 CORE_ADDR buildaddr;
2512
2513 buildaddr = current_insn_ptr;
2514 i = 0;
2515 buf[i++] = 0xbf; /* movl $<n>,%edi */
2516 memcpy (&buf[i], &arg1, sizeof (arg1));
2517 i += 4;
2518 append_insns (&buildaddr, i, buf);
2519 current_insn_ptr = buildaddr;
2520 amd64_emit_call (fn);
2521 }
2522
2523 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2524
2525 static void
2526 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2527 {
2528 unsigned char buf[16];
2529 int i;
2530 CORE_ADDR buildaddr;
2531
2532 buildaddr = current_insn_ptr;
2533 i = 0;
2534 buf[i++] = 0xbf; /* movl $<n>,%edi */
2535 memcpy (&buf[i], &arg1, sizeof (arg1));
2536 i += 4;
2537 append_insns (&buildaddr, i, buf);
2538 current_insn_ptr = buildaddr;
2539 EMIT_ASM (amd64_void_call_2_a,
2540 /* Save away a copy of the stack top. */
2541 "push %rax\n\t"
2542 /* Also pass top as the second argument. */
2543 "mov %rax,%rsi");
2544 amd64_emit_call (fn);
2545 EMIT_ASM (amd64_void_call_2_b,
2546 /* Restore the stack top, %rax may have been trashed. */
2547 "pop %rax");
2548 }
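
/* Both call helpers above rely on the SysV AMD64 calling convention:
   the int argument travels in %edi (and the stack top in %rsi for the
   two-argument form), with any result returned in %rax.  */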
2549
2550 void
2551 amd64_emit_eq_goto (int *offset_p, int *size_p)
2552 {
2553 EMIT_ASM (amd64_eq,
2554 "cmp %rax,(%rsp)\n\t"
2555 "jne .Lamd64_eq_fallthru\n\t"
2556 "lea 0x8(%rsp),%rsp\n\t"
2557 "pop %rax\n\t"
2558 /* jmp, but don't trust the assembler to choose the right jump */
2559 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2560 ".Lamd64_eq_fallthru:\n\t"
2561 "lea 0x8(%rsp),%rsp\n\t"
2562 "pop %rax");
2563
2564 if (offset_p)
2565 *offset_p = 13;
2566 if (size_p)
2567 *size_p = 4;
2568 }
2569
2570 void
2571 amd64_emit_ne_goto (int *offset_p, int *size_p)
2572 {
2573 EMIT_ASM (amd64_ne,
2574 "cmp %rax,(%rsp)\n\t"
2575 "je .Lamd64_ne_fallthru\n\t"
2576 "lea 0x8(%rsp),%rsp\n\t"
2577 "pop %rax\n\t"
2578 /* jmp, but don't trust the assembler to choose the right jump */
2579 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2580 ".Lamd64_ne_fallthru:\n\t"
2581 "lea 0x8(%rsp),%rsp\n\t"
2582 "pop %rax");
2583
2584 if (offset_p)
2585 *offset_p = 13;
2586 if (size_p)
2587 *size_p = 4;
2588 }
2589
2590 void
2591 amd64_emit_lt_goto (int *offset_p, int *size_p)
2592 {
2593 EMIT_ASM (amd64_lt,
2594 "cmp %rax,(%rsp)\n\t"
2595 "jnl .Lamd64_lt_fallthru\n\t"
2596 "lea 0x8(%rsp),%rsp\n\t"
2597 "pop %rax\n\t"
2598 /* jmp, but don't trust the assembler to choose the right jump */
2599 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2600 ".Lamd64_lt_fallthru:\n\t"
2601 "lea 0x8(%rsp),%rsp\n\t"
2602 "pop %rax");
2603
2604 if (offset_p)
2605 *offset_p = 13;
2606 if (size_p)
2607 *size_p = 4;
2608 }
2609
2610 void
2611 amd64_emit_le_goto (int *offset_p, int *size_p)
2612 {
2613 EMIT_ASM (amd64_le,
2614 "cmp %rax,(%rsp)\n\t"
2615 "jnle .Lamd64_le_fallthru\n\t"
2616 "lea 0x8(%rsp),%rsp\n\t"
2617 "pop %rax\n\t"
2618 /* jmp, but don't trust the assembler to choose the right jump */
2619 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2620 ".Lamd64_le_fallthru:\n\t"
2621 "lea 0x8(%rsp),%rsp\n\t"
2622 "pop %rax");
2623
2624 if (offset_p)
2625 *offset_p = 13;
2626 if (size_p)
2627 *size_p = 4;
2628 }
2629
2630 void
2631 amd64_emit_gt_goto (int *offset_p, int *size_p)
2632 {
2633 EMIT_ASM (amd64_gt,
2634 "cmp %rax,(%rsp)\n\t"
2635 "jng .Lamd64_gt_fallthru\n\t"
2636 "lea 0x8(%rsp),%rsp\n\t"
2637 "pop %rax\n\t"
2638 /* jmp, but don't trust the assembler to choose the right jump */
2639 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2640 ".Lamd64_gt_fallthru:\n\t"
2641 "lea 0x8(%rsp),%rsp\n\t"
2642 "pop %rax");
2643
2644 if (offset_p)
2645 *offset_p = 13;
2646 if (size_p)
2647 *size_p = 4;
2648 }
2649
2650 void
2651 amd64_emit_ge_goto (int *offset_p, int *size_p)
2652 {
2653 EMIT_ASM (amd64_ge,
2654 "cmp %rax,(%rsp)\n\t"
2655 "jnge .Lamd64_ge_fallthru\n\t"
2656 ".Lamd64_ge_jump:\n\t"
2657 "lea 0x8(%rsp),%rsp\n\t"
2658 "pop %rax\n\t"
2659 /* jmp, but don't trust the assembler to choose the right jump */
2660 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2661 ".Lamd64_ge_fallthru:\n\t"
2662 "lea 0x8(%rsp),%rsp\n\t"
2663 "pop %rax");
2664
2665 if (offset_p)
2666 *offset_p = 13;
2667 if (size_p)
2668 *size_p = 4;
2669 }
2670
2671 struct emit_ops amd64_emit_ops =
2672 {
2673 amd64_emit_prologue,
2674 amd64_emit_epilogue,
2675 amd64_emit_add,
2676 amd64_emit_sub,
2677 amd64_emit_mul,
2678 amd64_emit_lsh,
2679 amd64_emit_rsh_signed,
2680 amd64_emit_rsh_unsigned,
2681 amd64_emit_ext,
2682 amd64_emit_log_not,
2683 amd64_emit_bit_and,
2684 amd64_emit_bit_or,
2685 amd64_emit_bit_xor,
2686 amd64_emit_bit_not,
2687 amd64_emit_equal,
2688 amd64_emit_less_signed,
2689 amd64_emit_less_unsigned,
2690 amd64_emit_ref,
2691 amd64_emit_if_goto,
2692 amd64_emit_goto,
2693 amd64_write_goto_address,
2694 amd64_emit_const,
2695 amd64_emit_call,
2696 amd64_emit_reg,
2697 amd64_emit_pop,
2698 amd64_emit_stack_flush,
2699 amd64_emit_zero_ext,
2700 amd64_emit_swap,
2701 amd64_emit_stack_adjust,
2702 amd64_emit_int_call_1,
2703 amd64_emit_void_call_2,
2704 amd64_emit_eq_goto,
2705 amd64_emit_ne_goto,
2706 amd64_emit_lt_goto,
2707 amd64_emit_le_goto,
2708 amd64_emit_gt_goto,
2709 amd64_emit_ge_goto
2710 };
2711
2712 #endif /* __x86_64__ */
2713
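/* In the i386 emitters below, a 64-bit agent-expression value is kept
   split across a register pair: %eax holds the low word and %ebx the
   high word, which is why the operations come in eax/ebx pairs and
   the epilogue stores both halves.  */
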
2714 static void
2715 i386_emit_prologue (void)
2716 {
2717 EMIT_ASM32 (i386_prologue,
2718 "push %ebp\n\t"
2719 "mov %esp,%ebp\n\t"
2720 "push %ebx");
2721 /* At this point, the raw regs base address is at 8(%ebp), and the
2722 value pointer is at 12(%ebp). */
2723 }
2724
2725 static void
2726 i386_emit_epilogue (void)
2727 {
2728 EMIT_ASM32 (i386_epilogue,
2729 "mov 12(%ebp),%ecx\n\t"
2730 "mov %eax,(%ecx)\n\t"
2731 "mov %ebx,0x4(%ecx)\n\t"
2732 "xor %eax,%eax\n\t"
2733 "pop %ebx\n\t"
2734 "pop %ebp\n\t"
2735 "ret");
2736 }
2737
2738 static void
2739 i386_emit_add (void)
2740 {
2741 EMIT_ASM32 (i386_add,
2742 "add (%esp),%eax\n\t"
2743 "adc 0x4(%esp),%ebx\n\t"
2744 "lea 0x8(%esp),%esp");
2745 }
2746
2747 static void
2748 i386_emit_sub (void)
2749 {
2750 EMIT_ASM32 (i386_sub,
2751 "subl %eax,(%esp)\n\t"
2752 "sbbl %ebx,4(%esp)\n\t"
2753 "pop %eax\n\t"
2754 "pop %ebx\n\t");
2755 }
2756
2757 static void
2758 i386_emit_mul (void)
2759 {
2760 emit_error = 1;
2761 }
2762
2763 static void
2764 i386_emit_lsh (void)
2765 {
2766 emit_error = 1;
2767 }
2768
2769 static void
2770 i386_emit_rsh_signed (void)
2771 {
2772 emit_error = 1;
2773 }
2774
2775 static void
2776 i386_emit_rsh_unsigned (void)
2777 {
2778 emit_error = 1;
2779 }
2780
2781 static void
2782 i386_emit_ext (int arg)
2783 {
2784 switch (arg)
2785 {
2786 case 8:
2787 EMIT_ASM32 (i386_ext_8,
2788 "cbtw\n\t"
2789 "cwtl\n\t"
2790 "movl %eax,%ebx\n\t"
2791 "sarl $31,%ebx");
2792 break;
2793 case 16:
2794 EMIT_ASM32 (i386_ext_16,
2795 "cwtl\n\t"
2796 "movl %eax,%ebx\n\t"
2797 "sarl $31,%ebx");
2798 break;
2799 case 32:
2800 EMIT_ASM32 (i386_ext_32,
2801 "movl %eax,%ebx\n\t"
2802 "sarl $31,%ebx");
2803 break;
2804 default:
2805 emit_error = 1;
2806 }
2807 }
2808
2809 static void
2810 i386_emit_log_not (void)
2811 {
2812 EMIT_ASM32 (i386_log_not,
2813 "or %ebx,%eax\n\t"
2814 "test %eax,%eax\n\t"
2815 "sete %cl\n\t"
2816 "xor %ebx,%ebx\n\t"
2817 "movzbl %cl,%eax");
2818 }
2819
2820 static void
2821 i386_emit_bit_and (void)
2822 {
2823 EMIT_ASM32 (i386_and,
2824 "and (%esp),%eax\n\t"
2825 "and 0x4(%esp),%ebx\n\t"
2826 "lea 0x8(%esp),%esp");
2827 }
2828
2829 static void
2830 i386_emit_bit_or (void)
2831 {
2832 EMIT_ASM32 (i386_or,
2833 "or (%esp),%eax\n\t"
2834 "or 0x4(%esp),%ebx\n\t"
2835 "lea 0x8(%esp),%esp");
2836 }
2837
2838 static void
2839 i386_emit_bit_xor (void)
2840 {
2841 EMIT_ASM32 (i386_xor,
2842 "xor (%esp),%eax\n\t"
2843 "xor 0x4(%esp),%ebx\n\t"
2844 "lea 0x8(%esp),%esp");
2845 }
2846
2847 static void
2848 i386_emit_bit_not (void)
2849 {
2850 EMIT_ASM32 (i386_bit_not,
2851 "xor $0xffffffff,%eax\n\t"
2852 "xor $0xffffffff,%ebx\n\t");
2853 }
2854
2855 static void
2856 i386_emit_equal (void)
2857 {
2858 EMIT_ASM32 (i386_equal,
2859 "cmpl %ebx,4(%esp)\n\t"
2860 "jne .Li386_equal_false\n\t"
2861 "cmpl %eax,(%esp)\n\t"
2862 "je .Li386_equal_true\n\t"
2863 ".Li386_equal_false:\n\t"
2864 "xor %eax,%eax\n\t"
2865 "jmp .Li386_equal_end\n\t"
2866 ".Li386_equal_true:\n\t"
2867 "mov $1,%eax\n\t"
2868 ".Li386_equal_end:\n\t"
2869 "xor %ebx,%ebx\n\t"
2870 "lea 0x8(%esp),%esp");
2871 }
2872
2873 static void
2874 i386_emit_less_signed (void)
2875 {
2876 EMIT_ASM32 (i386_less_signed,
2877 "cmpl %ebx,4(%esp)\n\t"
2878 "jl .Li386_less_signed_true\n\t"
2879 "jne .Li386_less_signed_false\n\t"
2880 "cmpl %eax,(%esp)\n\t"
2881 "jl .Li386_less_signed_true\n\t"
2882 ".Li386_less_signed_false:\n\t"
2883 "xor %eax,%eax\n\t"
2884 "jmp .Li386_less_signed_end\n\t"
2885 ".Li386_less_signed_true:\n\t"
2886 "mov $1,%eax\n\t"
2887 ".Li386_less_signed_end:\n\t"
2888 "xor %ebx,%ebx\n\t"
2889 "lea 0x8(%esp),%esp");
2890 }
2891
2892 static void
2893 i386_emit_less_unsigned (void)
2894 {
2895 EMIT_ASM32 (i386_less_unsigned,
2896 "cmpl %ebx,4(%esp)\n\t"
2897 "jb .Li386_less_unsigned_true\n\t"
2898 "jne .Li386_less_unsigned_false\n\t"
2899 "cmpl %eax,(%esp)\n\t"
2900 "jb .Li386_less_unsigned_true\n\t"
2901 ".Li386_less_unsigned_false:\n\t"
2902 "xor %eax,%eax\n\t"
2903 "jmp .Li386_less_unsigned_end\n\t"
2904 ".Li386_less_unsigned_true:\n\t"
2905 "mov $1,%eax\n\t"
2906 ".Li386_less_unsigned_end:\n\t"
2907 "xor %ebx,%ebx\n\t"
2908 "lea 0x8(%esp),%esp");
2909 }
2910
2911 static void
2912 i386_emit_ref (int size)
2913 {
2914 switch (size)
2915 {
2916 case 1:
2917 EMIT_ASM32 (i386_ref1,
2918 "movb (%eax),%al");
2919 break;
2920 case 2:
2921 EMIT_ASM32 (i386_ref2,
2922 "movw (%eax),%ax");
2923 break;
2924 case 4:
2925 EMIT_ASM32 (i386_ref4,
2926 "movl (%eax),%eax");
2927 break;
2928 case 8:
2929 EMIT_ASM32 (i386_ref8,
2930 "movl 4(%eax),%ebx\n\t"
2931 "movl (%eax),%eax");
2932 break;
2933 }
2934 }
2935
2936 static void
2937 i386_emit_if_goto (int *offset_p, int *size_p)
2938 {
2939 EMIT_ASM32 (i386_if_goto,
2940 "mov %eax,%ecx\n\t"
2941 "or %ebx,%ecx\n\t"
2942 "pop %eax\n\t"
2943 "pop %ebx\n\t"
2944 "cmpl $0,%ecx\n\t"
2945 /* Don't trust the assembler to choose the right jump */
2946 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2947
2948 if (offset_p)
2949 *offset_p = 11; /* be sure that this matches the sequence above */
2950 if (size_p)
2951 *size_p = 4;
2952 }
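
/* The offset of 11 counts the bytes before the jne's displacement:
   mov %eax,%ecx (2) + or %ebx,%ecx (2) + pop %eax (1) + pop %ebx (1)
   + cmpl $0,%ecx (3) + the 0x0f 0x85 opcode (2); the counts assume
   the standard 32-bit encodings.  */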
2953
2954 static void
2955 i386_emit_goto (int *offset_p, int *size_p)
2956 {
2957 EMIT_ASM32 (i386_goto,
2958 /* Don't trust the assembler to choose the right jump */
2959 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2960 if (offset_p)
2961 *offset_p = 1;
2962 if (size_p)
2963 *size_p = 4;
2964 }
2965
2966 static void
2967 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2968 {
2969 int diff = (to - (from + size));
2970 unsigned char buf[sizeof (int)];
2971
2972 /* We're only doing 4-byte sizes at the moment. */
2973 if (size != 4)
2974 {
2975 emit_error = 1;
2976 return;
2977 }
2978
2979 memcpy (buf, &diff, sizeof (int));
2980 write_inferior_memory (from, buf, sizeof (int));
2981 }
2982
2983 static void
2984 i386_emit_const (LONGEST num)
2985 {
2986 unsigned char buf[16];
2987 int i, hi, lo;
2988 CORE_ADDR buildaddr = current_insn_ptr;
2989
2990 i = 0;
2991 buf[i++] = 0xb8; /* mov $<n>,%eax */
2992 lo = num & 0xffffffff;
2993 memcpy (&buf[i], &lo, sizeof (lo));
2994 i += 4;
2995 hi = ((num >> 32) & 0xffffffff);
2996 if (hi)
2997 {
2998 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2999 memcpy (&buf[i], &hi, sizeof (hi));
3000 i += 4;
3001 }
3002 else
3003 {
3004 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3005 }
3006 append_insns (&buildaddr, i, buf);
3007 current_insn_ptr = buildaddr;
3008 }
3009
3010 static void
3011 i386_emit_call (CORE_ADDR fn)
3012 {
3013 unsigned char buf[16];
3014 int i, offset;
3015 CORE_ADDR buildaddr;
3016
3017 buildaddr = current_insn_ptr;
3018 i = 0;
3019 buf[i++] = 0xe8; /* call <reladdr> */
3020 offset = ((int) fn) - (buildaddr + 5);
3021 memcpy (buf + 1, &offset, 4);
3022 append_insns (&buildaddr, 5, buf);
3023 current_insn_ptr = buildaddr;
3024 }
3025
3026 static void
3027 i386_emit_reg (int reg)
3028 {
3029 unsigned char buf[16];
3030 int i;
3031 CORE_ADDR buildaddr;
3032
3033 EMIT_ASM32 (i386_reg_a,
3034 "sub $0x8,%esp");
3035 buildaddr = current_insn_ptr;
3036 i = 0;
3037 buf[i++] = 0xb8; /* mov $<n>,%eax */
3038 memcpy (&buf[i], &reg, sizeof (reg));
3039 i += 4;
3040 append_insns (&buildaddr, i, buf);
3041 current_insn_ptr = buildaddr;
3042 EMIT_ASM32 (i386_reg_b,
3043 "mov %eax,4(%esp)\n\t"
3044 "mov 8(%ebp),%eax\n\t"
3045 "mov %eax,(%esp)");
3046 i386_emit_call (get_raw_reg_func_addr ());
3047 EMIT_ASM32 (i386_reg_c,
3048 "xor %ebx,%ebx\n\t"
3049 "lea 0x8(%esp),%esp");
3050 }
3051
3052 static void
3053 i386_emit_pop (void)
3054 {
3055 EMIT_ASM32 (i386_pop,
3056 "pop %eax\n\t"
3057 "pop %ebx");
3058 }
3059
3060 static void
3061 i386_emit_stack_flush (void)
3062 {
3063 EMIT_ASM32 (i386_stack_flush,
3064 "push %ebx\n\t"
3065 "push %eax");
3066 }
3067
3068 static void
3069 i386_emit_zero_ext (int arg)
3070 {
3071 switch (arg)
3072 {
3073 case 8:
3074 EMIT_ASM32 (i386_zero_ext_8,
3075 "and $0xff,%eax\n\t"
3076 "xor %ebx,%ebx");
3077 break;
3078 case 16:
3079 EMIT_ASM32 (i386_zero_ext_16,
3080 "and $0xffff,%eax\n\t"
3081 "xor %ebx,%ebx");
3082 break;
3083 case 32:
3084 EMIT_ASM32 (i386_zero_ext_32,
3085 "xor %ebx,%ebx");
3086 break;
3087 default:
3088 emit_error = 1;
3089 }
3090 }
3091
3092 static void
3093 i386_emit_swap (void)
3094 {
3095 EMIT_ASM32 (i386_swap,
3096 "mov %eax,%ecx\n\t"
3097 "mov %ebx,%edx\n\t"
3098 "pop %eax\n\t"
3099 "pop %ebx\n\t"
3100 "push %edx\n\t"
3101 "push %ecx");
3102 }
3103
3104 static void
3105 i386_emit_stack_adjust (int n)
3106 {
3107 unsigned char buf[16];
3108 int i;
3109 CORE_ADDR buildaddr = current_insn_ptr;
3110
3111 i = 0;
3112 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3113 buf[i++] = 0x64;
3114 buf[i++] = 0x24;
3115 buf[i++] = n * 8;
3116 append_insns (&buildaddr, i, buf);
3117 current_insn_ptr = buildaddr;
3118 }
3119
3120 /* FN's prototype is `LONGEST(*fn)(int)'. */
3121
3122 static void
3123 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3124 {
3125 unsigned char buf[16];
3126 int i;
3127 CORE_ADDR buildaddr;
3128
3129 EMIT_ASM32 (i386_int_call_1_a,
3130 /* Reserve a bit of stack space. */
3131 "sub $0x8,%esp");
3132 /* Put the one argument on the stack. */
3133 buildaddr = current_insn_ptr;
3134 i = 0;
3135 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3136 buf[i++] = 0x04;
3137 buf[i++] = 0x24;
3138 memcpy (&buf[i], &arg1, sizeof (arg1));
3139 i += 4;
3140 append_insns (&buildaddr, i, buf);
3141 current_insn_ptr = buildaddr;
3142 i386_emit_call (fn);
3143 EMIT_ASM32 (i386_int_call_1_c,
3144 "mov %edx,%ebx\n\t"
3145 "lea 0x8(%esp),%esp");
3146 }
3147
3148 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3149
3150 static void
3151 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3152 {
3153 unsigned char buf[16];
3154 int i;
3155 CORE_ADDR buildaddr;
3156
3157 EMIT_ASM32 (i386_void_call_2_a,
3158 /* Preserve %eax only; we don't have to worry about %ebx. */
3159 "push %eax\n\t"
3160 /* Reserve a bit of stack space for arguments. */
3161 "sub $0x10,%esp\n\t"
3162 /* Copy "top" to the second argument position.  (Note that
3163 we can't assume the called function won't scribble on its
3164 arguments, so don't try to restore from this.)  */
3165 "mov %eax,4(%esp)\n\t"
3166 "mov %ebx,8(%esp)");
3167 /* Put the first argument on the stack. */
3168 buildaddr = current_insn_ptr;
3169 i = 0;
3170 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3171 buf[i++] = 0x04;
3172 buf[i++] = 0x24;
3173 memcpy (&buf[i], &arg1, sizeof (arg1));
3174 i += 4;
3175 append_insns (&buildaddr, i, buf);
3176 current_insn_ptr = buildaddr;
3177 i386_emit_call (fn);
3178 EMIT_ASM32 (i386_void_call_2_b,
3179 "lea 0x10(%esp),%esp\n\t"
3180 /* Restore original stack top. */
3181 "pop %eax");
3182 }
3183
3184
3185 void
3186 i386_emit_eq_goto (int *offset_p, int *size_p)
3187 {
3188 EMIT_ASM32 (eq,
3189 /* Check the low half first; it is more likely to be the decider.  */
3190 "cmpl %eax,(%esp)\n\t"
3191 "jne .Leq_fallthru\n\t"
3192 "cmpl %ebx,4(%esp)\n\t"
3193 "jne .Leq_fallthru\n\t"
3194 "lea 0x8(%esp),%esp\n\t"
3195 "pop %eax\n\t"
3196 "pop %ebx\n\t"
3197 /* jmp, but don't trust the assembler to choose the right jump */
3198 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3199 ".Leq_fallthru:\n\t"
3200 "lea 0x8(%esp),%esp\n\t"
3201 "pop %eax\n\t"
3202 "pop %ebx");
3203
3204 if (offset_p)
3205 *offset_p = 18;
3206 if (size_p)
3207 *size_p = 4;
3208 }
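
/* Likewise, the offset of 18 above counts the bytes preceding the
   jump's displacement, assuming the standard encodings:
   cmpl %eax,(%esp) (3) + jne rel8 (2) + cmpl %ebx,4(%esp) (4)
   + jne rel8 (2) + lea (4) + pop %eax and pop %ebx (2) + the 0xe9
   opcode (1).  */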
3209
3210 void
3211 i386_emit_ne_goto (int *offset_p, int *size_p)
3212 {
3213 EMIT_ASM32 (ne,
3214 /* Check the low half first; it is more likely to be the decider.  */
3215 "cmpl %eax,(%esp)\n\t"
3216 "jne .Lne_jump\n\t"
3217 "cmpl %ebx,4(%esp)\n\t"
3218 "je .Lne_fallthru\n\t"
3219 ".Lne_jump:\n\t"
3220 "lea 0x8(%esp),%esp\n\t"
3221 "pop %eax\n\t"
3222 "pop %ebx\n\t"
3223 /* jmp, but don't trust the assembler to choose the right jump */
3224 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3225 ".Lne_fallthru:\n\t"
3226 "lea 0x8(%esp),%esp\n\t"
3227 "pop %eax\n\t"
3228 "pop %ebx");
3229
3230 if (offset_p)
3231 *offset_p = 18;
3232 if (size_p)
3233 *size_p = 4;
3234 }
3235
3236 void
3237 i386_emit_lt_goto (int *offset_p, int *size_p)
3238 {
3239 EMIT_ASM32 (lt,
3240 "cmpl %ebx,4(%esp)\n\t"
3241 "jl .Llt_jump\n\t"
3242 "jne .Llt_fallthru\n\t"
3243 "cmpl %eax,(%esp)\n\t"
3244 "jnl .Llt_fallthru\n\t"
3245 ".Llt_jump:\n\t"
3246 "lea 0x8(%esp),%esp\n\t"
3247 "pop %eax\n\t"
3248 "pop %ebx\n\t"
3249 /* jmp, but don't trust the assembler to choose the right jump */
3250 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3251 ".Llt_fallthru:\n\t"
3252 "lea 0x8(%esp),%esp\n\t"
3253 "pop %eax\n\t"
3254 "pop %ebx");
3255
3256 if (offset_p)
3257 *offset_p = 20;
3258 if (size_p)
3259 *size_p = 4;
3260 }
3261
3262 void
3263 i386_emit_le_goto (int *offset_p, int *size_p)
3264 {
3265 EMIT_ASM32 (le,
3266 "cmpl %ebx,4(%esp)\n\t"
3267 "jle .Lle_jump\n\t"
3268 "jne .Lle_fallthru\n\t"
3269 "cmpl %eax,(%esp)\n\t"
3270 "jnle .Lle_fallthru\n\t"
3271 ".Lle_jump:\n\t"
3272 "lea 0x8(%esp),%esp\n\t"
3273 "pop %eax\n\t"
3274 "pop %ebx\n\t"
3275 /* jmp, but don't trust the assembler to choose the right jump */
3276 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3277 ".Lle_fallthru:\n\t"
3278 "lea 0x8(%esp),%esp\n\t"
3279 "pop %eax\n\t"
3280 "pop %ebx");
3281
3282 if (offset_p)
3283 *offset_p = 20;
3284 if (size_p)
3285 *size_p = 4;
3286 }
3287
3288 void
3289 i386_emit_gt_goto (int *offset_p, int *size_p)
3290 {
3291 EMIT_ASM32 (gt,
3292 "cmpl %ebx,4(%esp)\n\t"
3293 "jg .Lgt_jump\n\t"
3294 "jne .Lgt_fallthru\n\t"
3295 "cmpl %eax,(%esp)\n\t"
3296 "jng .Lgt_fallthru\n\t"
3297 ".Lgt_jump:\n\t"
3298 "lea 0x8(%esp),%esp\n\t"
3299 "pop %eax\n\t"
3300 "pop %ebx\n\t"
3301 /* jmp, but don't trust the assembler to choose the right jump */
3302 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3303 ".Lgt_fallthru:\n\t"
3304 "lea 0x8(%esp),%esp\n\t"
3305 "pop %eax\n\t"
3306 "pop %ebx");
3307
3308 if (offset_p)
3309 *offset_p = 20;
3310 if (size_p)
3311 *size_p = 4;
3312 }
3313
3314 void
3315 i386_emit_ge_goto (int *offset_p, int *size_p)
3316 {
3317 EMIT_ASM32 (ge,
3318 "cmpl %ebx,4(%esp)\n\t"
3319 "jge .Lge_jump\n\t"
3320 "jne .Lge_fallthru\n\t"
3321 "cmpl %eax,(%esp)\n\t"
3322 "jnge .Lge_fallthru\n\t"
3323 ".Lge_jump:\n\t"
3324 "lea 0x8(%esp),%esp\n\t"
3325 "pop %eax\n\t"
3326 "pop %ebx\n\t"
3327 /* jmp, but don't trust the assembler to choose the right jump */
3328 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3329 ".Lge_fallthru:\n\t"
3330 "lea 0x8(%esp),%esp\n\t"
3331 "pop %eax\n\t"
3332 "pop %ebx");
3333
3334 if (offset_p)
3335 *offset_p = 20;
3336 if (size_p)
3337 *size_p = 4;
3338 }
3339
3340 struct emit_ops i386_emit_ops =
3341 {
3342 i386_emit_prologue,
3343 i386_emit_epilogue,
3344 i386_emit_add,
3345 i386_emit_sub,
3346 i386_emit_mul,
3347 i386_emit_lsh,
3348 i386_emit_rsh_signed,
3349 i386_emit_rsh_unsigned,
3350 i386_emit_ext,
3351 i386_emit_log_not,
3352 i386_emit_bit_and,
3353 i386_emit_bit_or,
3354 i386_emit_bit_xor,
3355 i386_emit_bit_not,
3356 i386_emit_equal,
3357 i386_emit_less_signed,
3358 i386_emit_less_unsigned,
3359 i386_emit_ref,
3360 i386_emit_if_goto,
3361 i386_emit_goto,
3362 i386_write_goto_address,
3363 i386_emit_const,
3364 i386_emit_call,
3365 i386_emit_reg,
3366 i386_emit_pop,
3367 i386_emit_stack_flush,
3368 i386_emit_zero_ext,
3369 i386_emit_swap,
3370 i386_emit_stack_adjust,
3371 i386_emit_int_call_1,
3372 i386_emit_void_call_2,
3373 i386_emit_eq_goto,
3374 i386_emit_ne_goto,
3375 i386_emit_lt_goto,
3376 i386_emit_le_goto,
3377 i386_emit_gt_goto,
3378 i386_emit_ge_goto
3379 };
3380
3381
3382 static struct emit_ops *
3383 x86_emit_ops (void)
3384 {
3385 #ifdef __x86_64__
3386 if (is_64bit_tdesc ())
3387 return &amd64_emit_ops;
3388 else
3389 #endif
3390 return &i386_emit_ops;
3391 }
3392
3393 static int
3394 x86_supports_range_stepping (void)
3395 {
3396 return 1;
3397 }
3398
3399 /* This is initialized assuming an amd64 target.
3400 x86_arch_setup will correct it for i386 or amd64 targets. */
3401
3402 struct linux_target_ops the_low_target =
3403 {
3404 x86_arch_setup,
3405 x86_linux_regs_info,
3406 x86_cannot_fetch_register,
3407 x86_cannot_store_register,
3408 NULL, /* fetch_register */
3409 x86_get_pc,
3410 x86_set_pc,
3411 x86_breakpoint,
3412 x86_breakpoint_len,
3413 NULL, /* breakpoint_reinsert_addr */
3414 1, /* decr_pc_after_break */
3415 x86_breakpoint_at,
3416 x86_supports_z_point_type,
3417 x86_insert_point,
3418 x86_remove_point,
3419 x86_stopped_by_watchpoint,
3420 x86_stopped_data_address,
3421 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3422 native i386 case (no registers smaller than an xfer unit), and are not
3423 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3424 NULL,
3425 NULL,
3426 /* need to fix up i386 siginfo if host is amd64 */
3427 x86_siginfo_fixup,
3428 x86_linux_new_process,
3429 x86_linux_new_thread,
3430 x86_linux_prepare_to_resume,
3431 x86_linux_process_qsupported,
3432 x86_supports_tracepoints,
3433 x86_get_thread_area,
3434 x86_install_fast_tracepoint_jump_pad,
3435 x86_emit_ops,
3436 x86_get_min_fast_tracepoint_insn_len,
3437 x86_supports_range_stepping,
3438 };
3439
3440 void
3441 initialize_low_arch (void)
3442 {
3443 /* Initialize the Linux target descriptions. */
3444 #ifdef __x86_64__
3445 init_registers_amd64_linux ();
3446 init_registers_amd64_avx_linux ();
3447 init_registers_amd64_avx512_linux ();
3448 init_registers_amd64_mpx_linux ();
3449
3450 init_registers_x32_linux ();
3451 init_registers_x32_avx_linux ();
3452 init_registers_x32_avx512_linux ();
3453
3454 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3455 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3456 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3457 #endif
3458 init_registers_i386_linux ();
3459 init_registers_i386_mmx_linux ();
3460 init_registers_i386_avx_linux ();
3461 init_registers_i386_avx512_linux ();
3462 init_registers_i386_mpx_linux ();
3463
3464 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3465 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3466 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3467
3468 initialize_regsets_info (&x86_regsets_info);
3469 }