gdb/gdbserver/linux-x86-low.c
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "x86-xstate.h"
28
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
32 #ifndef ELFMAG0
33 #include "elf/common.h"
34 #endif
35
36 #include "agent.h"
37 #include "tdesc.h"
38 #include "tracepoint.h"
39 #include "ax.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
42
43 #ifdef __x86_64__
44 /* Defined in auto-generated file amd64-linux.c. */
45 void init_registers_amd64_linux (void);
46 extern const struct target_desc *tdesc_amd64_linux;
47
48 /* Defined in auto-generated file amd64-avx-linux.c. */
49 void init_registers_amd64_avx_linux (void);
50 extern const struct target_desc *tdesc_amd64_avx_linux;
51
52 /* Defined in auto-generated file amd64-avx512-linux.c. */
53 void init_registers_amd64_avx512_linux (void);
54 extern const struct target_desc *tdesc_amd64_avx512_linux;
55
56 /* Defined in auto-generated file amd64-mpx-linux.c. */
57 void init_registers_amd64_mpx_linux (void);
58 extern const struct target_desc *tdesc_amd64_mpx_linux;
59
60 /* Defined in auto-generated file x32-linux.c. */
61 void init_registers_x32_linux (void);
62 extern const struct target_desc *tdesc_x32_linux;
63
64 /* Defined in auto-generated file x32-avx-linux.c. */
65 void init_registers_x32_avx_linux (void);
66 extern const struct target_desc *tdesc_x32_avx_linux;
67
68 /* Defined in auto-generated file x32-avx512-linux.c. */
69 void init_registers_x32_avx512_linux (void);
70 extern const struct target_desc *tdesc_x32_avx512_linux;
71
72 #endif
73
74 /* Defined in auto-generated file i386-linux.c. */
75 void init_registers_i386_linux (void);
76 extern const struct target_desc *tdesc_i386_linux;
77
78 /* Defined in auto-generated file i386-mmx-linux.c. */
79 void init_registers_i386_mmx_linux (void);
80 extern const struct target_desc *tdesc_i386_mmx_linux;
81
82 /* Defined in auto-generated file i386-avx-linux.c. */
83 void init_registers_i386_avx_linux (void);
84 extern const struct target_desc *tdesc_i386_avx_linux;
85
86 /* Defined in auto-generated file i386-avx512-linux.c. */
87 void init_registers_i386_avx512_linux (void);
88 extern const struct target_desc *tdesc_i386_avx512_linux;
89
90 /* Defined in auto-generated file i386-mpx-linux.c. */
91 void init_registers_i386_mpx_linux (void);
92 extern const struct target_desc *tdesc_i386_mpx_linux;
93
94 #ifdef __x86_64__
95 static struct target_desc *tdesc_amd64_linux_no_xml;
96 #endif
97 static struct target_desc *tdesc_i386_linux_no_xml;
98
99
100 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 }; /* jmp rel32 */
101 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 }; /* jmp rel16 */
102
103 /* Backward compatibility for gdb without XML support. */
104
105 static const char *xmltarget_i386_linux_no_xml = "@<target>\
106 <architecture>i386</architecture>\
107 <osabi>GNU/Linux</osabi>\
108 </target>";
109
110 #ifdef __x86_64__
111 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
112 <architecture>i386:x86-64</architecture>\
113 <osabi>GNU/Linux</osabi>\
114 </target>";
115 #endif
116
117 #include <sys/reg.h>
118 #include <sys/procfs.h>
119 #include <sys/ptrace.h>
120 #include <sys/uio.h>
121
122 #ifndef PTRACE_GETREGSET
123 #define PTRACE_GETREGSET 0x4204
124 #endif
125
126 #ifndef PTRACE_SETREGSET
127 #define PTRACE_SETREGSET 0x4205
128 #endif
129
130
131 #ifndef PTRACE_GET_THREAD_AREA
132 #define PTRACE_GET_THREAD_AREA 25
133 #endif
134
135 /* This definition comes from prctl.h, but some kernels may not have it. */
136 #ifndef PTRACE_ARCH_PRCTL
137 #define PTRACE_ARCH_PRCTL 30
138 #endif
139
140 /* The following definitions come from prctl.h, but may be absent
141 for certain configurations. */
142 #ifndef ARCH_GET_FS
143 #define ARCH_SET_GS 0x1001
144 #define ARCH_SET_FS 0x1002
145 #define ARCH_GET_FS 0x1003
146 #define ARCH_GET_GS 0x1004
147 #endif
148
149 /* Per-process arch-specific data we want to keep. */
150
151 struct arch_process_info
152 {
153 struct x86_debug_reg_state debug_reg_state;
154 };
155
156 #ifdef __x86_64__
157
158 /* Mapping between the general-purpose registers in `struct user'
159 format and GDB's register array layout.
160 Note that the transfer layout uses 64-bit regs. */
161 static /*const*/ int i386_regmap[] =
162 {
163 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
164 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
165 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
166 DS * 8, ES * 8, FS * 8, GS * 8
167 };
168
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
170
171 /* So the code below doesn't have to care whether it's i386 or amd64. */
172 #define ORIG_EAX ORIG_RAX
173 #define REGSIZE 8
174
175 static const int x86_64_regmap[] =
176 {
177 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
178 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
179 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
180 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
181 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
182 DS * 8, ES * 8, FS * 8, GS * 8,
183 -1, -1, -1, -1, -1, -1, -1, -1,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
186 -1,
187 -1, -1, -1, -1, -1, -1, -1, -1,
188 ORIG_RAX * 8,
189 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
190 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
191 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
196 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1
200 };
201
202 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
203 #define X86_64_USER_REGS (GS + 1)
204
205 #else /* ! __x86_64__ */
206
207 /* Mapping between the general-purpose registers in `struct user'
208 format and GDB's register array layout. */
209 static /*const*/ int i386_regmap[] =
210 {
211 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
212 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
213 EIP * 4, EFL * 4, CS * 4, SS * 4,
214 DS * 4, ES * 4, FS * 4, GS * 4
215 };
216
217 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
218
219 #define REGSIZE 4
220
221 #endif
222
223 #ifdef __x86_64__
224
225 /* Returns true if the current inferior belongs to an x86-64 process,
226 per the tdesc. */
227
228 static int
229 is_64bit_tdesc (void)
230 {
231 struct regcache *regcache = get_thread_regcache (current_thread, 0);
232
233 return register_size (regcache->tdesc, 0) == 8;
234 }
235
236 #endif
237
238 \f
239 /* Called by libthread_db. */
240
241 ps_err_e
242 ps_get_thread_area (const struct ps_prochandle *ph,
243 lwpid_t lwpid, int idx, void **base)
244 {
245 #ifdef __x86_64__
246 int use_64bit = is_64bit_tdesc ();
247
248 if (use_64bit)
249 {
250 switch (idx)
251 {
252 case FS:
253 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
254 return PS_OK;
255 break;
256 case GS:
257 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
258 return PS_OK;
259 break;
260 default:
261 return PS_BADADDR;
262 }
263 return PS_ERR;
264 }
265 #endif
266
267 {
268 unsigned int desc[4];
269
270 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
271 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
272 return PS_ERR;
273
274 /* Ensure we properly extend the value to 64 bits for x86_64. */
275 *base = (void *) (uintptr_t) desc[1];
276 return PS_OK;
277 }
278 }
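/* A note on the 32-bit path above: PTRACE_GET_THREAD_AREA fills DESC
   with the four words of struct user_desc, so desc[1] (base_addr) is
   the base address of the thread area.  */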
279
280 /* Get the thread area address. This is used to recognize which
281 thread is which when tracing with the in-process agent library. We
282 don't read anything from the address, and treat it as opaque; it's
283 the address itself that we assume is unique per-thread. */
284
285 static int
286 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
287 {
288 #ifdef __x86_64__
289 int use_64bit = is_64bit_tdesc ();
290
291 if (use_64bit)
292 {
293 void *base;
294 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
295 {
296 *addr = (CORE_ADDR) (uintptr_t) base;
297 return 0;
298 }
299
300 return -1;
301 }
302 #endif
303
304 {
305 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
306 struct thread_info *thr = get_lwp_thread (lwp);
307 struct regcache *regcache = get_thread_regcache (thr, 1);
308 unsigned int desc[4];
309 ULONGEST gs = 0;
310 const int reg_thread_area = 3; /* Bits to shift the GS selector right
to get the GDT entry index. */
311 int idx;
312
313 collect_register_by_name (regcache, "gs", &gs);
314
315 idx = gs >> reg_thread_area;
316
317 if (ptrace (PTRACE_GET_THREAD_AREA,
318 lwpid_of (thr),
319 (void *) (long) idx, (unsigned long) &desc) < 0)
320 return -1;
321
322 *addr = desc[1];
323 return 0;
324 }
325 }
326
327
328 \f
329 static int
330 x86_cannot_store_register (int regno)
331 {
332 #ifdef __x86_64__
333 if (is_64bit_tdesc ())
334 return 0;
335 #endif
336
337 return regno >= I386_NUM_REGS;
338 }
339
340 static int
341 x86_cannot_fetch_register (int regno)
342 {
343 #ifdef __x86_64__
344 if (is_64bit_tdesc ())
345 return 0;
346 #endif
347
348 return regno >= I386_NUM_REGS;
349 }
350
351 static void
352 x86_fill_gregset (struct regcache *regcache, void *buf)
353 {
354 int i;
355
356 #ifdef __x86_64__
357 if (register_size (regcache->tdesc, 0) == 8)
358 {
359 for (i = 0; i < X86_64_NUM_REGS; i++)
360 if (x86_64_regmap[i] != -1)
361 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
362 return;
363 }
364
365 /* 32-bit inferior registers need to be zero-extended.
366 Callers would read uninitialized memory otherwise. */
367 memset (buf, 0x00, X86_64_USER_REGS * 8);
368 #endif
369
370 for (i = 0; i < I386_NUM_REGS; i++)
371 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
372
373 collect_register_by_name (regcache, "orig_eax",
374 ((char *) buf) + ORIG_EAX * REGSIZE);
375 }
376
377 static void
378 x86_store_gregset (struct regcache *regcache, const void *buf)
379 {
380 int i;
381
382 #ifdef __x86_64__
383 if (register_size (regcache->tdesc, 0) == 8)
384 {
385 for (i = 0; i < X86_64_NUM_REGS; i++)
386 if (x86_64_regmap[i] != -1)
387 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
388 return;
389 }
390 #endif
391
392 for (i = 0; i < I386_NUM_REGS; i++)
393 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
394
395 supply_register_by_name (regcache, "orig_eax",
396 ((char *) buf) + ORIG_EAX * REGSIZE);
397 }
398
399 static void
400 x86_fill_fpregset (struct regcache *regcache, void *buf)
401 {
402 #ifdef __x86_64__
403 i387_cache_to_fxsave (regcache, buf);
404 #else
405 i387_cache_to_fsave (regcache, buf);
406 #endif
407 }
408
409 static void
410 x86_store_fpregset (struct regcache *regcache, const void *buf)
411 {
412 #ifdef __x86_64__
413 i387_fxsave_to_cache (regcache, buf);
414 #else
415 i387_fsave_to_cache (regcache, buf);
416 #endif
417 }
418
419 #ifndef __x86_64__
420
421 static void
422 x86_fill_fpxregset (struct regcache *regcache, void *buf)
423 {
424 i387_cache_to_fxsave (regcache, buf);
425 }
426
427 static void
428 x86_store_fpxregset (struct regcache *regcache, const void *buf)
429 {
430 i387_fxsave_to_cache (regcache, buf);
431 }
432
433 #endif
434
435 static void
436 x86_fill_xstateregset (struct regcache *regcache, void *buf)
437 {
438 i387_cache_to_xsave (regcache, buf);
439 }
440
441 static void
442 x86_store_xstateregset (struct regcache *regcache, const void *buf)
443 {
444 i387_xsave_to_cache (regcache, buf);
445 }
446
447 /* ??? The non-biarch i386 case stores all the i387 regs twice.
448 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
449 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
450 doesn't work. It would be nice to avoid the duplication where it
451 does work. Maybe the arch_setup routine could check whether it works
452 and update the supported regsets accordingly. */
453
454 static struct regset_info x86_regsets[] =
455 {
456 #ifdef HAVE_PTRACE_GETREGS
457 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
458 GENERAL_REGS,
459 x86_fill_gregset, x86_store_gregset },
460 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
461 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
462 # ifndef __x86_64__
463 # ifdef HAVE_PTRACE_GETFPXREGS
464 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
465 EXTENDED_REGS,
466 x86_fill_fpxregset, x86_store_fpxregset },
467 # endif
468 # endif
469 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
470 FP_REGS,
471 x86_fill_fpregset, x86_store_fpregset },
472 #endif /* HAVE_PTRACE_GETREGS */
473 { 0, 0, 0, -1, -1, NULL, NULL }
474 };
475
476 static CORE_ADDR
477 x86_get_pc (struct regcache *regcache)
478 {
479 int use_64bit = register_size (regcache->tdesc, 0) == 8;
480
481 if (use_64bit)
482 {
483 unsigned long pc;
484 collect_register_by_name (regcache, "rip", &pc);
485 return (CORE_ADDR) pc;
486 }
487 else
488 {
489 unsigned int pc;
490 collect_register_by_name (regcache, "eip", &pc);
491 return (CORE_ADDR) pc;
492 }
493 }
494
495 static void
496 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
497 {
498 int use_64bit = register_size (regcache->tdesc, 0) == 8;
499
500 if (use_64bit)
501 {
502 unsigned long newpc = pc;
503 supply_register_by_name (regcache, "rip", &newpc);
504 }
505 else
506 {
507 unsigned int newpc = pc;
508 supply_register_by_name (regcache, "eip", &newpc);
509 }
510 }
511 \f
512 static const unsigned char x86_breakpoint[] = { 0xCC };
513 #define x86_breakpoint_len 1
514
515 static int
516 x86_breakpoint_at (CORE_ADDR pc)
517 {
518 unsigned char c;
519
520 (*the_target->read_memory) (pc, &c, 1);
521 if (c == 0xCC)
522 return 1;
523
524 return 0;
525 }
526 \f
527
528 /* Return the offset of REGNUM in the u_debugreg field of struct
529 user. */
530
531 static int
532 u_debugreg_offset (int regnum)
533 {
534 return (offsetof (struct user, u_debugreg)
535 + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
536 }
537
538
539 /* Support for debug registers. */
540
541 /* Get debug register REGNUM value from the LWP specified by PTID. */
542
543 static unsigned long
544 x86_linux_dr_get (ptid_t ptid, int regnum)
545 {
546 int tid;
547 unsigned long value;
548
549 gdb_assert (ptid_lwp_p (ptid));
550 tid = ptid_get_lwp (ptid);
551
552 errno = 0;
553 value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
554 if (errno != 0)
555 perror_with_name (_("Couldn't read debug register"));
556
557 return value;
558 }
559
560 /* Set debug register REGNUM to VALUE in the LWP specified by PTID. */
561
562 static void
563 x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
564 {
565 int tid;
566
567 gdb_assert (ptid_lwp_p (ptid));
568 tid = ptid_get_lwp (ptid);
569
570 errno = 0;
571 ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
572 if (errno != 0)
573 perror_with_name (_("Couldn't write debug register"));
574 }
575
576 /* Callback for iterate_over_lwps. Mark that our local mirror of
577 LWP's debug registers has been changed, and cause LWP to stop if
578 it isn't already. Values are written from our local mirror to
579 the actual debug registers immediately prior to LWP resuming. */
580
581 static int
582 update_debug_registers_callback (struct lwp_info *lwp, void *arg)
583 {
584 lwp_set_debug_registers_changed (lwp, 1);
585
586 if (!lwp_is_stopped (lwp))
587 linux_stop_lwp (lwp);
588
589 /* Continue the iteration. */
590 return 0;
591 }
592
593 /* Store ADDR in debug register REGNUM of all LWPs of the current
594 inferior. */
595
596 static void
597 x86_linux_dr_set_addr (int regnum, CORE_ADDR addr)
598 {
599 ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
600
601 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
602
603 iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
604 }
605
606 /* Return the address stored in the current inferior's debug register
607 REGNUM. */
608
609 static CORE_ADDR
610 x86_linux_dr_get_addr (int regnum)
611 {
612 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
613
614 return x86_linux_dr_get (current_lwp_ptid (), regnum);
615 }
616
617 /* Store CONTROL in the debug control registers of all LWPs of the
618 current inferior. */
619
620 static void
621 x86_linux_dr_set_control (unsigned long control)
622 {
623 ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
624
625 iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
626 }
627
628 /* Return the value stored in the current inferior's debug control
629 register. */
630
631 static unsigned long
632 x86_linux_dr_get_control (void)
633 {
634 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
635 }
636
637 /* Return the value stored in the current inferior's debug status
638 register. */
639
640 static unsigned long
641 x86_linux_dr_get_status (void)
642 {
643 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
644 }
645
646 /* Low-level function vector. */
647 struct x86_dr_low_type x86_dr_low =
648 {
649 x86_linux_dr_set_control,
650 x86_linux_dr_set_addr,
651 x86_linux_dr_get_addr,
652 x86_linux_dr_get_status,
653 x86_linux_dr_get_control,
654 sizeof (void *),
655 };
656 \f
657 /* Breakpoint/Watchpoint support. */
658
659 static int
660 x86_supports_z_point_type (char z_type)
661 {
662 switch (z_type)
663 {
664 case Z_PACKET_SW_BP:
665 case Z_PACKET_HW_BP:
666 case Z_PACKET_WRITE_WP:
667 case Z_PACKET_ACCESS_WP:
668 return 1;
669 default:
670 return 0;
671 }
672 }
673
674 static int
675 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
676 int size, struct raw_breakpoint *bp)
677 {
678 struct process_info *proc = current_process ();
679
680 switch (type)
681 {
682 case raw_bkpt_type_sw:
683 return insert_memory_breakpoint (bp);
684
685 case raw_bkpt_type_hw:
686 case raw_bkpt_type_write_wp:
687 case raw_bkpt_type_access_wp:
688 {
689 enum target_hw_bp_type hw_type
690 = raw_bkpt_type_to_target_hw_bp_type (type);
691 struct x86_debug_reg_state *state
692 = &proc->priv->arch_private->debug_reg_state;
693
694 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
695 }
696
697 default:
698 /* Unsupported. */
699 return 1;
700 }
701 }
702
703 static int
704 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
705 int size, struct raw_breakpoint *bp)
706 {
707 struct process_info *proc = current_process ();
708
709 switch (type)
710 {
711 case raw_bkpt_type_sw:
712 return remove_memory_breakpoint (bp);
713
714 case raw_bkpt_type_hw:
715 case raw_bkpt_type_write_wp:
716 case raw_bkpt_type_access_wp:
717 {
718 enum target_hw_bp_type hw_type
719 = raw_bkpt_type_to_target_hw_bp_type (type);
720 struct x86_debug_reg_state *state
721 = &proc->priv->arch_private->debug_reg_state;
722
723 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
724 }
725 default:
726 /* Unsupported. */
727 return 1;
728 }
729 }
730
731 static int
732 x86_stopped_by_watchpoint (void)
733 {
734 struct process_info *proc = current_process ();
735 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
736 }
737
738 static CORE_ADDR
739 x86_stopped_data_address (void)
740 {
741 struct process_info *proc = current_process ();
742 CORE_ADDR addr;
743 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
744 &addr))
745 return addr;
746 return 0;
747 }
748 \f
749 /* Called when a new process is created. */
750
751 static struct arch_process_info *
752 x86_linux_new_process (void)
753 {
754 struct arch_process_info *info = XCNEW (struct arch_process_info);
755
756 x86_low_init_dregs (&info->debug_reg_state);
757
758 return info;
759 }
760
761 /* Called when a new thread is detected. */
762
763 static void
764 x86_linux_new_thread (struct lwp_info *lwp)
765 {
766 lwp_set_debug_registers_changed (lwp, 1);
767 }
768
769 /* See nat/x86-dregs.h. */
770
771 struct x86_debug_reg_state *
772 x86_debug_reg_state (pid_t pid)
773 {
774 struct process_info *proc = find_process_pid (pid);
775
776 return &proc->priv->arch_private->debug_reg_state;
777 }
778
779 /* Update the thread's debug registers if the values in our local
780 mirror have been changed. */
781
782 static void
783 x86_linux_update_debug_registers (struct lwp_info *lwp)
784 {
785 ptid_t ptid = ptid_of_lwp (lwp);
786 int clear_status = 0;
787
788 gdb_assert (lwp_is_stopped (lwp));
789
790 if (lwp_debug_registers_changed (lwp))
791 {
792 struct x86_debug_reg_state *state
793 = x86_debug_reg_state (ptid_get_pid (ptid));
794 int i;
795
796 /* Prior to Linux kernel 2.6.33 commit
797 72f674d203cd230426437cdcf7dd6f681dad8b0d, setting DR0-3 to
798 a value that did not match what was enabled in DR_CONTROL
799 resulted in EINVAL. To avoid this we zero DR_CONTROL before
800 writing address registers, only writing DR_CONTROL's actual
801 value once all the addresses are in place. */
802 x86_linux_dr_set (ptid, DR_CONTROL, 0);
803
804 ALL_DEBUG_ADDRESS_REGISTERS (i)
805 if (state->dr_ref_count[i] > 0)
806 {
807 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
808
809 /* If we're setting a watchpoint, any change the inferior
810 has made to its debug registers needs to be discarded
811 to avoid x86_stopped_data_address getting confused. */
812 clear_status = 1;
813 }
814
815 /* If DR_CONTROL is supposed to be zero then it's already set. */
816 if (state->dr_control_mirror != 0)
817 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
818
819 lwp_set_debug_registers_changed (lwp, 0);
820 }
821
822 if (clear_status
823 || lwp_stop_reason (lwp) == TARGET_STOPPED_BY_WATCHPOINT)
824 x86_linux_dr_set (ptid, DR_STATUS, 0);
825 }
826
827 /* Called prior to resuming a thread. */
828
829 static void
830 x86_linux_prepare_to_resume (struct lwp_info *lwp)
831 {
832 x86_linux_update_debug_registers (lwp);
833 }
834 \f
835 /* When GDBSERVER is built as a 64-bit application on Linux, the
836 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
837 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
838 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
839 conversion in-place ourselves. */
840
841 /* These types below (compat_*) define a siginfo type that is layout
842 compatible with the siginfo type exported by the 32-bit userspace
843 support. */
844
845 #ifdef __x86_64__
846
847 typedef int compat_int_t;
848 typedef unsigned int compat_uptr_t;
849
850 typedef int compat_time_t;
851 typedef int compat_timer_t;
852 typedef int compat_clock_t;
853
854 struct compat_timeval
855 {
856 compat_time_t tv_sec;
857 int tv_usec;
858 };
859
860 typedef union compat_sigval
861 {
862 compat_int_t sival_int;
863 compat_uptr_t sival_ptr;
864 } compat_sigval_t;
865
866 typedef struct compat_siginfo
867 {
868 int si_signo;
869 int si_errno;
870 int si_code;
871
872 union
873 {
874 int _pad[((128 / sizeof (int)) - 3)];
875
876 /* kill() */
877 struct
878 {
879 unsigned int _pid;
880 unsigned int _uid;
881 } _kill;
882
883 /* POSIX.1b timers */
884 struct
885 {
886 compat_timer_t _tid;
887 int _overrun;
888 compat_sigval_t _sigval;
889 } _timer;
890
891 /* POSIX.1b signals */
892 struct
893 {
894 unsigned int _pid;
895 unsigned int _uid;
896 compat_sigval_t _sigval;
897 } _rt;
898
899 /* SIGCHLD */
900 struct
901 {
902 unsigned int _pid;
903 unsigned int _uid;
904 int _status;
905 compat_clock_t _utime;
906 compat_clock_t _stime;
907 } _sigchld;
908
909 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
910 struct
911 {
912 unsigned int _addr;
913 } _sigfault;
914
915 /* SIGPOLL */
916 struct
917 {
918 int _band;
919 int _fd;
920 } _sigpoll;
921 } _sifields;
922 } compat_siginfo_t;
923
924 /* For x32, clock_t in _sigchld is 64 bits wide but only 4-byte aligned. */
925 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
926
927 typedef struct compat_x32_siginfo
928 {
929 int si_signo;
930 int si_errno;
931 int si_code;
932
933 union
934 {
935 int _pad[((128 / sizeof (int)) - 3)];
936
937 /* kill() */
938 struct
939 {
940 unsigned int _pid;
941 unsigned int _uid;
942 } _kill;
943
944 /* POSIX.1b timers */
945 struct
946 {
947 compat_timer_t _tid;
948 int _overrun;
949 compat_sigval_t _sigval;
950 } _timer;
951
952 /* POSIX.1b signals */
953 struct
954 {
955 unsigned int _pid;
956 unsigned int _uid;
957 compat_sigval_t _sigval;
958 } _rt;
959
960 /* SIGCHLD */
961 struct
962 {
963 unsigned int _pid;
964 unsigned int _uid;
965 int _status;
966 compat_x32_clock_t _utime;
967 compat_x32_clock_t _stime;
968 } _sigchld;
969
970 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
971 struct
972 {
973 unsigned int _addr;
974 } _sigfault;
975
976 /* SIGPOLL */
977 struct
978 {
979 int _band;
980 int _fd;
981 } _sigpoll;
982 } _sifields;
983 } compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
984
985 #define cpt_si_pid _sifields._kill._pid
986 #define cpt_si_uid _sifields._kill._uid
987 #define cpt_si_timerid _sifields._timer._tid
988 #define cpt_si_overrun _sifields._timer._overrun
989 #define cpt_si_status _sifields._sigchld._status
990 #define cpt_si_utime _sifields._sigchld._utime
991 #define cpt_si_stime _sifields._sigchld._stime
992 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
993 #define cpt_si_addr _sifields._sigfault._addr
994 #define cpt_si_band _sifields._sigpoll._band
995 #define cpt_si_fd _sifields._sigpoll._fd
996
997 /* glibc at least up to 2.3.2 doesn't have si_timerid and si_overrun.
998 In their place are si_timer1 and si_timer2. */
999 #ifndef si_timerid
1000 #define si_timerid si_timer1
1001 #endif
1002 #ifndef si_overrun
1003 #define si_overrun si_timer2
1004 #endif
1005
1006 static void
1007 compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
1008 {
1009 memset (to, 0, sizeof (*to));
1010
1011 to->si_signo = from->si_signo;
1012 to->si_errno = from->si_errno;
1013 to->si_code = from->si_code;
1014
1015 if (to->si_code == SI_TIMER)
1016 {
1017 to->cpt_si_timerid = from->si_timerid;
1018 to->cpt_si_overrun = from->si_overrun;
1019 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1020 }
1021 else if (to->si_code == SI_USER)
1022 {
1023 to->cpt_si_pid = from->si_pid;
1024 to->cpt_si_uid = from->si_uid;
1025 }
1026 else if (to->si_code < 0)
1027 {
1028 to->cpt_si_pid = from->si_pid;
1029 to->cpt_si_uid = from->si_uid;
1030 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1031 }
1032 else
1033 {
1034 switch (to->si_signo)
1035 {
1036 case SIGCHLD:
1037 to->cpt_si_pid = from->si_pid;
1038 to->cpt_si_uid = from->si_uid;
1039 to->cpt_si_status = from->si_status;
1040 to->cpt_si_utime = from->si_utime;
1041 to->cpt_si_stime = from->si_stime;
1042 break;
1043 case SIGILL:
1044 case SIGFPE:
1045 case SIGSEGV:
1046 case SIGBUS:
1047 to->cpt_si_addr = (intptr_t) from->si_addr;
1048 break;
1049 case SIGPOLL:
1050 to->cpt_si_band = from->si_band;
1051 to->cpt_si_fd = from->si_fd;
1052 break;
1053 default:
1054 to->cpt_si_pid = from->si_pid;
1055 to->cpt_si_uid = from->si_uid;
1056 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1057 break;
1058 }
1059 }
1060 }
1061
1062 static void
1063 siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
1064 {
1065 memset (to, 0, sizeof (*to));
1066
1067 to->si_signo = from->si_signo;
1068 to->si_errno = from->si_errno;
1069 to->si_code = from->si_code;
1070
1071 if (to->si_code == SI_TIMER)
1072 {
1073 to->si_timerid = from->cpt_si_timerid;
1074 to->si_overrun = from->cpt_si_overrun;
1075 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1076 }
1077 else if (to->si_code == SI_USER)
1078 {
1079 to->si_pid = from->cpt_si_pid;
1080 to->si_uid = from->cpt_si_uid;
1081 }
1082 else if (to->si_code < 0)
1083 {
1084 to->si_pid = from->cpt_si_pid;
1085 to->si_uid = from->cpt_si_uid;
1086 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1087 }
1088 else
1089 {
1090 switch (to->si_signo)
1091 {
1092 case SIGCHLD:
1093 to->si_pid = from->cpt_si_pid;
1094 to->si_uid = from->cpt_si_uid;
1095 to->si_status = from->cpt_si_status;
1096 to->si_utime = from->cpt_si_utime;
1097 to->si_stime = from->cpt_si_stime;
1098 break;
1099 case SIGILL:
1100 case SIGFPE:
1101 case SIGSEGV:
1102 case SIGBUS:
1103 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1104 break;
1105 case SIGPOLL:
1106 to->si_band = from->cpt_si_band;
1107 to->si_fd = from->cpt_si_fd;
1108 break;
1109 default:
1110 to->si_pid = from->cpt_si_pid;
1111 to->si_uid = from->cpt_si_uid;
1112 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1113 break;
1114 }
1115 }
1116 }
1117
1118 static void
1119 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
1120 siginfo_t *from)
1121 {
1122 memset (to, 0, sizeof (*to));
1123
1124 to->si_signo = from->si_signo;
1125 to->si_errno = from->si_errno;
1126 to->si_code = from->si_code;
1127
1128 if (to->si_code == SI_TIMER)
1129 {
1130 to->cpt_si_timerid = from->si_timerid;
1131 to->cpt_si_overrun = from->si_overrun;
1132 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1133 }
1134 else if (to->si_code == SI_USER)
1135 {
1136 to->cpt_si_pid = from->si_pid;
1137 to->cpt_si_uid = from->si_uid;
1138 }
1139 else if (to->si_code < 0)
1140 {
1141 to->cpt_si_pid = from->si_pid;
1142 to->cpt_si_uid = from->si_uid;
1143 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1144 }
1145 else
1146 {
1147 switch (to->si_signo)
1148 {
1149 case SIGCHLD:
1150 to->cpt_si_pid = from->si_pid;
1151 to->cpt_si_uid = from->si_uid;
1152 to->cpt_si_status = from->si_status;
1153 to->cpt_si_utime = from->si_utime;
1154 to->cpt_si_stime = from->si_stime;
1155 break;
1156 case SIGILL:
1157 case SIGFPE:
1158 case SIGSEGV:
1159 case SIGBUS:
1160 to->cpt_si_addr = (intptr_t) from->si_addr;
1161 break;
1162 case SIGPOLL:
1163 to->cpt_si_band = from->si_band;
1164 to->cpt_si_fd = from->si_fd;
1165 break;
1166 default:
1167 to->cpt_si_pid = from->si_pid;
1168 to->cpt_si_uid = from->si_uid;
1169 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1170 break;
1171 }
1172 }
1173 }
1174
1175 static void
1176 siginfo_from_compat_x32_siginfo (siginfo_t *to,
1177 compat_x32_siginfo_t *from)
1178 {
1179 memset (to, 0, sizeof (*to));
1180
1181 to->si_signo = from->si_signo;
1182 to->si_errno = from->si_errno;
1183 to->si_code = from->si_code;
1184
1185 if (to->si_code == SI_TIMER)
1186 {
1187 to->si_timerid = from->cpt_si_timerid;
1188 to->si_overrun = from->cpt_si_overrun;
1189 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1190 }
1191 else if (to->si_code == SI_USER)
1192 {
1193 to->si_pid = from->cpt_si_pid;
1194 to->si_uid = from->cpt_si_uid;
1195 }
1196 else if (to->si_code < 0)
1197 {
1198 to->si_pid = from->cpt_si_pid;
1199 to->si_uid = from->cpt_si_uid;
1200 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1201 }
1202 else
1203 {
1204 switch (to->si_signo)
1205 {
1206 case SIGCHLD:
1207 to->si_pid = from->cpt_si_pid;
1208 to->si_uid = from->cpt_si_uid;
1209 to->si_status = from->cpt_si_status;
1210 to->si_utime = from->cpt_si_utime;
1211 to->si_stime = from->cpt_si_stime;
1212 break;
1213 case SIGILL:
1214 case SIGFPE:
1215 case SIGSEGV:
1216 case SIGBUS:
1217 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1218 break;
1219 case SIGPOLL:
1220 to->si_band = from->cpt_si_band;
1221 to->si_fd = from->cpt_si_fd;
1222 break;
1223 default:
1224 to->si_pid = from->cpt_si_pid;
1225 to->si_uid = from->cpt_si_uid;
1226 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1227 break;
1228 }
1229 }
1230 }
1231
1232 #endif /* __x86_64__ */
1233
1234 /* Convert a native/host siginfo object into/from the siginfo in the
1235 layout of the inferior's architecture. Returns true if any
1236 conversion was done; false otherwise. If DIRECTION is 1, then copy
1237 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1238 INF. */
1239
1240 static int
1241 x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
1242 {
1243 #ifdef __x86_64__
1244 unsigned int machine;
1245 int tid = lwpid_of (current_thread);
1246 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1247
1248 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1249 if (!is_64bit_tdesc ())
1250 {
1251 gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));
1252
1253 if (direction == 0)
1254 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
1255 else
1256 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
1257
1258 return 1;
1259 }
1260 /* No fixup for native x32 GDB. */
1261 else if (!is_elf64 && sizeof (void *) == 8)
1262 {
1263 gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));
1264
1265 if (direction == 0)
1266 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
1267 native);
1268 else
1269 siginfo_from_compat_x32_siginfo (native,
1270 (struct compat_x32_siginfo *) inf);
1271
1272 return 1;
1273 }
1274 #endif
1275
1276 return 0;
1277 }
1278 \f
1279 static int use_xml;
1280
1281 /* Format of XSAVE extended state is:
1282 struct
1283 {
1284 fxsave_bytes[0..463]
1285 sw_usable_bytes[464..511]
1286 xstate_hdr_bytes[512..575]
1287 avx_bytes[576..831]
1288 future_state etc
1289 };
1290
1291 The same memory layout is used for the coredump NT_X86_XSTATE
1292 note representing the XSAVE extended state registers.
1293
1294 The first 8 bytes of sw_usable_bytes[464..471] are the OS-enabled
1295 extended state mask, which is the same as the extended control register
1296 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1297 together with the mask saved in the xstate_hdr_bytes to determine what
1298 states the processor/OS supports and what state, used or initialized,
1299 the process/thread is in. */
1300 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
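/* As an illustrative sketch (assuming BUF points at a raw XSAVE dump
   laid out as described above), XCR0 could be extracted like so:

     uint64_t xcr0;
     memcpy (&xcr0, (const char *) buf + I386_LINUX_XSAVE_XCR0_OFFSET,
             sizeof (xcr0));

   x86_linux_read_description below does the equivalent by indexing an
   array of uint64_t at offset 464 / sizeof (uint64_t).  */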
1301
1302 /* Does the current host support the GETFPXREGS request? The header
1303 file may or may not define it, and even if it is defined, the
1304 kernel will return EIO if it's running on a pre-SSE processor. */
1305 int have_ptrace_getfpxregs =
1306 #ifdef HAVE_PTRACE_GETFPXREGS
1307 -1
1308 #else
1309 0
1310 #endif
1311 ;
1312
1313 /* Does the current host support PTRACE_GETREGSET? */
1314 static int have_ptrace_getregset = -1;
1315
1316 /* Get Linux/x86 target description from running target. */
1317
1318 static const struct target_desc *
1319 x86_linux_read_description (void)
1320 {
1321 unsigned int machine;
1322 int is_elf64;
1323 int xcr0_features;
1324 int tid;
1325 static uint64_t xcr0;
1326 struct regset_info *regset;
1327
1328 tid = lwpid_of (current_thread);
1329
1330 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1331
1332 if (sizeof (void *) == 4)
1333 {
1334 if (is_elf64 > 0)
1335 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1336 #ifndef __x86_64__
1337 else if (machine == EM_X86_64)
1338 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1339 #endif
1340 }
1341
1342 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1343 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
1344 {
1345 elf_fpxregset_t fpxregs;
1346
1347 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
1348 {
1349 have_ptrace_getfpxregs = 0;
1350 have_ptrace_getregset = 0;
1351 return tdesc_i386_mmx_linux;
1352 }
1353 else
1354 have_ptrace_getfpxregs = 1;
1355 }
1356 #endif
1357
1358 if (!use_xml)
1359 {
1360 x86_xcr0 = X86_XSTATE_SSE_MASK;
1361
1362 /* Don't use XML. */
1363 #ifdef __x86_64__
1364 if (machine == EM_X86_64)
1365 return tdesc_amd64_linux_no_xml;
1366 else
1367 #endif
1368 return tdesc_i386_linux_no_xml;
1369 }
1370
1371 if (have_ptrace_getregset == -1)
1372 {
1373 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1374 struct iovec iov;
1375
1376 iov.iov_base = xstateregs;
1377 iov.iov_len = sizeof (xstateregs);
1378
1379 /* Check if PTRACE_GETREGSET works. */
1380 if (ptrace (PTRACE_GETREGSET, tid,
1381 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
1382 have_ptrace_getregset = 0;
1383 else
1384 {
1385 have_ptrace_getregset = 1;
1386
1387 /* Get XCR0 from XSAVE extended state. */
1388 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
1389 / sizeof (uint64_t))];
1390
1391 /* Use PTRACE_GETREGSET if it is available. */
1392 for (regset = x86_regsets;
1393 regset->fill_function != NULL; regset++)
1394 if (regset->get_request == PTRACE_GETREGSET)
1395 regset->size = X86_XSTATE_SIZE (xcr0);
1396 else if (regset->type != GENERAL_REGS)
1397 regset->size = 0;
1398 }
1399 }
1400
1401 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1402 xcr0_features = (have_ptrace_getregset
1403 && (xcr0 & X86_XSTATE_ALL_MASK));
1404
1405 if (xcr0_features)
1406 x86_xcr0 = xcr0;
1407
1408 if (machine == EM_X86_64)
1409 {
1410 #ifdef __x86_64__
1411 if (is_elf64)
1412 {
1413 if (xcr0_features)
1414 {
1415 switch (xcr0 & X86_XSTATE_ALL_MASK)
1416 {
1417 case X86_XSTATE_AVX512_MASK:
1418 return tdesc_amd64_avx512_linux;
1419
1420 case X86_XSTATE_MPX_MASK:
1421 return tdesc_amd64_mpx_linux;
1422
1423 case X86_XSTATE_AVX_MASK:
1424 return tdesc_amd64_avx_linux;
1425
1426 default:
1427 return tdesc_amd64_linux;
1428 }
1429 }
1430 else
1431 return tdesc_amd64_linux;
1432 }
1433 else
1434 {
1435 if (xcr0_features)
1436 {
1437 switch (xcr0 & X86_XSTATE_ALL_MASK)
1438 {
1439 case X86_XSTATE_AVX512_MASK:
1440 return tdesc_x32_avx512_linux;
1441
1442 case X86_XSTATE_MPX_MASK: /* No MPX on x32. */
1443 case X86_XSTATE_AVX_MASK:
1444 return tdesc_x32_avx_linux;
1445
1446 default:
1447 return tdesc_x32_linux;
1448 }
1449 }
1450 else
1451 return tdesc_x32_linux;
1452 }
1453 #endif
1454 }
1455 else
1456 {
1457 if (xcr0_features)
1458 {
1459 switch (xcr0 & X86_XSTATE_ALL_MASK)
1460 {
1461 case (X86_XSTATE_AVX512_MASK):
1462 return tdesc_i386_avx512_linux;
1463
1464 case (X86_XSTATE_MPX_MASK):
1465 return tdesc_i386_mpx_linux;
1466
1467 case (X86_XSTATE_AVX_MASK):
1468 return tdesc_i386_avx_linux;
1469
1470 default:
1471 return tdesc_i386_linux;
1472 }
1473 }
1474 else
1475 return tdesc_i386_linux;
1476 }
1477
1478 gdb_assert_not_reached ("failed to return tdesc");
1479 }
1480
1481 /* Callback for find_inferior. Stops iteration when a thread with a
1482 given PID is found. */
1483
1484 static int
1485 same_process_callback (struct inferior_list_entry *entry, void *data)
1486 {
1487 int pid = *(int *) data;
1488
1489 return (ptid_get_pid (entry->id) == pid);
1490 }
1491
1492 /* Callback for for_each_inferior. Calls the arch_setup routine for
1493 each process. */
1494
1495 static void
1496 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1497 {
1498 int pid = ptid_get_pid (entry->id);
1499
1500 /* Look up any thread of this process. */
1501 current_thread
1502 = (struct thread_info *) find_inferior (&all_threads,
1503 same_process_callback, &pid);
1504
1505 the_low_target.arch_setup ();
1506 }
1507
1508 /* Update the target description of all processes; a new GDB has
1509 connected, and it may or may not support XML target descriptions. */
1510
1511 static void
1512 x86_linux_update_xmltarget (void)
1513 {
1514 struct thread_info *saved_thread = current_thread;
1515
1516 /* Before changing the register cache's internal layout, flush the
1517 contents of the current valid caches back to the threads, and
1518 release the current regcache objects. */
1519 regcache_release ();
1520
1521 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1522
1523 current_thread = saved_thread;
1524 }
1525
1526 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1527 PTRACE_GETREGSET. */
1528
1529 static void
1530 x86_linux_process_qsupported (const char *query)
1531 {
1532 /* Assume no XML support unless told otherwise. If GDB sends
1533 "xmlRegisters=" with "i386" in the qSupported query, it supports
1534 x86 XML target descriptions. */
1535 use_xml = 0;
1536 if (query != NULL && startswith (query, "xmlRegisters="))
1537 {
1538 char *copy = xstrdup (query + 13);
1539 char *p;
1540
1541 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1542 {
1543 if (strcmp (p, "i386") == 0)
1544 {
1545 use_xml = 1;
1546 break;
1547 }
1548 }
1549
1550 free (copy);
1551 }
1552
1553 x86_linux_update_xmltarget ();
1554 }
1555
1556 /* Common for x86/x86-64. */
1557
1558 static struct regsets_info x86_regsets_info =
1559 {
1560 x86_regsets, /* regsets */
1561 0, /* num_regsets */
1562 NULL, /* disabled_regsets */
1563 };
1564
1565 #ifdef __x86_64__
1566 static struct regs_info amd64_linux_regs_info =
1567 {
1568 NULL, /* regset_bitmap */
1569 NULL, /* usrregs_info */
1570 &x86_regsets_info
1571 };
1572 #endif
1573 static struct usrregs_info i386_linux_usrregs_info =
1574 {
1575 I386_NUM_REGS,
1576 i386_regmap,
1577 };
1578
1579 static struct regs_info i386_linux_regs_info =
1580 {
1581 NULL, /* regset_bitmap */
1582 &i386_linux_usrregs_info,
1583 &x86_regsets_info
1584 };
1585
1586 const struct regs_info *
1587 x86_linux_regs_info (void)
1588 {
1589 #ifdef __x86_64__
1590 if (is_64bit_tdesc ())
1591 return &amd64_linux_regs_info;
1592 else
1593 #endif
1594 return &i386_linux_regs_info;
1595 }
1596
1597 /* Initialize the target description for the architecture of the
1598 inferior. */
1599
1600 static void
1601 x86_arch_setup (void)
1602 {
1603 current_process ()->tdesc = x86_linux_read_description ();
1604 }
1605
1606 static int
1607 x86_supports_tracepoints (void)
1608 {
1609 return 1;
1610 }
1611
1612 static void
1613 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1614 {
1615 write_inferior_memory (*to, buf, len);
1616 *to += len;
1617 }
1618
1619 static int
1620 push_opcode (unsigned char *buf, char *op)
1621 {
1622 unsigned char *buf_org = buf;
1623
1624 while (1)
1625 {
1626 char *endptr;
1627 unsigned long ul = strtoul (op, &endptr, 16);
1628
1629 if (endptr == op)
1630 break;
1631
1632 *buf++ = ul;
1633 op = endptr;
1634 }
1635
1636 return buf - buf_org;
1637 }
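/* For example, push_opcode (buf, "48 89 e6") writes the three bytes
   0x48, 0x89, 0xe6 (mov %rsp,%rsi) to BUF and returns 3.  */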
1638
1639 #ifdef __x86_64__
1640
1641 /* Build a jump pad that saves registers and calls a collection
1642 function. Writes the instruction that jumps to the jump pad into
1643 JJUMPAD_INSN. The caller is responsible for writing it at the
1644 tracepoint address. */
1645
1646 static int
1647 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1648 CORE_ADDR collector,
1649 CORE_ADDR lockaddr,
1650 ULONGEST orig_size,
1651 CORE_ADDR *jump_entry,
1652 CORE_ADDR *trampoline,
1653 ULONGEST *trampoline_size,
1654 unsigned char *jjump_pad_insn,
1655 ULONGEST *jjump_pad_insn_size,
1656 CORE_ADDR *adjusted_insn_addr,
1657 CORE_ADDR *adjusted_insn_addr_end,
1658 char *err)
1659 {
1660 unsigned char buf[40];
1661 int i, offset;
1662 int64_t loffset;
1663
1664 CORE_ADDR buildaddr = *jump_entry;
1665
1666 /* Build the jump pad. */
1667
1668 /* First, do tracepoint data collection. Save registers. */
1669 i = 0;
1670 /* Need to ensure stack pointer saved first. */
1671 buf[i++] = 0x54; /* push %rsp */
1672 buf[i++] = 0x55; /* push %rbp */
1673 buf[i++] = 0x57; /* push %rdi */
1674 buf[i++] = 0x56; /* push %rsi */
1675 buf[i++] = 0x52; /* push %rdx */
1676 buf[i++] = 0x51; /* push %rcx */
1677 buf[i++] = 0x53; /* push %rbx */
1678 buf[i++] = 0x50; /* push %rax */
1679 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1680 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1681 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1682 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1683 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1684 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1685 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1686 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1687 buf[i++] = 0x9c; /* pushfq */
1688 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1689 buf[i++] = 0xbf;
1690 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1691 i += sizeof (unsigned long);
1692 buf[i++] = 0x57; /* push %rdi */
1693 append_insns (&buildaddr, i, buf);
1694
1695 /* Stack space for the collecting_t object. */
1696 i = 0;
1697 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1698 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1699 memcpy (buf + i, &tpoint, 8);
1700 i += 8;
1701 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1702 i += push_opcode (&buf[i],
1703 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1704 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1705 append_insns (&buildaddr, i, buf);
1706
1707 /* spin-lock. */
1708 i = 0;
1709 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1710 memcpy (&buf[i], (void *) &lockaddr, 8);
1711 i += 8;
1712 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1713 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1714 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1715 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1716 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1717 append_insns (&buildaddr, i, buf);
1718
1719 /* Set up the gdb_collect call. */
1720 /* At this point, (stack pointer + 0x18) is the base of our saved
1721 register block. */
1722
1723 i = 0;
1724 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1725 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1726
1727 /* tpoint address may be 64-bit wide. */
1728 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1729 memcpy (buf + i, &tpoint, 8);
1730 i += 8;
1731 append_insns (&buildaddr, i, buf);
1732
1733 /* The collector function, being in the shared library, may be
1734 more than 31 bits away from the jump pad. */
1735 i = 0;
1736 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1737 memcpy (buf + i, &collector, 8);
1738 i += 8;
1739 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1740 append_insns (&buildaddr, i, buf);
1741
1742 /* Clear the spin-lock. */
1743 i = 0;
1744 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1745 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1746 memcpy (buf + i, &lockaddr, 8);
1747 i += 8;
1748 append_insns (&buildaddr, i, buf);
1749
1750 /* Remove stack that had been used for the collect_t object. */
1751 i = 0;
1752 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1753 append_insns (&buildaddr, i, buf);
1754
1755 /* Restore register state. */
1756 i = 0;
1757 buf[i++] = 0x48; /* add $0x8,%rsp */
1758 buf[i++] = 0x83;
1759 buf[i++] = 0xc4;
1760 buf[i++] = 0x08;
1761 buf[i++] = 0x9d; /* popfq */
1762 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1763 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1764 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1765 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1766 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1767 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1768 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1769 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1770 buf[i++] = 0x58; /* pop %rax */
1771 buf[i++] = 0x5b; /* pop %rbx */
1772 buf[i++] = 0x59; /* pop %rcx */
1773 buf[i++] = 0x5a; /* pop %rdx */
1774 buf[i++] = 0x5e; /* pop %rsi */
1775 buf[i++] = 0x5f; /* pop %rdi */
1776 buf[i++] = 0x5d; /* pop %rbp */
1777 buf[i++] = 0x5c; /* pop %rsp */
1778 append_insns (&buildaddr, i, buf);
1779
1780 /* Now, adjust the original instruction to execute in the jump
1781 pad. */
1782 *adjusted_insn_addr = buildaddr;
1783 relocate_instruction (&buildaddr, tpaddr);
1784 *adjusted_insn_addr_end = buildaddr;
1785
1786 /* Finally, write a jump back to the program. */
1787
1788 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1789 if (loffset > INT_MAX || loffset < INT_MIN)
1790 {
1791 sprintf (err,
1792 "E.Jump back from jump pad too far from tracepoint "
1793 "(offset 0x%" PRIx64 " > int32).", loffset);
1794 return 1;
1795 }
1796
1797 offset = (int) loffset;
1798 memcpy (buf, jump_insn, sizeof (jump_insn));
1799 memcpy (buf + 1, &offset, 4);
1800 append_insns (&buildaddr, sizeof (jump_insn), buf);
1801
1802 /* The jump pad is now built. Wire in a jump to our jump pad. This
1803 is always done last (by our caller actually), so that we can
1804 install fast tracepoints with threads running. This relies on
1805 the agent's atomic write support. */
1806 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1807 if (loffset > INT_MAX || loffset < INT_MIN)
1808 {
1809 sprintf (err,
1810 "E.Jump pad too far from tracepoint "
1811 "(offset 0x%" PRIx64 " > int32).", loffset);
1812 return 1;
1813 }
1814
1815 offset = (int) loffset;
1816
1817 memcpy (buf, jump_insn, sizeof (jump_insn));
1818 memcpy (buf + 1, &offset, 4);
1819 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1820 *jjump_pad_insn_size = sizeof (jump_insn);
1821
1822 /* Return the end address of our pad. */
1823 *jump_entry = buildaddr;
1824
1825 return 0;
1826 }
1827
1828 #endif /* __x86_64__ */
1829
1830 /* Build a jump pad that saves registers and calls a collection
1831 function. Writes the instruction that jumps to the jump pad into
1832 JJUMPAD_INSN. The caller is responsible for writing it at the
1833 tracepoint address. */
1834
1835 static int
1836 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1837 CORE_ADDR collector,
1838 CORE_ADDR lockaddr,
1839 ULONGEST orig_size,
1840 CORE_ADDR *jump_entry,
1841 CORE_ADDR *trampoline,
1842 ULONGEST *trampoline_size,
1843 unsigned char *jjump_pad_insn,
1844 ULONGEST *jjump_pad_insn_size,
1845 CORE_ADDR *adjusted_insn_addr,
1846 CORE_ADDR *adjusted_insn_addr_end,
1847 char *err)
1848 {
1849 unsigned char buf[0x100];
1850 int i, offset;
1851 CORE_ADDR buildaddr = *jump_entry;
1852
1853 /* Build the jump pad. */
1854
1855 /* First, do tracepoint data collection. Save registers. */
1856 i = 0;
1857 buf[i++] = 0x60; /* pushad */
1858 buf[i++] = 0x68; /* push tpaddr aka $pc */
1859 *((int *)(buf + i)) = (int) tpaddr;
1860 i += 4;
1861 buf[i++] = 0x9c; /* pushf */
1862 buf[i++] = 0x1e; /* push %ds */
1863 buf[i++] = 0x06; /* push %es */
1864 buf[i++] = 0x0f; /* push %fs */
1865 buf[i++] = 0xa0;
1866 buf[i++] = 0x0f; /* push %gs */
1867 buf[i++] = 0xa8;
1868 buf[i++] = 0x16; /* push %ss */
1869 buf[i++] = 0x0e; /* push %cs */
1870 append_insns (&buildaddr, i, buf);
1871
1872 /* Stack space for the collecting_t object. */
1873 i = 0;
1874 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1875
1876 /* Build the object. */
1877 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1878 memcpy (buf + i, &tpoint, 4);
1879 i += 4;
1880 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1881
1882 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1883 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1884 append_insns (&buildaddr, i, buf);
1885
1886 /* Spin-lock. Note this uses cmpxchg, which the original i386 lacks.
1887 If we cared about that, this could use xchg instead. */
1888
1889 i = 0;
1890 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1891 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1892 %esp,<lockaddr> */
1893 memcpy (&buf[i], (void *) &lockaddr, 4);
1894 i += 4;
1895 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1896 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1897 append_insns (&buildaddr, i, buf);
1898
1899
1900 /* Set up arguments to the gdb_collect call. */
1901 i = 0;
1902 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1903 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1904 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1905 append_insns (&buildaddr, i, buf);
1906
1907 i = 0;
1908 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1909 append_insns (&buildaddr, i, buf);
1910
1911 i = 0;
1912 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1913 memcpy (&buf[i], (void *) &tpoint, 4);
1914 i += 4;
1915 append_insns (&buildaddr, i, buf);
1916
1917 buf[0] = 0xe8; /* call <reladdr> */
1918 offset = collector - (buildaddr + sizeof (jump_insn));
1919 memcpy (buf + 1, &offset, 4);
1920 append_insns (&buildaddr, 5, buf);
1921 /* Clean up after the call. */
1922 buf[0] = 0x83; /* add $0x8,%esp */
1923 buf[1] = 0xc4;
1924 buf[2] = 0x08;
1925 append_insns (&buildaddr, 3, buf);
1926
1927
1928 /* Clear the spin-lock. This would need the LOCK prefix on older
1929 broken archs. */
1930 i = 0;
1931 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1932 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1933 memcpy (buf + i, &lockaddr, 4);
1934 i += 4;
1935 append_insns (&buildaddr, i, buf);
1936
1937
1938 /* Remove stack that had been used for the collect_t object. */
1939 i = 0;
1940 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1941 append_insns (&buildaddr, i, buf);
1942
1943 i = 0;
1944 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1945 buf[i++] = 0xc4;
1946 buf[i++] = 0x04;
1947 buf[i++] = 0x17; /* pop %ss */
1948 buf[i++] = 0x0f; /* pop %gs */
1949 buf[i++] = 0xa9;
1950 buf[i++] = 0x0f; /* pop %fs */
1951 buf[i++] = 0xa1;
1952 buf[i++] = 0x07; /* pop %es */
1953 buf[i++] = 0x1f; /* pop %ds */
1954 buf[i++] = 0x9d; /* popf */
1955 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1956 buf[i++] = 0xc4;
1957 buf[i++] = 0x04;
1958 buf[i++] = 0x61; /* popad */
1959 append_insns (&buildaddr, i, buf);
1960
1961 /* Now, adjust the original instruction to execute in the jump
1962 pad. */
1963 *adjusted_insn_addr = buildaddr;
1964 relocate_instruction (&buildaddr, tpaddr);
1965 *adjusted_insn_addr_end = buildaddr;
1966
1967 /* Write the jump back to the program. */
1968 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1969 memcpy (buf, jump_insn, sizeof (jump_insn));
1970 memcpy (buf + 1, &offset, 4);
1971 append_insns (&buildaddr, sizeof (jump_insn), buf);
1972
1973 /* The jump pad is now built. Wire in a jump to our jump pad. This
1974 is always done last (by our caller actually), so that we can
1975 install fast tracepoints with threads running. This relies on
1976 the agent's atomic write support. */
1977 if (orig_size == 4)
1978 {
1979 /* Create a trampoline. */
1980 *trampoline_size = sizeof (jump_insn);
1981 if (!claim_trampoline_space (*trampoline_size, trampoline))
1982 {
1983 /* No trampoline space available. */
1984 strcpy (err,
1985 "E.Cannot allocate trampoline space needed for fast "
1986 "tracepoints on 4-byte instructions.");
1987 return 1;
1988 }
1989
1990 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1991 memcpy (buf, jump_insn, sizeof (jump_insn));
1992 memcpy (buf + 1, &offset, 4);
1993 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1994
1995 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1996 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1997 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1998 memcpy (buf + 2, &offset, 2);
1999 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
2000 *jjump_pad_insn_size = sizeof (small_jump_insn);
2001 }
2002 else
2003 {
2004 /* Else use a 32-bit relative jump instruction. */
2005 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
2006 memcpy (buf, jump_insn, sizeof (jump_insn));
2007 memcpy (buf + 1, &offset, 4);
2008 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
2009 *jjump_pad_insn_size = sizeof (jump_insn);
2010 }
2011
2012 /* Return the end address of our pad. */
2013 *jump_entry = buildaddr;
2014
2015 return 0;
2016 }
2017
2018 static int
2019 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
2020 CORE_ADDR collector,
2021 CORE_ADDR lockaddr,
2022 ULONGEST orig_size,
2023 CORE_ADDR *jump_entry,
2024 CORE_ADDR *trampoline,
2025 ULONGEST *trampoline_size,
2026 unsigned char *jjump_pad_insn,
2027 ULONGEST *jjump_pad_insn_size,
2028 CORE_ADDR *adjusted_insn_addr,
2029 CORE_ADDR *adjusted_insn_addr_end,
2030 char *err)
2031 {
2032 #ifdef __x86_64__
2033 if (is_64bit_tdesc ())
2034 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2035 collector, lockaddr,
2036 orig_size, jump_entry,
2037 trampoline, trampoline_size,
2038 jjump_pad_insn,
2039 jjump_pad_insn_size,
2040 adjusted_insn_addr,
2041 adjusted_insn_addr_end,
2042 err);
2043 #endif
2044
2045 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2046 collector, lockaddr,
2047 orig_size, jump_entry,
2048 trampoline, trampoline_size,
2049 jjump_pad_insn,
2050 jjump_pad_insn_size,
2051 adjusted_insn_addr,
2052 adjusted_insn_addr_end,
2053 err);
2054 }
2055
2056 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2057 architectures. */
2058
2059 static int
2060 x86_get_min_fast_tracepoint_insn_len (void)
2061 {
2062 static int warned_about_fast_tracepoints = 0;
2063
2064 #ifdef __x86_64__
2065 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2066 used for fast tracepoints. */
2067 if (is_64bit_tdesc ())
2068 return 5;
2069 #endif
2070
2071 if (agent_loaded_p ())
2072 {
2073 char errbuf[IPA_BUFSIZ];
2074
2075 errbuf[0] = '\0';
2076
2077 /* On x86, if trampolines are available, then 4-byte jump instructions
2078 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2079 with a 4-byte offset are used instead. */
2080 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2081 return 4;
2082 else
2083 {
2084           /* GDB has no channel to explain to the user why a shorter fast
2085              tracepoint is not possible, but at least make GDBserver
2086              mention that something has gone awry. */
2087 if (!warned_about_fast_tracepoints)
2088 {
2089 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2090 warned_about_fast_tracepoints = 1;
2091 }
2092 return 5;
2093 }
2094 }
2095 else
2096 {
2097 /* Indicate that the minimum length is currently unknown since the IPA
2098 has not loaded yet. */
2099 return 0;
2100 }
2101 }
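
/* Summary of the cases above:

     64-bit inferior                      -> 5  (e9 + rel32)
     32-bit, trampoline buffer available  -> 4  (16-bit relative jump
                                                 through a trampoline)
     32-bit, no trampoline buffer         -> 5  (e9 + rel32)
     IPA not loaded yet                   -> 0  (minimum still unknown)

   The 4-byte form is the small_jump_insn sequence emitted by
   i386_install_fast_tracepoint_jump_pad above.  */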
2102
2103 static void
2104 add_insns (unsigned char *start, int len)
2105 {
2106 CORE_ADDR buildaddr = current_insn_ptr;
2107
2108 if (debug_threads)
2109 debug_printf ("Adding %d bytes of insn at %s\n",
2110 len, paddress (buildaddr));
2111
2112 append_insns (&buildaddr, len, start);
2113 current_insn_ptr = buildaddr;
2114 }
2115
2116 /* Our general strategy for emitting code is to avoid specifying raw
2117 bytes whenever possible, and instead copy a block of inline asm
2118 that is embedded in the function. This is a little messy, because
2119 we need to keep the compiler from discarding what looks like dead
2120 code, plus suppress various warnings. */
2121
2122 #define EMIT_ASM(NAME, INSNS) \
2123 do \
2124 { \
2125 extern unsigned char start_ ## NAME, end_ ## NAME; \
2126 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2127 __asm__ ("jmp end_" #NAME "\n" \
2128 "\t" "start_" #NAME ":" \
2129 "\t" INSNS "\n" \
2130 "\t" "end_" #NAME ":"); \
2131 } while (0)
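
/* Illustrative expansion (a sketch, never compiled as-is): with
   EMIT_ASM (sample, "pop %rax") the macro above becomes roughly

     extern unsigned char start_sample, end_sample;
     add_insns (&start_sample, &end_sample - &start_sample);
     __asm__ ("jmp end_sample\n"
              "\t" "start_sample:" "\t" "pop %rax" "\n"
              "\t" "end_sample:");

   The leading jmp keeps the embedded bytes from ever executing in the
   emitting function itself, while add_insns copies everything between
   the two labels into the code being built at current_insn_ptr.  */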
2132
2133 #ifdef __x86_64__
2134
2135 #define EMIT_ASM32(NAME,INSNS) \
2136 do \
2137 { \
2138 extern unsigned char start_ ## NAME, end_ ## NAME; \
2139 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2140 __asm__ (".code32\n" \
2141 "\t" "jmp end_" #NAME "\n" \
2142 "\t" "start_" #NAME ":\n" \
2143 "\t" INSNS "\n" \
2144 "\t" "end_" #NAME ":\n" \
2145 ".code64\n"); \
2146 } while (0)
2147
2148 #else
2149
2150 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2151
2152 #endif
2153
2154 #ifdef __x86_64__
2155
2156 static void
2157 amd64_emit_prologue (void)
2158 {
2159 EMIT_ASM (amd64_prologue,
2160 "pushq %rbp\n\t"
2161 "movq %rsp,%rbp\n\t"
2162 "sub $0x20,%rsp\n\t"
2163 "movq %rdi,-8(%rbp)\n\t"
2164 "movq %rsi,-16(%rbp)");
2165 }
2166
2167
2168 static void
2169 amd64_emit_epilogue (void)
2170 {
2171 EMIT_ASM (amd64_epilogue,
2172 "movq -16(%rbp),%rdi\n\t"
2173 "movq %rax,(%rdi)\n\t"
2174 "xor %rax,%rax\n\t"
2175 "leave\n\t"
2176 "ret");
2177 }
2178
2179 static void
2180 amd64_emit_add (void)
2181 {
2182 EMIT_ASM (amd64_add,
2183 "add (%rsp),%rax\n\t"
2184 "lea 0x8(%rsp),%rsp");
2185 }
2186
2187 static void
2188 amd64_emit_sub (void)
2189 {
2190 EMIT_ASM (amd64_sub,
2191 "sub %rax,(%rsp)\n\t"
2192 "pop %rax");
2193 }
2194
2195 static void
2196 amd64_emit_mul (void)
2197 {
2198 emit_error = 1;
2199 }
2200
2201 static void
2202 amd64_emit_lsh (void)
2203 {
2204 emit_error = 1;
2205 }
2206
2207 static void
2208 amd64_emit_rsh_signed (void)
2209 {
2210 emit_error = 1;
2211 }
2212
2213 static void
2214 amd64_emit_rsh_unsigned (void)
2215 {
2216 emit_error = 1;
2217 }
2218
2219 static void
2220 amd64_emit_ext (int arg)
2221 {
2222 switch (arg)
2223 {
2224 case 8:
2225 EMIT_ASM (amd64_ext_8,
2226 "cbtw\n\t"
2227 "cwtl\n\t"
2228 "cltq");
2229 break;
2230 case 16:
2231 EMIT_ASM (amd64_ext_16,
2232 "cwtl\n\t"
2233 "cltq");
2234 break;
2235 case 32:
2236 EMIT_ASM (amd64_ext_32,
2237 "cltq");
2238 break;
2239 default:
2240 emit_error = 1;
2241 }
2242 }
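
/* Worked example for the 8-bit case above: if %rax holds
   0x00000000000000ff (the byte -1), then

     cbtw   sign-extends %al  -> %ax    giving 0xffff
     cwtl   sign-extends %ax  -> %eax   giving 0xffffffff
     cltq   sign-extends %eax -> %rax   giving 0xffffffffffffffff

   so the byte is treated as the signed value -1 across the full
   64-bit accumulator.  */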
2243
2244 static void
2245 amd64_emit_log_not (void)
2246 {
2247 EMIT_ASM (amd64_log_not,
2248 "test %rax,%rax\n\t"
2249 "sete %cl\n\t"
2250 "movzbq %cl,%rax");
2251 }
2252
2253 static void
2254 amd64_emit_bit_and (void)
2255 {
2256 EMIT_ASM (amd64_and,
2257 "and (%rsp),%rax\n\t"
2258 "lea 0x8(%rsp),%rsp");
2259 }
2260
2261 static void
2262 amd64_emit_bit_or (void)
2263 {
2264 EMIT_ASM (amd64_or,
2265 "or (%rsp),%rax\n\t"
2266 "lea 0x8(%rsp),%rsp");
2267 }
2268
2269 static void
2270 amd64_emit_bit_xor (void)
2271 {
2272 EMIT_ASM (amd64_xor,
2273 "xor (%rsp),%rax\n\t"
2274 "lea 0x8(%rsp),%rsp");
2275 }
2276
2277 static void
2278 amd64_emit_bit_not (void)
2279 {
2280 EMIT_ASM (amd64_bit_not,
2281 "xorq $0xffffffffffffffff,%rax");
2282 }
2283
2284 static void
2285 amd64_emit_equal (void)
2286 {
2287 EMIT_ASM (amd64_equal,
2288 "cmp %rax,(%rsp)\n\t"
2289 "je .Lamd64_equal_true\n\t"
2290 "xor %rax,%rax\n\t"
2291 "jmp .Lamd64_equal_end\n\t"
2292 ".Lamd64_equal_true:\n\t"
2293 "mov $0x1,%rax\n\t"
2294 ".Lamd64_equal_end:\n\t"
2295 "lea 0x8(%rsp),%rsp");
2296 }
2297
2298 static void
2299 amd64_emit_less_signed (void)
2300 {
2301 EMIT_ASM (amd64_less_signed,
2302 "cmp %rax,(%rsp)\n\t"
2303 "jl .Lamd64_less_signed_true\n\t"
2304 "xor %rax,%rax\n\t"
2305 "jmp .Lamd64_less_signed_end\n\t"
2306 ".Lamd64_less_signed_true:\n\t"
2307 "mov $1,%rax\n\t"
2308 ".Lamd64_less_signed_end:\n\t"
2309 "lea 0x8(%rsp),%rsp");
2310 }
2311
2312 static void
2313 amd64_emit_less_unsigned (void)
2314 {
2315 EMIT_ASM (amd64_less_unsigned,
2316 "cmp %rax,(%rsp)\n\t"
2317 "jb .Lamd64_less_unsigned_true\n\t"
2318 "xor %rax,%rax\n\t"
2319 "jmp .Lamd64_less_unsigned_end\n\t"
2320 ".Lamd64_less_unsigned_true:\n\t"
2321 "mov $1,%rax\n\t"
2322 ".Lamd64_less_unsigned_end:\n\t"
2323 "lea 0x8(%rsp),%rsp");
2324 }
2325
2326 static void
2327 amd64_emit_ref (int size)
2328 {
2329 switch (size)
2330 {
2331 case 1:
2332 EMIT_ASM (amd64_ref1,
2333 "movb (%rax),%al");
2334 break;
2335 case 2:
2336 EMIT_ASM (amd64_ref2,
2337 "movw (%rax),%ax");
2338 break;
2339 case 4:
2340 EMIT_ASM (amd64_ref4,
2341 "movl (%rax),%eax");
2342 break;
2343 case 8:
2344 EMIT_ASM (amd64_ref8,
2345 "movq (%rax),%rax");
2346 break;
2347 }
2348 }
2349
2350 static void
2351 amd64_emit_if_goto (int *offset_p, int *size_p)
2352 {
2353 EMIT_ASM (amd64_if_goto,
2354 "mov %rax,%rcx\n\t"
2355 "pop %rax\n\t"
2356 "cmp $0,%rcx\n\t"
2357 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2358 if (offset_p)
2359 *offset_p = 10;
2360 if (size_p)
2361 *size_p = 4;
2362 }
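
/* Where the 10 above comes from -- the byte offset of the rel32 field
   in the sequence just emitted (encodings as produced by GNU as):

     48 89 c1        mov %rax,%rcx    (3 bytes)
     58              pop %rax         (1 byte)
     48 83 f9 00     cmp $0,%rcx      (4 bytes)
     0f 85 ...       jne rel32        (2 opcode bytes)

   3 + 1 + 4 + 2 = 10, and the displacement itself is 4 bytes, hence
   *size_p = 4.  */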
2363
2364 static void
2365 amd64_emit_goto (int *offset_p, int *size_p)
2366 {
2367 EMIT_ASM (amd64_goto,
2368 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2369 if (offset_p)
2370 *offset_p = 1;
2371 if (size_p)
2372 *size_p = 4;
2373 }
2374
2375 static void
2376 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2377 {
2378 int diff = (to - (from + size));
2379 unsigned char buf[sizeof (int)];
2380
2381 if (size != 4)
2382 {
2383 emit_error = 1;
2384 return;
2385 }
2386
2387 memcpy (buf, &diff, sizeof (int));
2388 write_inferior_memory (from, buf, sizeof (int));
2389 }
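
/* Sketch of the patch this performs, with made-up addresses: if the
   rel32 placeholder sits at from = 0x1006 (so the jump instruction
   ends at from + 4 = 0x100a) and the target is to = 0x1020, then

     diff = 0x1020 - (0x1006 + 4) = 0x16

   and those four bytes overwrite the zeros left by amd64_emit_goto or
   amd64_emit_if_goto.  */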
2390
2391 static void
2392 amd64_emit_const (LONGEST num)
2393 {
2394 unsigned char buf[16];
2395 int i;
2396 CORE_ADDR buildaddr = current_insn_ptr;
2397
2398 i = 0;
2399 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2400 memcpy (&buf[i], &num, sizeof (num));
2401 i += 8;
2402 append_insns (&buildaddr, i, buf);
2403 current_insn_ptr = buildaddr;
2404 }
2405
2406 static void
2407 amd64_emit_call (CORE_ADDR fn)
2408 {
2409 unsigned char buf[16];
2410 int i;
2411 CORE_ADDR buildaddr;
2412 LONGEST offset64;
2413
2414   /* The destination function, being in the shared library, may be
2415      more than 31 bits away from the compiled code pad. */
2416
2417 buildaddr = current_insn_ptr;
2418
2419 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2420
2421 i = 0;
2422
2423 if (offset64 > INT_MAX || offset64 < INT_MIN)
2424 {
2425       /* Offset is too large for a direct call, so call through a
2426          register instead. Use %rdx: it is call-clobbered, so we
2427          don't have to push/pop it around the call. */
2428       buf[i++] = 0x48; /* movabs $fn,%rdx */
2429       buf[i++] = 0xba;
2430       memcpy (buf + i, &fn, 8);
2431       i += 8;
2432       buf[i++] = 0xff; /* callq *%rdx */
2433       buf[i++] = 0xd2;
2434 }
2435 else
2436 {
2437       int offset32 = offset64; /* we know we can't overflow here. */
      buf[i++] = 0xe8; /* call <reladdr> */
2438       memcpy (buf + i, &offset32, 4);
2439       i += 4;
2440 }
2441
2442 append_insns (&buildaddr, i, buf);
2443 current_insn_ptr = buildaddr;
2444 }
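
/* The two encodings chosen above, side by side (a sketch; the
   operands are placeholders):

     in range:       e8 <rel32>         call fn  (relative)
     out of range:   48 ba <imm64>      movabs $fn,%rdx
                     ff d2              callq *%rdx

   The indirect form is safe here because %rdx is call-clobbered in
   the SysV AMD64 ABI and holds no live value at this point in the
   compiled expression.  */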
2445
2446 static void
2447 amd64_emit_reg (int reg)
2448 {
2449 unsigned char buf[16];
2450 int i;
2451 CORE_ADDR buildaddr;
2452
2453 /* Assume raw_regs is still in %rdi. */
2454 buildaddr = current_insn_ptr;
2455 i = 0;
2456 buf[i++] = 0xbe; /* mov $<n>,%esi */
2457 memcpy (&buf[i], &reg, sizeof (reg));
2458 i += 4;
2459 append_insns (&buildaddr, i, buf);
2460 current_insn_ptr = buildaddr;
2461 amd64_emit_call (get_raw_reg_func_addr ());
2462 }
2463
2464 static void
2465 amd64_emit_pop (void)
2466 {
2467 EMIT_ASM (amd64_pop,
2468 "pop %rax");
2469 }
2470
2471 static void
2472 amd64_emit_stack_flush (void)
2473 {
2474 EMIT_ASM (amd64_stack_flush,
2475 "push %rax");
2476 }
2477
2478 static void
2479 amd64_emit_zero_ext (int arg)
2480 {
2481 switch (arg)
2482 {
2483 case 8:
2484 EMIT_ASM (amd64_zero_ext_8,
2485 "and $0xff,%rax");
2486 break;
2487 case 16:
2488 EMIT_ASM (amd64_zero_ext_16,
2489 "and $0xffff,%rax");
2490 break;
2491 case 32:
2492 EMIT_ASM (amd64_zero_ext_32,
2493 "mov $0xffffffff,%rcx\n\t"
2494 "and %rcx,%rax");
2495 break;
2496 default:
2497 emit_error = 1;
2498 }
2499 }
2500
2501 static void
2502 amd64_emit_swap (void)
2503 {
2504 EMIT_ASM (amd64_swap,
2505 "mov %rax,%rcx\n\t"
2506 "pop %rax\n\t"
2507 "push %rcx");
2508 }
2509
2510 static void
2511 amd64_emit_stack_adjust (int n)
2512 {
2513 unsigned char buf[16];
2514 int i;
2515 CORE_ADDR buildaddr = current_insn_ptr;
2516
2517 i = 0;
2518 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2519 buf[i++] = 0x8d;
2520 buf[i++] = 0x64;
2521 buf[i++] = 0x24;
2522   /* This only handles small adjustments (n * 8 must fit in a signed 8-bit displacement), which is all we expect. */
2523 buf[i++] = n * 8;
2524 append_insns (&buildaddr, i, buf);
2525 current_insn_ptr = buildaddr;
2526 }
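
/* Example encoding (hypothetical n): for n = 2 the bytes above are

     48 8d 64 24 10    lea 0x10(%rsp),%rsp

   discarding two 8-byte stack slots in one instruction without
   touching the flags, which is why lea is used instead of add.  */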
2527
2528 /* FN's prototype is `LONGEST(*fn)(int)'. */
2529
2530 static void
2531 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2532 {
2533 unsigned char buf[16];
2534 int i;
2535 CORE_ADDR buildaddr;
2536
2537 buildaddr = current_insn_ptr;
2538 i = 0;
2539 buf[i++] = 0xbf; /* movl $<n>,%edi */
2540 memcpy (&buf[i], &arg1, sizeof (arg1));
2541 i += 4;
2542 append_insns (&buildaddr, i, buf);
2543 current_insn_ptr = buildaddr;
2544 amd64_emit_call (fn);
2545 }
2546
2547 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2548
2549 static void
2550 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2551 {
2552 unsigned char buf[16];
2553 int i;
2554 CORE_ADDR buildaddr;
2555
2556 buildaddr = current_insn_ptr;
2557 i = 0;
2558 buf[i++] = 0xbf; /* movl $<n>,%edi */
2559 memcpy (&buf[i], &arg1, sizeof (arg1));
2560 i += 4;
2561 append_insns (&buildaddr, i, buf);
2562 current_insn_ptr = buildaddr;
2563 EMIT_ASM (amd64_void_call_2_a,
2564 /* Save away a copy of the stack top. */
2565 "push %rax\n\t"
2566 /* Also pass top as the second argument. */
2567 "mov %rax,%rsi");
2568 amd64_emit_call (fn);
2569 EMIT_ASM (amd64_void_call_2_b,
2570             /* Restore the stack top; %rax may have been trashed. */
2571 "pop %rax");
2572 }
2573
2574 void
2575 amd64_emit_eq_goto (int *offset_p, int *size_p)
2576 {
2577 EMIT_ASM (amd64_eq,
2578 "cmp %rax,(%rsp)\n\t"
2579 "jne .Lamd64_eq_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2581 "pop %rax\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_eq_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2586 "pop %rax");
2587
2588 if (offset_p)
2589 *offset_p = 13;
2590 if (size_p)
2591 *size_p = 4;
2592 }
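
/* Where the 13 above comes from (the same arithmetic applies to the
   ne/lt/le/gt/ge variants below):

     48 39 04 24      cmp %rax,(%rsp)      (4 bytes)
     75 xx            jne <fallthru>       (2 bytes)
     48 8d 64 24 08   lea 0x8(%rsp),%rsp   (5 bytes)
     58               pop %rax             (1 byte)
     e9 <rel32>       jmp                  (1 opcode byte)

   4 + 2 + 5 + 1 + 1 = 13 is the offset of the rel32 field that
   amd64_write_goto_address later patches.  */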
2593
2594 void
2595 amd64_emit_ne_goto (int *offset_p, int *size_p)
2596 {
2597 EMIT_ASM (amd64_ne,
2598 "cmp %rax,(%rsp)\n\t"
2599 "je .Lamd64_ne_fallthru\n\t"
2600 "lea 0x8(%rsp),%rsp\n\t"
2601 "pop %rax\n\t"
2602 /* jmp, but don't trust the assembler to choose the right jump */
2603 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2604 ".Lamd64_ne_fallthru:\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2606 "pop %rax");
2607
2608 if (offset_p)
2609 *offset_p = 13;
2610 if (size_p)
2611 *size_p = 4;
2612 }
2613
2614 void
2615 amd64_emit_lt_goto (int *offset_p, int *size_p)
2616 {
2617 EMIT_ASM (amd64_lt,
2618 "cmp %rax,(%rsp)\n\t"
2619 "jnl .Lamd64_lt_fallthru\n\t"
2620 "lea 0x8(%rsp),%rsp\n\t"
2621 "pop %rax\n\t"
2622 /* jmp, but don't trust the assembler to choose the right jump */
2623 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2624 ".Lamd64_lt_fallthru:\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2626 "pop %rax");
2627
2628 if (offset_p)
2629 *offset_p = 13;
2630 if (size_p)
2631 *size_p = 4;
2632 }
2633
2634 void
2635 amd64_emit_le_goto (int *offset_p, int *size_p)
2636 {
2637 EMIT_ASM (amd64_le,
2638 "cmp %rax,(%rsp)\n\t"
2639 "jnle .Lamd64_le_fallthru\n\t"
2640 "lea 0x8(%rsp),%rsp\n\t"
2641 "pop %rax\n\t"
2642 /* jmp, but don't trust the assembler to choose the right jump */
2643 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2644 ".Lamd64_le_fallthru:\n\t"
2645 "lea 0x8(%rsp),%rsp\n\t"
2646 "pop %rax");
2647
2648 if (offset_p)
2649 *offset_p = 13;
2650 if (size_p)
2651 *size_p = 4;
2652 }
2653
2654 void
2655 amd64_emit_gt_goto (int *offset_p, int *size_p)
2656 {
2657 EMIT_ASM (amd64_gt,
2658 "cmp %rax,(%rsp)\n\t"
2659 "jng .Lamd64_gt_fallthru\n\t"
2660 "lea 0x8(%rsp),%rsp\n\t"
2661 "pop %rax\n\t"
2662 /* jmp, but don't trust the assembler to choose the right jump */
2663 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2664 ".Lamd64_gt_fallthru:\n\t"
2665 "lea 0x8(%rsp),%rsp\n\t"
2666 "pop %rax");
2667
2668 if (offset_p)
2669 *offset_p = 13;
2670 if (size_p)
2671 *size_p = 4;
2672 }
2673
2674 void
2675 amd64_emit_ge_goto (int *offset_p, int *size_p)
2676 {
2677 EMIT_ASM (amd64_ge,
2678 "cmp %rax,(%rsp)\n\t"
2679 "jnge .Lamd64_ge_fallthru\n\t"
2680 ".Lamd64_ge_jump:\n\t"
2681 "lea 0x8(%rsp),%rsp\n\t"
2682 "pop %rax\n\t"
2683 /* jmp, but don't trust the assembler to choose the right jump */
2684 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2685 ".Lamd64_ge_fallthru:\n\t"
2686 "lea 0x8(%rsp),%rsp\n\t"
2687 "pop %rax");
2688
2689 if (offset_p)
2690 *offset_p = 13;
2691 if (size_p)
2692 *size_p = 4;
2693 }
2694
2695 struct emit_ops amd64_emit_ops =
2696 {
2697 amd64_emit_prologue,
2698 amd64_emit_epilogue,
2699 amd64_emit_add,
2700 amd64_emit_sub,
2701 amd64_emit_mul,
2702 amd64_emit_lsh,
2703 amd64_emit_rsh_signed,
2704 amd64_emit_rsh_unsigned,
2705 amd64_emit_ext,
2706 amd64_emit_log_not,
2707 amd64_emit_bit_and,
2708 amd64_emit_bit_or,
2709 amd64_emit_bit_xor,
2710 amd64_emit_bit_not,
2711 amd64_emit_equal,
2712 amd64_emit_less_signed,
2713 amd64_emit_less_unsigned,
2714 amd64_emit_ref,
2715 amd64_emit_if_goto,
2716 amd64_emit_goto,
2717 amd64_write_goto_address,
2718 amd64_emit_const,
2719 amd64_emit_call,
2720 amd64_emit_reg,
2721 amd64_emit_pop,
2722 amd64_emit_stack_flush,
2723 amd64_emit_zero_ext,
2724 amd64_emit_swap,
2725 amd64_emit_stack_adjust,
2726 amd64_emit_int_call_1,
2727 amd64_emit_void_call_2,
2728 amd64_emit_eq_goto,
2729 amd64_emit_ne_goto,
2730 amd64_emit_lt_goto,
2731 amd64_emit_le_goto,
2732 amd64_emit_gt_goto,
2733 amd64_emit_ge_goto
2734 };
2735
2736 #endif /* __x86_64__ */
2737
2738 static void
2739 i386_emit_prologue (void)
2740 {
2741 EMIT_ASM32 (i386_prologue,
2742 "push %ebp\n\t"
2743 "mov %esp,%ebp\n\t"
2744 "push %ebx");
2745 /* At this point, the raw regs base address is at 8(%ebp), and the
2746 value pointer is at 12(%ebp). */
2747 }
2748
2749 static void
2750 i386_emit_epilogue (void)
2751 {
2752 EMIT_ASM32 (i386_epilogue,
2753 "mov 12(%ebp),%ecx\n\t"
2754 "mov %eax,(%ecx)\n\t"
2755 "mov %ebx,0x4(%ecx)\n\t"
2756 "xor %eax,%eax\n\t"
2757 "pop %ebx\n\t"
2758 "pop %ebp\n\t"
2759 "ret");
2760 }
2761
2762 static void
2763 i386_emit_add (void)
2764 {
2765 EMIT_ASM32 (i386_add,
2766 "add (%esp),%eax\n\t"
2767 "adc 0x4(%esp),%ebx\n\t"
2768 "lea 0x8(%esp),%esp");
2769 }
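
/* On the 32-bit side a 64-bit value is kept as a register pair: low
   half in %eax, high half in %ebx (the epilogue above stores the two
   halves to (%ecx) and 0x4(%ecx)).  The add/adc pair is therefore a
   standard double-word addition:

     lo = lo + lo_other;           add (%esp),%eax
     hi = hi + hi_other + carry;   adc 0x4(%esp),%ebx

   with the other operand's halves in the two stack slots that the
   trailing lea then discards.  */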
2770
2771 static void
2772 i386_emit_sub (void)
2773 {
2774 EMIT_ASM32 (i386_sub,
2775 "subl %eax,(%esp)\n\t"
2776 "sbbl %ebx,4(%esp)\n\t"
2777 "pop %eax\n\t"
2778 "pop %ebx\n\t");
2779 }
2780
2781 static void
2782 i386_emit_mul (void)
2783 {
2784 emit_error = 1;
2785 }
2786
2787 static void
2788 i386_emit_lsh (void)
2789 {
2790 emit_error = 1;
2791 }
2792
2793 static void
2794 i386_emit_rsh_signed (void)
2795 {
2796 emit_error = 1;
2797 }
2798
2799 static void
2800 i386_emit_rsh_unsigned (void)
2801 {
2802 emit_error = 1;
2803 }
2804
2805 static void
2806 i386_emit_ext (int arg)
2807 {
2808 switch (arg)
2809 {
2810 case 8:
2811 EMIT_ASM32 (i386_ext_8,
2812 "cbtw\n\t"
2813 "cwtl\n\t"
2814 "movl %eax,%ebx\n\t"
2815 "sarl $31,%ebx");
2816 break;
2817 case 16:
2818 EMIT_ASM32 (i386_ext_16,
2819 "cwtl\n\t"
2820 "movl %eax,%ebx\n\t"
2821 "sarl $31,%ebx");
2822 break;
2823 case 32:
2824 EMIT_ASM32 (i386_ext_32,
2825 "movl %eax,%ebx\n\t"
2826 "sarl $31,%ebx");
2827 break;
2828 default:
2829 emit_error = 1;
2830 }
2831 }
2832
2833 static void
2834 i386_emit_log_not (void)
2835 {
2836 EMIT_ASM32 (i386_log_not,
2837 "or %ebx,%eax\n\t"
2838 "test %eax,%eax\n\t"
2839 "sete %cl\n\t"
2840 "xor %ebx,%ebx\n\t"
2841 "movzbl %cl,%eax");
2842 }
2843
2844 static void
2845 i386_emit_bit_and (void)
2846 {
2847 EMIT_ASM32 (i386_and,
2848 "and (%esp),%eax\n\t"
2849 "and 0x4(%esp),%ebx\n\t"
2850 "lea 0x8(%esp),%esp");
2851 }
2852
2853 static void
2854 i386_emit_bit_or (void)
2855 {
2856 EMIT_ASM32 (i386_or,
2857 "or (%esp),%eax\n\t"
2858 "or 0x4(%esp),%ebx\n\t"
2859 "lea 0x8(%esp),%esp");
2860 }
2861
2862 static void
2863 i386_emit_bit_xor (void)
2864 {
2865 EMIT_ASM32 (i386_xor,
2866 "xor (%esp),%eax\n\t"
2867 "xor 0x4(%esp),%ebx\n\t"
2868 "lea 0x8(%esp),%esp");
2869 }
2870
2871 static void
2872 i386_emit_bit_not (void)
2873 {
2874 EMIT_ASM32 (i386_bit_not,
2875 "xor $0xffffffff,%eax\n\t"
2876 "xor $0xffffffff,%ebx\n\t");
2877 }
2878
2879 static void
2880 i386_emit_equal (void)
2881 {
2882 EMIT_ASM32 (i386_equal,
2883 "cmpl %ebx,4(%esp)\n\t"
2884 "jne .Li386_equal_false\n\t"
2885 "cmpl %eax,(%esp)\n\t"
2886 "je .Li386_equal_true\n\t"
2887 ".Li386_equal_false:\n\t"
2888 "xor %eax,%eax\n\t"
2889 "jmp .Li386_equal_end\n\t"
2890 ".Li386_equal_true:\n\t"
2891 "mov $1,%eax\n\t"
2892 ".Li386_equal_end:\n\t"
2893 "xor %ebx,%ebx\n\t"
2894 "lea 0x8(%esp),%esp");
2895 }
2896
2897 static void
2898 i386_emit_less_signed (void)
2899 {
2900 EMIT_ASM32 (i386_less_signed,
2901 "cmpl %ebx,4(%esp)\n\t"
2902 "jl .Li386_less_signed_true\n\t"
2903 "jne .Li386_less_signed_false\n\t"
2904 "cmpl %eax,(%esp)\n\t"
2905 "jl .Li386_less_signed_true\n\t"
2906 ".Li386_less_signed_false:\n\t"
2907 "xor %eax,%eax\n\t"
2908 "jmp .Li386_less_signed_end\n\t"
2909 ".Li386_less_signed_true:\n\t"
2910 "mov $1,%eax\n\t"
2911 ".Li386_less_signed_end:\n\t"
2912 "xor %ebx,%ebx\n\t"
2913 "lea 0x8(%esp),%esp");
2914 }
2915
2916 static void
2917 i386_emit_less_unsigned (void)
2918 {
2919 EMIT_ASM32 (i386_less_unsigned,
2920 "cmpl %ebx,4(%esp)\n\t"
2921 "jb .Li386_less_unsigned_true\n\t"
2922 "jne .Li386_less_unsigned_false\n\t"
2923 "cmpl %eax,(%esp)\n\t"
2924 "jb .Li386_less_unsigned_true\n\t"
2925 ".Li386_less_unsigned_false:\n\t"
2926 "xor %eax,%eax\n\t"
2927 "jmp .Li386_less_unsigned_end\n\t"
2928 ".Li386_less_unsigned_true:\n\t"
2929 "mov $1,%eax\n\t"
2930 ".Li386_less_unsigned_end:\n\t"
2931 "xor %ebx,%ebx\n\t"
2932 "lea 0x8(%esp),%esp");
2933 }
2934
2935 static void
2936 i386_emit_ref (int size)
2937 {
2938 switch (size)
2939 {
2940 case 1:
2941 EMIT_ASM32 (i386_ref1,
2942 "movb (%eax),%al");
2943 break;
2944 case 2:
2945 EMIT_ASM32 (i386_ref2,
2946 "movw (%eax),%ax");
2947 break;
2948 case 4:
2949 EMIT_ASM32 (i386_ref4,
2950 "movl (%eax),%eax");
2951 break;
2952 case 8:
2953 EMIT_ASM32 (i386_ref8,
2954 "movl 4(%eax),%ebx\n\t"
2955 "movl (%eax),%eax");
2956 break;
2957 }
2958 }
2959
2960 static void
2961 i386_emit_if_goto (int *offset_p, int *size_p)
2962 {
2963 EMIT_ASM32 (i386_if_goto,
2964 "mov %eax,%ecx\n\t"
2965 "or %ebx,%ecx\n\t"
2966 "pop %eax\n\t"
2967 "pop %ebx\n\t"
2968 "cmpl $0,%ecx\n\t"
2969 /* Don't trust the assembler to choose the right jump */
2970 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2971
2972 if (offset_p)
2973 *offset_p = 11; /* be sure that this matches the sequence above */
2974 if (size_p)
2975 *size_p = 4;
2976 }
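
/* The 11 above, spelled out (encodings as produced by GNU as):

     89 c1        mov %eax,%ecx   (2 bytes)
     09 d9        or %ebx,%ecx    (2 bytes)
     58           pop %eax        (1 byte)
     5b           pop %ebx        (1 byte)
     83 f9 00     cmpl $0,%ecx    (3 bytes)
     0f 85 ...    jne rel32       (2 opcode bytes)

   2 + 2 + 1 + 1 + 3 + 2 = 11 bytes precede the 4-byte displacement.  */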
2977
2978 static void
2979 i386_emit_goto (int *offset_p, int *size_p)
2980 {
2981 EMIT_ASM32 (i386_goto,
2982 /* Don't trust the assembler to choose the right jump */
2983 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2984 if (offset_p)
2985 *offset_p = 1;
2986 if (size_p)
2987 *size_p = 4;
2988 }
2989
2990 static void
2991 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2992 {
2993 int diff = (to - (from + size));
2994 unsigned char buf[sizeof (int)];
2995
2996 /* We're only doing 4-byte sizes at the moment. */
2997 if (size != 4)
2998 {
2999 emit_error = 1;
3000 return;
3001 }
3002
3003 memcpy (buf, &diff, sizeof (int));
3004 write_inferior_memory (from, buf, sizeof (int));
3005 }
3006
3007 static void
3008 i386_emit_const (LONGEST num)
3009 {
3010 unsigned char buf[16];
3011 int i, hi, lo;
3012 CORE_ADDR buildaddr = current_insn_ptr;
3013
3014 i = 0;
3015 buf[i++] = 0xb8; /* mov $<n>,%eax */
3016 lo = num & 0xffffffff;
3017 memcpy (&buf[i], &lo, sizeof (lo));
3018 i += 4;
3019 hi = ((num >> 32) & 0xffffffff);
3020 if (hi)
3021 {
3022 buf[i++] = 0xbb; /* mov $<n>,%ebx */
3023 memcpy (&buf[i], &hi, sizeof (hi));
3024 i += 4;
3025 }
3026 else
3027 {
3028 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3029 }
3030 append_insns (&buildaddr, i, buf);
3031 current_insn_ptr = buildaddr;
3032 }
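
/* Example (hypothetical constant): for num = 0x1234567890 the split
   above gives lo = 0x34567890 and hi = 0x12, emitting

     b8 90 78 56 34    mov $0x34567890,%eax
     bb 12 00 00 00    mov $0x12,%ebx

   whereas a constant whose high 32 bits are zero takes the cheaper
   two-byte "31 db" (xor %ebx,%ebx) path instead.  */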
3033
3034 static void
3035 i386_emit_call (CORE_ADDR fn)
3036 {
3037 unsigned char buf[16];
3038 int i, offset;
3039 CORE_ADDR buildaddr;
3040
3041 buildaddr = current_insn_ptr;
3042 i = 0;
3043 buf[i++] = 0xe8; /* call <reladdr> */
3044 offset = ((int) fn) - (buildaddr + 5);
3045 memcpy (buf + 1, &offset, 4);
3046 append_insns (&buildaddr, 5, buf);
3047 current_insn_ptr = buildaddr;
3048 }
3049
3050 static void
3051 i386_emit_reg (int reg)
3052 {
3053 unsigned char buf[16];
3054 int i;
3055 CORE_ADDR buildaddr;
3056
3057 EMIT_ASM32 (i386_reg_a,
3058 "sub $0x8,%esp");
3059 buildaddr = current_insn_ptr;
3060 i = 0;
3061 buf[i++] = 0xb8; /* mov $<n>,%eax */
3062 memcpy (&buf[i], &reg, sizeof (reg));
3063 i += 4;
3064 append_insns (&buildaddr, i, buf);
3065 current_insn_ptr = buildaddr;
3066 EMIT_ASM32 (i386_reg_b,
3067 "mov %eax,4(%esp)\n\t"
3068 "mov 8(%ebp),%eax\n\t"
3069 "mov %eax,(%esp)");
3070 i386_emit_call (get_raw_reg_func_addr ());
3071 EMIT_ASM32 (i386_reg_c,
3072 "xor %ebx,%ebx\n\t"
3073 "lea 0x8(%esp),%esp");
3074 }
3075
3076 static void
3077 i386_emit_pop (void)
3078 {
3079 EMIT_ASM32 (i386_pop,
3080 "pop %eax\n\t"
3081 "pop %ebx");
3082 }
3083
3084 static void
3085 i386_emit_stack_flush (void)
3086 {
3087 EMIT_ASM32 (i386_stack_flush,
3088 "push %ebx\n\t"
3089 "push %eax");
3090 }
3091
3092 static void
3093 i386_emit_zero_ext (int arg)
3094 {
3095 switch (arg)
3096 {
3097 case 8:
3098 EMIT_ASM32 (i386_zero_ext_8,
3099 "and $0xff,%eax\n\t"
3100 "xor %ebx,%ebx");
3101 break;
3102 case 16:
3103 EMIT_ASM32 (i386_zero_ext_16,
3104 "and $0xffff,%eax\n\t"
3105 "xor %ebx,%ebx");
3106 break;
3107 case 32:
3108 EMIT_ASM32 (i386_zero_ext_32,
3109 "xor %ebx,%ebx");
3110 break;
3111 default:
3112 emit_error = 1;
3113 }
3114 }
3115
3116 static void
3117 i386_emit_swap (void)
3118 {
3119 EMIT_ASM32 (i386_swap,
3120 "mov %eax,%ecx\n\t"
3121 "mov %ebx,%edx\n\t"
3122 "pop %eax\n\t"
3123 "pop %ebx\n\t"
3124 "push %edx\n\t"
3125 "push %ecx");
3126 }
3127
3128 static void
3129 i386_emit_stack_adjust (int n)
3130 {
3131 unsigned char buf[16];
3132 int i;
3133 CORE_ADDR buildaddr = current_insn_ptr;
3134
3135 i = 0;
3136 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3137 buf[i++] = 0x64;
3138 buf[i++] = 0x24;
3139 buf[i++] = n * 8;
3140 append_insns (&buildaddr, i, buf);
3141 current_insn_ptr = buildaddr;
3142 }
3143
3144 /* FN's prototype is `LONGEST(*fn)(int)'. */
3145
3146 static void
3147 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3148 {
3149 unsigned char buf[16];
3150 int i;
3151 CORE_ADDR buildaddr;
3152
3153 EMIT_ASM32 (i386_int_call_1_a,
3154 /* Reserve a bit of stack space. */
3155 "sub $0x8,%esp");
3156 /* Put the one argument on the stack. */
3157 buildaddr = current_insn_ptr;
3158 i = 0;
3159 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3160 buf[i++] = 0x04;
3161 buf[i++] = 0x24;
3162 memcpy (&buf[i], &arg1, sizeof (arg1));
3163 i += 4;
3164 append_insns (&buildaddr, i, buf);
3165 current_insn_ptr = buildaddr;
3166 i386_emit_call (fn);
3167 EMIT_ASM32 (i386_int_call_1_c,
3168 "mov %edx,%ebx\n\t"
3169 "lea 0x8(%esp),%esp");
3170 }
3171
3172 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3173
3174 static void
3175 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3176 {
3177 unsigned char buf[16];
3178 int i;
3179 CORE_ADDR buildaddr;
3180
3181 EMIT_ASM32 (i386_void_call_2_a,
3182               /* Preserve %eax only; %ebx is callee-saved in the
                      i386 ABI, so the callee restores it for us. */
3183 "push %eax\n\t"
3184 /* Reserve a bit of stack space for arguments. */
3185 "sub $0x10,%esp\n\t"
3186 /* Copy "top" to the second argument position. (Note that
3187                   we can't assume the function won't scribble on its
3188 arguments, so don't try to restore from this.) */
3189 "mov %eax,4(%esp)\n\t"
3190 "mov %ebx,8(%esp)");
3191 /* Put the first argument on the stack. */
3192 buildaddr = current_insn_ptr;
3193 i = 0;
3194 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3195 buf[i++] = 0x04;
3196 buf[i++] = 0x24;
3197 memcpy (&buf[i], &arg1, sizeof (arg1));
3198 i += 4;
3199 append_insns (&buildaddr, i, buf);
3200 current_insn_ptr = buildaddr;
3201 i386_emit_call (fn);
3202 EMIT_ASM32 (i386_void_call_2_b,
3203 "lea 0x10(%esp),%esp\n\t"
3204 /* Restore original stack top. */
3205 "pop %eax");
3206 }
3207
3208
3209 void
3210 i386_emit_eq_goto (int *offset_p, int *size_p)
3211 {
3212 EMIT_ASM32 (eq,
3213               /* Check the low half first; it is more likely to be the decider. */
3214 "cmpl %eax,(%esp)\n\t"
3215 "jne .Leq_fallthru\n\t"
3216 "cmpl %ebx,4(%esp)\n\t"
3217 "jne .Leq_fallthru\n\t"
3218 "lea 0x8(%esp),%esp\n\t"
3219 "pop %eax\n\t"
3220 "pop %ebx\n\t"
3221 /* jmp, but don't trust the assembler to choose the right jump */
3222 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3223 ".Leq_fallthru:\n\t"
3224 "lea 0x8(%esp),%esp\n\t"
3225 "pop %eax\n\t"
3226 "pop %ebx");
3227
3228 if (offset_p)
3229 *offset_p = 18;
3230 if (size_p)
3231 *size_p = 4;
3232 }
3233
3234 void
3235 i386_emit_ne_goto (int *offset_p, int *size_p)
3236 {
3237 EMIT_ASM32 (ne,
3238               /* Check the low half first; it is more likely to be the decider. */
3239 "cmpl %eax,(%esp)\n\t"
3240 "jne .Lne_jump\n\t"
3241 "cmpl %ebx,4(%esp)\n\t"
3242 "je .Lne_fallthru\n\t"
3243 ".Lne_jump:\n\t"
3244 "lea 0x8(%esp),%esp\n\t"
3245 "pop %eax\n\t"
3246 "pop %ebx\n\t"
3247 /* jmp, but don't trust the assembler to choose the right jump */
3248 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3249 ".Lne_fallthru:\n\t"
3250 "lea 0x8(%esp),%esp\n\t"
3251 "pop %eax\n\t"
3252 "pop %ebx");
3253
3254 if (offset_p)
3255 *offset_p = 18;
3256 if (size_p)
3257 *size_p = 4;
3258 }
3259
3260 void
3261 i386_emit_lt_goto (int *offset_p, int *size_p)
3262 {
3263 EMIT_ASM32 (lt,
3264 "cmpl %ebx,4(%esp)\n\t"
3265 "jl .Llt_jump\n\t"
3266 "jne .Llt_fallthru\n\t"
3267 "cmpl %eax,(%esp)\n\t"
3268 "jnl .Llt_fallthru\n\t"
3269 ".Llt_jump:\n\t"
3270 "lea 0x8(%esp),%esp\n\t"
3271 "pop %eax\n\t"
3272 "pop %ebx\n\t"
3273 /* jmp, but don't trust the assembler to choose the right jump */
3274 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3275 ".Llt_fallthru:\n\t"
3276 "lea 0x8(%esp),%esp\n\t"
3277 "pop %eax\n\t"
3278 "pop %ebx");
3279
3280 if (offset_p)
3281 *offset_p = 20;
3282 if (size_p)
3283 *size_p = 4;
3284 }
3285
3286 void
3287 i386_emit_le_goto (int *offset_p, int *size_p)
3288 {
3289 EMIT_ASM32 (le,
3290 "cmpl %ebx,4(%esp)\n\t"
3291 "jle .Lle_jump\n\t"
3292 "jne .Lle_fallthru\n\t"
3293 "cmpl %eax,(%esp)\n\t"
3294 "jnle .Lle_fallthru\n\t"
3295 ".Lle_jump:\n\t"
3296 "lea 0x8(%esp),%esp\n\t"
3297 "pop %eax\n\t"
3298 "pop %ebx\n\t"
3299 /* jmp, but don't trust the assembler to choose the right jump */
3300 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3301 ".Lle_fallthru:\n\t"
3302 "lea 0x8(%esp),%esp\n\t"
3303 "pop %eax\n\t"
3304 "pop %ebx");
3305
3306 if (offset_p)
3307 *offset_p = 20;
3308 if (size_p)
3309 *size_p = 4;
3310 }
3311
3312 void
3313 i386_emit_gt_goto (int *offset_p, int *size_p)
3314 {
3315 EMIT_ASM32 (gt,
3316 "cmpl %ebx,4(%esp)\n\t"
3317 "jg .Lgt_jump\n\t"
3318 "jne .Lgt_fallthru\n\t"
3319 "cmpl %eax,(%esp)\n\t"
3320 "jng .Lgt_fallthru\n\t"
3321 ".Lgt_jump:\n\t"
3322 "lea 0x8(%esp),%esp\n\t"
3323 "pop %eax\n\t"
3324 "pop %ebx\n\t"
3325 /* jmp, but don't trust the assembler to choose the right jump */
3326 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3327 ".Lgt_fallthru:\n\t"
3328 "lea 0x8(%esp),%esp\n\t"
3329 "pop %eax\n\t"
3330 "pop %ebx");
3331
3332 if (offset_p)
3333 *offset_p = 20;
3334 if (size_p)
3335 *size_p = 4;
3336 }
3337
3338 void
3339 i386_emit_ge_goto (int *offset_p, int *size_p)
3340 {
3341 EMIT_ASM32 (ge,
3342 "cmpl %ebx,4(%esp)\n\t"
3343 "jge .Lge_jump\n\t"
3344 "jne .Lge_fallthru\n\t"
3345 "cmpl %eax,(%esp)\n\t"
3346 "jnge .Lge_fallthru\n\t"
3347 ".Lge_jump:\n\t"
3348 "lea 0x8(%esp),%esp\n\t"
3349 "pop %eax\n\t"
3350 "pop %ebx\n\t"
3351 /* jmp, but don't trust the assembler to choose the right jump */
3352 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3353 ".Lge_fallthru:\n\t"
3354 "lea 0x8(%esp),%esp\n\t"
3355 "pop %eax\n\t"
3356 "pop %ebx");
3357
3358 if (offset_p)
3359 *offset_p = 20;
3360 if (size_p)
3361 *size_p = 4;
3362 }
3363
3364 struct emit_ops i386_emit_ops =
3365 {
3366 i386_emit_prologue,
3367 i386_emit_epilogue,
3368 i386_emit_add,
3369 i386_emit_sub,
3370 i386_emit_mul,
3371 i386_emit_lsh,
3372 i386_emit_rsh_signed,
3373 i386_emit_rsh_unsigned,
3374 i386_emit_ext,
3375 i386_emit_log_not,
3376 i386_emit_bit_and,
3377 i386_emit_bit_or,
3378 i386_emit_bit_xor,
3379 i386_emit_bit_not,
3380 i386_emit_equal,
3381 i386_emit_less_signed,
3382 i386_emit_less_unsigned,
3383 i386_emit_ref,
3384 i386_emit_if_goto,
3385 i386_emit_goto,
3386 i386_write_goto_address,
3387 i386_emit_const,
3388 i386_emit_call,
3389 i386_emit_reg,
3390 i386_emit_pop,
3391 i386_emit_stack_flush,
3392 i386_emit_zero_ext,
3393 i386_emit_swap,
3394 i386_emit_stack_adjust,
3395 i386_emit_int_call_1,
3396 i386_emit_void_call_2,
3397 i386_emit_eq_goto,
3398 i386_emit_ne_goto,
3399 i386_emit_lt_goto,
3400 i386_emit_le_goto,
3401 i386_emit_gt_goto,
3402 i386_emit_ge_goto
3403 };
3404
3405
3406 static struct emit_ops *
3407 x86_emit_ops (void)
3408 {
3409 #ifdef __x86_64__
3410 if (is_64bit_tdesc ())
3411 return &amd64_emit_ops;
3412 else
3413 #endif
3414 return &i386_emit_ops;
3415 }
3416
3417 static int
3418 x86_supports_range_stepping (void)
3419 {
3420 return 1;
3421 }
3422
3423 /* This is initialized assuming an amd64 target.
3424    x86_arch_setup will correct it at runtime for i386 targets. */
3425
3426 struct linux_target_ops the_low_target =
3427 {
3428 x86_arch_setup,
3429 x86_linux_regs_info,
3430 x86_cannot_fetch_register,
3431 x86_cannot_store_register,
3432 NULL, /* fetch_register */
3433 x86_get_pc,
3434 x86_set_pc,
3435 x86_breakpoint,
3436 x86_breakpoint_len,
3437 NULL,
3438 1,
3439 x86_breakpoint_at,
3440 x86_supports_z_point_type,
3441 x86_insert_point,
3442 x86_remove_point,
3443 x86_stopped_by_watchpoint,
3444 x86_stopped_data_address,
3445 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3446 native i386 case (no registers smaller than an xfer unit), and are not
3447 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3448 NULL,
3449 NULL,
3450 /* need to fix up i386 siginfo if host is amd64 */
3451 x86_siginfo_fixup,
3452 x86_linux_new_process,
3453 x86_linux_new_thread,
3454 x86_linux_prepare_to_resume,
3455 x86_linux_process_qsupported,
3456 x86_supports_tracepoints,
3457 x86_get_thread_area,
3458 x86_install_fast_tracepoint_jump_pad,
3459 x86_emit_ops,
3460 x86_get_min_fast_tracepoint_insn_len,
3461 x86_supports_range_stepping,
3462 };
3463
3464 void
3465 initialize_low_arch (void)
3466 {
3467 /* Initialize the Linux target descriptions. */
3468 #ifdef __x86_64__
3469 init_registers_amd64_linux ();
3470 init_registers_amd64_avx_linux ();
3471 init_registers_amd64_avx512_linux ();
3472 init_registers_amd64_mpx_linux ();
3473
3474 init_registers_x32_linux ();
3475 init_registers_x32_avx_linux ();
3476 init_registers_x32_avx512_linux ();
3477
3478 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3479 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3480 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3481 #endif
3482 init_registers_i386_linux ();
3483 init_registers_i386_mmx_linux ();
3484 init_registers_i386_avx_linux ();
3485 init_registers_i386_avx512_linux ();
3486 init_registers_i386_mpx_linux ();
3487
3488 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3489 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3490 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3491
3492 initialize_regsets_info (&x86_regsets_info);
3493 }