/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


/* A jmp with a 32-bit relative offset (0xe9), and its 16-bit variant
   with an operand-size prefix (0x66 0xe9); the offset bytes are
   patched in when the jump pads are installed.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
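
/* Illustrative sketch (not part of the original file): on x86-64 these
   ARCH_* selectors are passed to ptrace together with PTRACE_ARCH_PRCTL
   to read or write a thread's segment bases.  For instance, reading the
   FS base of a stopped thread TID:

     void *base;
     long rc = ptrace (PTRACE_ARCH_PRCTL, tid, &base, ARCH_GET_FS);

   On success (rc == 0), BASE holds TID's FS segment base.
   ps_get_thread_area and x86_get_thread_area below do exactly this.  */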

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,    /* MPX registers BND0 ... BND3.  */
  -1, -1             /* MPX registers BNDCFGU, BNDSTATUS.  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_inferior, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp;
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved in some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               itself had made to the debug registers needs to be
               discarded; otherwise, i386_low_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */
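
/* Hedged illustration (not from the original source): the fixup code
   further below relies on the compat layout matching the kernel's
   siginfo size, and checks this at runtime with
   "sizeof (siginfo_t) != sizeof (compat_siginfo_t)".  A compile-time
   equivalent of that check could look like:

     typedef char compat_siginfo_size_check
       [sizeof (compat_siginfo_t) == sizeof (siginfo_t) ? 1 : -1];
*/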

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64-bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1, si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (get_thread_lwp (current_inferior));
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

   The same memory layout is used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of sw_usable_bytes (bytes 464..471) hold the
   OS-enabled extended state mask, which is the same as the extended
   control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We
   can use this mask together with the mask saved in the
   xstate_hdr_bytes to determine what states the processor/OS supports
   and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
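
/* Illustrative sketch (not part of the original file): given a raw
   XSAVE buffer fetched with PTRACE_GETREGSET/NT_X86_XSTATE, XCR0 sits
   at the offset defined above:

     uint64_t
     xsave_read_xcr0 (const unsigned char *xsave_buf)
     {
       uint64_t xcr0;
       memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
               sizeof xcr0);
       return xcr0;
     }

   x86_linux_read_description below performs the equivalent indexing
   on a uint64_t array.  */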

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (get_thread_lwp (current_inferior));

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = I386_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & I386_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & I386_XSTATE_ALL_MASK)
                {
                case I386_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case I386_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & I386_XSTATE_ALL_MASK)
                {
                case I386_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case I386_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & I386_XSTATE_ALL_MASK)
            {
            case (I386_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (I386_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_inferior
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *save_inferior = current_inferior;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_inferior = save_inferior;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
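
/* For example (illustrative, not in the original):

     unsigned char buf[16];
     int n = push_opcode (buf, "48 89 e6");

   leaves n == 3 and buf holding the encoding of "mov %rsp,%rsi".  */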

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure the stack pointer is saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* mov <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* mov <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* mov <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove the stack space that had been used for the collecting_t
     object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which the original i386
     lacks.  If we cared about that, this could use xchg instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove the stack space that had been used for the collecting_t
     object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
2053
2054 #ifdef __x86_64__
2055
2056 #define EMIT_ASM32(NAME,INSNS) \
2057 do \
2058 { \
2059 extern unsigned char start_ ## NAME, end_ ## NAME; \
2060 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2061 __asm__ (".code32\n" \
2062 "\t" "jmp end_" #NAME "\n" \
2063 "\t" "start_" #NAME ":\n" \
2064 "\t" INSNS "\n" \
2065 "\t" "end_" #NAME ":\n" \
2066 ".code64\n"); \
2067 } while (0)
2068
2069 #else
2070
2071 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2072
2073 #endif
2074
2075 #ifdef __x86_64__
2076
2077 static void
2078 amd64_emit_prologue (void)
2079 {
2080 EMIT_ASM (amd64_prologue,
2081 "pushq %rbp\n\t"
2082 "movq %rsp,%rbp\n\t"
2083 "sub $0x20,%rsp\n\t"
2084 "movq %rdi,-8(%rbp)\n\t"
2085 "movq %rsi,-16(%rbp)");
2086 }
2087
2088
2089 static void
2090 amd64_emit_epilogue (void)
2091 {
2092 EMIT_ASM (amd64_epilogue,
2093 "movq -16(%rbp),%rdi\n\t"
2094 "movq %rax,(%rdi)\n\t"
2095 "xor %rax,%rax\n\t"
2096 "leave\n\t"
2097 "ret");
2098 }
2099
2100 static void
2101 amd64_emit_add (void)
2102 {
2103 EMIT_ASM (amd64_add,
2104 "add (%rsp),%rax\n\t"
2105 "lea 0x8(%rsp),%rsp");
2106 }
2107
2108 static void
2109 amd64_emit_sub (void)
2110 {
2111 EMIT_ASM (amd64_sub,
2112 "sub %rax,(%rsp)\n\t"
2113 "pop %rax");
2114 }
2115
2116 static void
2117 amd64_emit_mul (void)
2118 {
2119 emit_error = 1;
2120 }
2121
2122 static void
2123 amd64_emit_lsh (void)
2124 {
2125 emit_error = 1;
2126 }
2127
2128 static void
2129 amd64_emit_rsh_signed (void)
2130 {
2131 emit_error = 1;
2132 }
2133
2134 static void
2135 amd64_emit_rsh_unsigned (void)
2136 {
2137 emit_error = 1;
2138 }
2139
2140 static void
2141 amd64_emit_ext (int arg)
2142 {
2143 switch (arg)
2144 {
2145 case 8:
2146 EMIT_ASM (amd64_ext_8,
2147 "cbtw\n\t"
2148 "cwtl\n\t"
2149 "cltq");
2150 break;
2151 case 16:
2152 EMIT_ASM (amd64_ext_16,
2153 "cwtl\n\t"
2154 "cltq");
2155 break;
2156 case 32:
2157 EMIT_ASM (amd64_ext_32,
2158 "cltq");
2159 break;
2160 default:
2161 emit_error = 1;
2162 }
2163 }
2164
2165 static void
2166 amd64_emit_log_not (void)
2167 {
2168 EMIT_ASM (amd64_log_not,
2169 "test %rax,%rax\n\t"
2170 "sete %cl\n\t"
2171 "movzbq %cl,%rax");
2172 }
2173
2174 static void
2175 amd64_emit_bit_and (void)
2176 {
2177 EMIT_ASM (amd64_and,
2178 "and (%rsp),%rax\n\t"
2179 "lea 0x8(%rsp),%rsp");
2180 }
2181
2182 static void
2183 amd64_emit_bit_or (void)
2184 {
2185 EMIT_ASM (amd64_or,
2186 "or (%rsp),%rax\n\t"
2187 "lea 0x8(%rsp),%rsp");
2188 }
2189
2190 static void
2191 amd64_emit_bit_xor (void)
2192 {
2193 EMIT_ASM (amd64_xor,
2194 "xor (%rsp),%rax\n\t"
2195 "lea 0x8(%rsp),%rsp");
2196 }
2197
2198 static void
2199 amd64_emit_bit_not (void)
2200 {
2201 EMIT_ASM (amd64_bit_not,
2202 "xorq $0xffffffffffffffff,%rax");
2203 }
2204
2205 static void
2206 amd64_emit_equal (void)
2207 {
2208 EMIT_ASM (amd64_equal,
2209 "cmp %rax,(%rsp)\n\t"
2210 "je .Lamd64_equal_true\n\t"
2211 "xor %rax,%rax\n\t"
2212 "jmp .Lamd64_equal_end\n\t"
2213 ".Lamd64_equal_true:\n\t"
2214 "mov $0x1,%rax\n\t"
2215 ".Lamd64_equal_end:\n\t"
2216 "lea 0x8(%rsp),%rsp");
2217 }
2218
2219 static void
2220 amd64_emit_less_signed (void)
2221 {
2222 EMIT_ASM (amd64_less_signed,
2223 "cmp %rax,(%rsp)\n\t"
2224 "jl .Lamd64_less_signed_true\n\t"
2225 "xor %rax,%rax\n\t"
2226 "jmp .Lamd64_less_signed_end\n\t"
2227 ".Lamd64_less_signed_true:\n\t"
2228 "mov $1,%rax\n\t"
2229 ".Lamd64_less_signed_end:\n\t"
2230 "lea 0x8(%rsp),%rsp");
2231 }
2232
2233 static void
2234 amd64_emit_less_unsigned (void)
2235 {
2236 EMIT_ASM (amd64_less_unsigned,
2237 "cmp %rax,(%rsp)\n\t"
2238 "jb .Lamd64_less_unsigned_true\n\t"
2239 "xor %rax,%rax\n\t"
2240 "jmp .Lamd64_less_unsigned_end\n\t"
2241 ".Lamd64_less_unsigned_true:\n\t"
2242 "mov $1,%rax\n\t"
2243 ".Lamd64_less_unsigned_end:\n\t"
2244 "lea 0x8(%rsp),%rsp");
2245 }
2246
2247 static void
2248 amd64_emit_ref (int size)
2249 {
2250 switch (size)
2251 {
2252 case 1:
2253 EMIT_ASM (amd64_ref1,
2254 "movb (%rax),%al");
2255 break;
2256 case 2:
2257 EMIT_ASM (amd64_ref2,
2258 "movw (%rax),%ax");
2259 break;
2260 case 4:
2261 EMIT_ASM (amd64_ref4,
2262 "movl (%rax),%eax");
2263 break;
2264 case 8:
2265 EMIT_ASM (amd64_ref8,
2266 "movq (%rax),%rax");
2267 break;
2268 }
2269 }
2270
2271 static void
2272 amd64_emit_if_goto (int *offset_p, int *size_p)
2273 {
2274 EMIT_ASM (amd64_if_goto,
2275 "mov %rax,%rcx\n\t"
2276 "pop %rax\n\t"
2277 "cmp $0,%rcx\n\t"
2278 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2279 if (offset_p)
2280 *offset_p = 10;
2281 if (size_p)
2282 *size_p = 4;
2283 }
2284
2285 static void
2286 amd64_emit_goto (int *offset_p, int *size_p)
2287 {
2288 EMIT_ASM (amd64_goto,
2289 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2290 if (offset_p)
2291 *offset_p = 1;
2292 if (size_p)
2293 *size_p = 4;
2294 }
2295
2296 static void
2297 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2298 {
2299 int diff = (to - (from + size));
2300 unsigned char buf[sizeof (int)];
2301
2302 if (size != 4)
2303 {
2304 emit_error = 1;
2305 return;
2306 }
2307
2308 memcpy (buf, &diff, sizeof (int));
2309 write_inferior_memory (from, buf, sizeof (int));
2310 }
2311
2312 static void
2313 amd64_emit_const (LONGEST num)
2314 {
2315 unsigned char buf[16];
2316 int i;
2317 CORE_ADDR buildaddr = current_insn_ptr;
2318
2319 i = 0;
2320 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2321 memcpy (&buf[i], &num, sizeof (num));
2322 i += 8;
2323 append_insns (&buildaddr, i, buf);
2324 current_insn_ptr = buildaddr;
2325 }
2326
2327 static void
2328 amd64_emit_call (CORE_ADDR fn)
2329 {
2330 unsigned char buf[16];
2331 int i;
2332 CORE_ADDR buildaddr;
2333 LONGEST offset64;
2334
2335 /* The destination function being in the shared library, may be
2336 >31-bits away off the compiled code pad. */
2337
2338 buildaddr = current_insn_ptr;
2339
2340 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2341
2342 i = 0;
2343
2344 if (offset64 > INT_MAX || offset64 < INT_MIN)
2345 {
2346 /* Offset is too large for a call. Use callq, but that requires
2347 a register, so avoid it if possible. Use r10, since it is
2348 call-clobbered, we don't have to push/pop it. */
2349 buf[i++] = 0x48; /* mov $fn,%r10 */
2350 buf[i++] = 0xba;
2351 memcpy (buf + i, &fn, 8);
2352 i += 8;
2353 buf[i++] = 0xff; /* callq *%r10 */
2354 buf[i++] = 0xd2;
2355 }
2356 else
2357 {
2358 int offset32 = offset64; /* we know we can't overflow here. */
2359 memcpy (buf + i, &offset32, 4);
2360 i += 4;
2361 }
2362
2363 append_insns (&buildaddr, i, buf);
2364 current_insn_ptr = buildaddr;
2365 }
2366
2367 static void
2368 amd64_emit_reg (int reg)
2369 {
2370 unsigned char buf[16];
2371 int i;
2372 CORE_ADDR buildaddr;
2373
2374 /* Assume raw_regs is still in %rdi. */
2375 buildaddr = current_insn_ptr;
2376 i = 0;
2377 buf[i++] = 0xbe; /* mov $<n>,%esi */
2378 memcpy (&buf[i], &reg, sizeof (reg));
2379 i += 4;
2380 append_insns (&buildaddr, i, buf);
2381 current_insn_ptr = buildaddr;
2382 amd64_emit_call (get_raw_reg_func_addr ());
2383 }
2384
2385 static void
2386 amd64_emit_pop (void)
2387 {
2388 EMIT_ASM (amd64_pop,
2389 "pop %rax");
2390 }
2391
2392 static void
2393 amd64_emit_stack_flush (void)
2394 {
2395 EMIT_ASM (amd64_stack_flush,
2396 "push %rax");
2397 }
2398
2399 static void
2400 amd64_emit_zero_ext (int arg)
2401 {
2402 switch (arg)
2403 {
2404 case 8:
2405 EMIT_ASM (amd64_zero_ext_8,
2406 "and $0xff,%rax");
2407 break;
2408 case 16:
2409 EMIT_ASM (amd64_zero_ext_16,
2410 "and $0xffff,%rax");
2411 break;
2412 case 32:
2413 EMIT_ASM (amd64_zero_ext_32,
2414 "mov $0xffffffff,%rcx\n\t"
2415 "and %rcx,%rax");
2416 break;
2417 default:
2418 emit_error = 1;
2419 }
2420 }
2421
2422 static void
2423 amd64_emit_swap (void)
2424 {
2425 EMIT_ASM (amd64_swap,
2426 "mov %rax,%rcx\n\t"
2427 "pop %rax\n\t"
2428 "push %rcx");
2429 }
2430
2431 static void
2432 amd64_emit_stack_adjust (int n)
2433 {
2434 unsigned char buf[16];
2435 int i;
2436 CORE_ADDR buildaddr = current_insn_ptr;
2437
2438 i = 0;
2439 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2440 buf[i++] = 0x8d;
2441 buf[i++] = 0x64;
2442 buf[i++] = 0x24;
2443 /* This only handles adjustments up to 16, but we don't expect any more. */
2444 buf[i++] = n * 8;
2445 append_insns (&buildaddr, i, buf);
2446 current_insn_ptr = buildaddr;
2447 }
2448
2449 /* FN's prototype is `LONGEST(*fn)(int)'. */
2450
2451 static void
2452 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2453 {
2454 unsigned char buf[16];
2455 int i;
2456 CORE_ADDR buildaddr;
2457
2458 buildaddr = current_insn_ptr;
2459 i = 0;
2460 buf[i++] = 0xbf; /* movl $<n>,%edi */
2461 memcpy (&buf[i], &arg1, sizeof (arg1));
2462 i += 4;
2463 append_insns (&buildaddr, i, buf);
2464 current_insn_ptr = buildaddr;
2465 amd64_emit_call (fn);
2466 }
2467
2468 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2469
2470 static void
2471 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2472 {
2473 unsigned char buf[16];
2474 int i;
2475 CORE_ADDR buildaddr;
2476
2477 buildaddr = current_insn_ptr;
2478 i = 0;
2479 buf[i++] = 0xbf; /* movl $<n>,%edi */
2480 memcpy (&buf[i], &arg1, sizeof (arg1));
2481 i += 4;
2482 append_insns (&buildaddr, i, buf);
2483 current_insn_ptr = buildaddr;
2484 EMIT_ASM (amd64_void_call_2_a,
2485 /* Save away a copy of the stack top. */
2486 "push %rax\n\t"
2487 /* Also pass top as the second argument. */
2488 "mov %rax,%rsi");
2489 amd64_emit_call (fn);
2490 EMIT_ASM (amd64_void_call_2_b,
2491 /* Restore the stack top, %rax may have been trashed. */
2492 "pop %rax");
2493 }
2494
2495 void
2496 amd64_emit_eq_goto (int *offset_p, int *size_p)
2497 {
2498 EMIT_ASM (amd64_eq,
2499 "cmp %rax,(%rsp)\n\t"
2500 "jne .Lamd64_eq_fallthru\n\t"
2501 "lea 0x8(%rsp),%rsp\n\t"
2502 "pop %rax\n\t"
2503 /* jmp, but don't trust the assembler to choose the right jump */
2504 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2505 ".Lamd64_eq_fallthru:\n\t"
2506 "lea 0x8(%rsp),%rsp\n\t"
2507 "pop %rax");
2508
2509 if (offset_p)
2510 *offset_p = 13;
2511 if (size_p)
2512 *size_p = 4;
2513 }
2514
2515 void
2516 amd64_emit_ne_goto (int *offset_p, int *size_p)
2517 {
2518 EMIT_ASM (amd64_ne,
2519 "cmp %rax,(%rsp)\n\t"
2520 "je .Lamd64_ne_fallthru\n\t"
2521 "lea 0x8(%rsp),%rsp\n\t"
2522 "pop %rax\n\t"
2523 /* jmp, but don't trust the assembler to choose the right jump */
2524 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2525 ".Lamd64_ne_fallthru:\n\t"
2526 "lea 0x8(%rsp),%rsp\n\t"
2527 "pop %rax");
2528
2529 if (offset_p)
2530 *offset_p = 13;
2531 if (size_p)
2532 *size_p = 4;
2533 }
2534
2535 void
2536 amd64_emit_lt_goto (int *offset_p, int *size_p)
2537 {
2538 EMIT_ASM (amd64_lt,
2539 "cmp %rax,(%rsp)\n\t"
2540 "jnl .Lamd64_lt_fallthru\n\t"
2541 "lea 0x8(%rsp),%rsp\n\t"
2542 "pop %rax\n\t"
2543 /* jmp, but don't trust the assembler to choose the right jump */
2544 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2545 ".Lamd64_lt_fallthru:\n\t"
2546 "lea 0x8(%rsp),%rsp\n\t"
2547 "pop %rax");
2548
2549 if (offset_p)
2550 *offset_p = 13;
2551 if (size_p)
2552 *size_p = 4;
2553 }
2554
2555 void
2556 amd64_emit_le_goto (int *offset_p, int *size_p)
2557 {
2558 EMIT_ASM (amd64_le,
2559 "cmp %rax,(%rsp)\n\t"
2560 "jnle .Lamd64_le_fallthru\n\t"
2561 "lea 0x8(%rsp),%rsp\n\t"
2562 "pop %rax\n\t"
2563 /* jmp, but don't trust the assembler to choose the right jump */
2564 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2565 ".Lamd64_le_fallthru:\n\t"
2566 "lea 0x8(%rsp),%rsp\n\t"
2567 "pop %rax");
2568
2569 if (offset_p)
2570 *offset_p = 13;
2571 if (size_p)
2572 *size_p = 4;
2573 }
2574
2575 void
2576 amd64_emit_gt_goto (int *offset_p, int *size_p)
2577 {
2578 EMIT_ASM (amd64_gt,
2579 "cmp %rax,(%rsp)\n\t"
2580 "jng .Lamd64_gt_fallthru\n\t"
2581 "lea 0x8(%rsp),%rsp\n\t"
2582 "pop %rax\n\t"
2583 /* jmp, but don't trust the assembler to choose the right jump */
2584 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2585 ".Lamd64_gt_fallthru:\n\t"
2586 "lea 0x8(%rsp),%rsp\n\t"
2587 "pop %rax");
2588
2589 if (offset_p)
2590 *offset_p = 13;
2591 if (size_p)
2592 *size_p = 4;
2593 }
2594
2595 void
2596 amd64_emit_ge_goto (int *offset_p, int *size_p)
2597 {
2598 EMIT_ASM (amd64_ge,
2599 "cmp %rax,(%rsp)\n\t"
2600 "jnge .Lamd64_ge_fallthru\n\t"
2601 ".Lamd64_ge_jump:\n\t"
2602 "lea 0x8(%rsp),%rsp\n\t"
2603 "pop %rax\n\t"
2604 /* jmp, but don't trust the assembler to choose the right jump */
2605 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2606 ".Lamd64_ge_fallthru:\n\t"
2607 "lea 0x8(%rsp),%rsp\n\t"
2608 "pop %rax");
2609
2610 if (offset_p)
2611 *offset_p = 13;
2612 if (size_p)
2613 *size_p = 4;
2614 }
2615
2616 struct emit_ops amd64_emit_ops =
2617 {
2618 amd64_emit_prologue,
2619 amd64_emit_epilogue,
2620 amd64_emit_add,
2621 amd64_emit_sub,
2622 amd64_emit_mul,
2623 amd64_emit_lsh,
2624 amd64_emit_rsh_signed,
2625 amd64_emit_rsh_unsigned,
2626 amd64_emit_ext,
2627 amd64_emit_log_not,
2628 amd64_emit_bit_and,
2629 amd64_emit_bit_or,
2630 amd64_emit_bit_xor,
2631 amd64_emit_bit_not,
2632 amd64_emit_equal,
2633 amd64_emit_less_signed,
2634 amd64_emit_less_unsigned,
2635 amd64_emit_ref,
2636 amd64_emit_if_goto,
2637 amd64_emit_goto,
2638 amd64_write_goto_address,
2639 amd64_emit_const,
2640 amd64_emit_call,
2641 amd64_emit_reg,
2642 amd64_emit_pop,
2643 amd64_emit_stack_flush,
2644 amd64_emit_zero_ext,
2645 amd64_emit_swap,
2646 amd64_emit_stack_adjust,
2647 amd64_emit_int_call_1,
2648 amd64_emit_void_call_2,
2649 amd64_emit_eq_goto,
2650 amd64_emit_ne_goto,
2651 amd64_emit_lt_goto,
2652 amd64_emit_le_goto,
2653 amd64_emit_gt_goto,
2654 amd64_emit_ge_goto
2655 };
2656
2657 #endif /* __x86_64__ */
2658
2659 static void
2660 i386_emit_prologue (void)
2661 {
2662 EMIT_ASM32 (i386_prologue,
2663 "push %ebp\n\t"
2664 "mov %esp,%ebp\n\t"
2665 "push %ebx");
2666 /* At this point, the raw regs base address is at 8(%ebp), and the
2667 value pointer is at 12(%ebp). */
2668 }
2669
2670 static void
2671 i386_emit_epilogue (void)
2672 {
2673 EMIT_ASM32 (i386_epilogue,
2674 "mov 12(%ebp),%ecx\n\t"
2675 "mov %eax,(%ecx)\n\t"
2676 "mov %ebx,0x4(%ecx)\n\t"
2677 "xor %eax,%eax\n\t"
2678 "pop %ebx\n\t"
2679 "pop %ebp\n\t"
2680 "ret");
2681 }
2682
2683 static void
2684 i386_emit_add (void)
2685 {
2686 EMIT_ASM32 (i386_add,
2687 "add (%esp),%eax\n\t"
2688 "adc 0x4(%esp),%ebx\n\t"
2689 "lea 0x8(%esp),%esp");
2690 }
2691
2692 static void
2693 i386_emit_sub (void)
2694 {
2695 EMIT_ASM32 (i386_sub,
2696 "subl %eax,(%esp)\n\t"
2697 "sbbl %ebx,4(%esp)\n\t"
2698 "pop %eax\n\t"
2699 "pop %ebx\n\t");
2700 }
2701
2702 static void
2703 i386_emit_mul (void)
2704 {
2705 emit_error = 1;
2706 }
2707
2708 static void
2709 i386_emit_lsh (void)
2710 {
2711 emit_error = 1;
2712 }
2713
2714 static void
2715 i386_emit_rsh_signed (void)
2716 {
2717 emit_error = 1;
2718 }
2719
2720 static void
2721 i386_emit_rsh_unsigned (void)
2722 {
2723 emit_error = 1;
2724 }
2725
2726 static void
2727 i386_emit_ext (int arg)
2728 {
2729 switch (arg)
2730 {
2731 case 8:
2732 EMIT_ASM32 (i386_ext_8,
2733 "cbtw\n\t"
2734 "cwtl\n\t"
2735 "movl %eax,%ebx\n\t"
2736 "sarl $31,%ebx");
2737 break;
2738 case 16:
2739 EMIT_ASM32 (i386_ext_16,
2740 "cwtl\n\t"
2741 "movl %eax,%ebx\n\t"
2742 "sarl $31,%ebx");
2743 break;
2744 case 32:
2745 EMIT_ASM32 (i386_ext_32,
2746 "movl %eax,%ebx\n\t"
2747 "sarl $31,%ebx");
2748 break;
2749 default:
2750 emit_error = 1;
2751 }
2752 }
2753
2754 static void
2755 i386_emit_log_not (void)
2756 {
2757 EMIT_ASM32 (i386_log_not,
2758 "or %ebx,%eax\n\t"
2759 "test %eax,%eax\n\t"
2760 "sete %cl\n\t"
2761 "xor %ebx,%ebx\n\t"
2762 "movzbl %cl,%eax");
2763 }
2764
2765 static void
2766 i386_emit_bit_and (void)
2767 {
2768 EMIT_ASM32 (i386_and,
2769 "and (%esp),%eax\n\t"
2770 "and 0x4(%esp),%ebx\n\t"
2771 "lea 0x8(%esp),%esp");
2772 }
2773
2774 static void
2775 i386_emit_bit_or (void)
2776 {
2777 EMIT_ASM32 (i386_or,
2778 "or (%esp),%eax\n\t"
2779 "or 0x4(%esp),%ebx\n\t"
2780 "lea 0x8(%esp),%esp");
2781 }
2782
2783 static void
2784 i386_emit_bit_xor (void)
2785 {
2786 EMIT_ASM32 (i386_xor,
2787 "xor (%esp),%eax\n\t"
2788 "xor 0x4(%esp),%ebx\n\t"
2789 "lea 0x8(%esp),%esp");
2790 }
2791
2792 static void
2793 i386_emit_bit_not (void)
2794 {
2795 EMIT_ASM32 (i386_bit_not,
2796 "xor $0xffffffff,%eax\n\t"
2797 "xor $0xffffffff,%ebx\n\t");
2798 }
2799
2800 static void
2801 i386_emit_equal (void)
2802 {
2803 EMIT_ASM32 (i386_equal,
2804 "cmpl %ebx,4(%esp)\n\t"
2805 "jne .Li386_equal_false\n\t"
2806 "cmpl %eax,(%esp)\n\t"
2807 "je .Li386_equal_true\n\t"
2808 ".Li386_equal_false:\n\t"
2809 "xor %eax,%eax\n\t"
2810 "jmp .Li386_equal_end\n\t"
2811 ".Li386_equal_true:\n\t"
2812 "mov $1,%eax\n\t"
2813 ".Li386_equal_end:\n\t"
2814 "xor %ebx,%ebx\n\t"
2815 "lea 0x8(%esp),%esp");
2816 }
2817
2818 static void
2819 i386_emit_less_signed (void)
2820 {
2821 EMIT_ASM32 (i386_less_signed,
2822 "cmpl %ebx,4(%esp)\n\t"
2823 "jl .Li386_less_signed_true\n\t"
2824 "jne .Li386_less_signed_false\n\t"
2825 "cmpl %eax,(%esp)\n\t"
2826 "jl .Li386_less_signed_true\n\t"
2827 ".Li386_less_signed_false:\n\t"
2828 "xor %eax,%eax\n\t"
2829 "jmp .Li386_less_signed_end\n\t"
2830 ".Li386_less_signed_true:\n\t"
2831 "mov $1,%eax\n\t"
2832 ".Li386_less_signed_end:\n\t"
2833 "xor %ebx,%ebx\n\t"
2834 "lea 0x8(%esp),%esp");
2835 }
2836
2837 static void
2838 i386_emit_less_unsigned (void)
2839 {
2840 EMIT_ASM32 (i386_less_unsigned,
2841 "cmpl %ebx,4(%esp)\n\t"
2842 "jb .Li386_less_unsigned_true\n\t"
2843 "jne .Li386_less_unsigned_false\n\t"
2844 "cmpl %eax,(%esp)\n\t"
2845 "jb .Li386_less_unsigned_true\n\t"
2846 ".Li386_less_unsigned_false:\n\t"
2847 "xor %eax,%eax\n\t"
2848 "jmp .Li386_less_unsigned_end\n\t"
2849 ".Li386_less_unsigned_true:\n\t"
2850 "mov $1,%eax\n\t"
2851 ".Li386_less_unsigned_end:\n\t"
2852 "xor %ebx,%ebx\n\t"
2853 "lea 0x8(%esp),%esp");
2854 }
2855
2856 static void
2857 i386_emit_ref (int size)
2858 {
2859 switch (size)
2860 {
2861 case 1:
2862 EMIT_ASM32 (i386_ref1,
2863 "movb (%eax),%al");
2864 break;
2865 case 2:
2866 EMIT_ASM32 (i386_ref2,
2867 "movw (%eax),%ax");
2868 break;
2869 case 4:
2870 EMIT_ASM32 (i386_ref4,
2871 "movl (%eax),%eax");
2872 break;
2873 case 8:
2874 EMIT_ASM32 (i386_ref8,
2875 "movl 4(%eax),%ebx\n\t"
2876 "movl (%eax),%eax");
2877 break;
2878 }
2879 }
2880
2881 static void
2882 i386_emit_if_goto (int *offset_p, int *size_p)
2883 {
2884 EMIT_ASM32 (i386_if_goto,
2885 "mov %eax,%ecx\n\t"
2886 "or %ebx,%ecx\n\t"
2887 "pop %eax\n\t"
2888 "pop %ebx\n\t"
2889 "cmpl $0,%ecx\n\t"
2890 /* Don't trust the assembler to choose the right jump */
2891 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2892
2893 if (offset_p)
2894 *offset_p = 11; /* be sure that this matches the sequence above */
2895 if (size_p)
2896 *size_p = 4;
2897 }
2898
2899 static void
2900 i386_emit_goto (int *offset_p, int *size_p)
2901 {
2902 EMIT_ASM32 (i386_goto,
2903 /* Don't trust the assembler to choose the right jump */
2904 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2905 if (offset_p)
2906 *offset_p = 1;
2907 if (size_p)
2908 *size_p = 4;
2909 }
2910
2911 static void
2912 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2913 {
2914 int diff = (to - (from + size));
2915 unsigned char buf[sizeof (int)];
2916
2917 /* We're only doing 4-byte sizes at the moment. */
2918 if (size != 4)
2919 {
2920 emit_error = 1;
2921 return;
2922 }
2923
2924 memcpy (buf, &diff, sizeof (int));
2925 write_inferior_memory (from, buf, sizeof (int));
2926 }
2927
2928 static void
2929 i386_emit_const (LONGEST num)
2930 {
2931 unsigned char buf[16];
2932 int i, hi, lo;
2933 CORE_ADDR buildaddr = current_insn_ptr;
2934
2935 i = 0;
2936 buf[i++] = 0xb8; /* mov $<n>,%eax */
2937 lo = num & 0xffffffff;
2938 memcpy (&buf[i], &lo, sizeof (lo));
2939 i += 4;
2940 hi = ((num >> 32) & 0xffffffff);
2941 if (hi)
2942 {
2943 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2944 memcpy (&buf[i], &hi, sizeof (hi));
2945 i += 4;
2946 }
2947 else
2948 {
2949 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2950 }
2951 append_insns (&buildaddr, i, buf);
2952 current_insn_ptr = buildaddr;
2953 }
2954
2955 static void
2956 i386_emit_call (CORE_ADDR fn)
2957 {
2958 unsigned char buf[16];
2959 int i, offset;
2960 CORE_ADDR buildaddr;
2961
2962 buildaddr = current_insn_ptr;
2963 i = 0;
2964 buf[i++] = 0xe8; /* call <reladdr> */
2965 offset = ((int) fn) - (buildaddr + 5);
2966 memcpy (buf + 1, &offset, 4);
2967 append_insns (&buildaddr, 5, buf);
2968 current_insn_ptr = buildaddr;
2969 }
2970
2971 static void
2972 i386_emit_reg (int reg)
2973 {
2974 unsigned char buf[16];
2975 int i;
2976 CORE_ADDR buildaddr;
2977
2978 EMIT_ASM32 (i386_reg_a,
2979 "sub $0x8,%esp");
2980 buildaddr = current_insn_ptr;
2981 i = 0;
2982 buf[i++] = 0xb8; /* mov $<n>,%eax */
2983 memcpy (&buf[i], &reg, sizeof (reg));
2984 i += 4;
2985 append_insns (&buildaddr, i, buf);
2986 current_insn_ptr = buildaddr;
2987 EMIT_ASM32 (i386_reg_b,
2988 "mov %eax,4(%esp)\n\t"
2989 "mov 8(%ebp),%eax\n\t"
2990 "mov %eax,(%esp)");
2991 i386_emit_call (get_raw_reg_func_addr ());
2992 EMIT_ASM32 (i386_reg_c,
2993 "xor %ebx,%ebx\n\t"
2994 "lea 0x8(%esp),%esp");
2995 }
2996
2997 static void
2998 i386_emit_pop (void)
2999 {
3000 EMIT_ASM32 (i386_pop,
3001 "pop %eax\n\t"
3002 "pop %ebx");
3003 }
3004
3005 static void
3006 i386_emit_stack_flush (void)
3007 {
3008 EMIT_ASM32 (i386_stack_flush,
3009 "push %ebx\n\t"
3010 "push %eax");
3011 }
3012
3013 static void
3014 i386_emit_zero_ext (int arg)
3015 {
3016 switch (arg)
3017 {
3018 case 8:
3019 EMIT_ASM32 (i386_zero_ext_8,
3020 "and $0xff,%eax\n\t"
3021 "xor %ebx,%ebx");
3022 break;
3023 case 16:
3024 EMIT_ASM32 (i386_zero_ext_16,
3025 "and $0xffff,%eax\n\t"
3026 "xor %ebx,%ebx");
3027 break;
3028 case 32:
3029 EMIT_ASM32 (i386_zero_ext_32,
3030 "xor %ebx,%ebx");
3031 break;
3032 default:
3033 emit_error = 1;
3034 }
3035 }
3036
3037 static void
3038 i386_emit_swap (void)
3039 {
3040 EMIT_ASM32 (i386_swap,
3041 "mov %eax,%ecx\n\t"
3042 "mov %ebx,%edx\n\t"
3043 "pop %eax\n\t"
3044 "pop %ebx\n\t"
3045 "push %edx\n\t"
3046 "push %ecx");
3047 }
3048
3049 static void
3050 i386_emit_stack_adjust (int n)
3051 {
3052 unsigned char buf[16];
3053 int i;
3054 CORE_ADDR buildaddr = current_insn_ptr;
3055
3056 i = 0;
3057 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3058 buf[i++] = 0x64;
3059 buf[i++] = 0x24;
3060 buf[i++] = n * 8;
3061 append_insns (&buildaddr, i, buf);
3062 current_insn_ptr = buildaddr;
3063 }
3064
3065 /* FN's prototype is `LONGEST(*fn)(int)'. */
3066
3067 static void
3068 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3069 {
3070 unsigned char buf[16];
3071 int i;
3072 CORE_ADDR buildaddr;
3073
3074 EMIT_ASM32 (i386_int_call_1_a,
3075 /* Reserve a bit of stack space. */
3076 "sub $0x8,%esp");
3077 /* Put the one argument on the stack. */
3078 buildaddr = current_insn_ptr;
3079 i = 0;
3080 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3081 buf[i++] = 0x04;
3082 buf[i++] = 0x24;
3083 memcpy (&buf[i], &arg1, sizeof (arg1));
3084 i += 4;
3085 append_insns (&buildaddr, i, buf);
3086 current_insn_ptr = buildaddr;
3087 i386_emit_call (fn);
3088 EMIT_ASM32 (i386_int_call_1_c,
3089 "mov %edx,%ebx\n\t"
3090 "lea 0x8(%esp),%esp");
3091 }
3092
3093 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3094
3095 static void
3096 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3097 {
3098 unsigned char buf[16];
3099 int i;
3100 CORE_ADDR buildaddr;
3101
3102 EMIT_ASM32 (i386_void_call_2_a,
3103 /* Preserve %eax only; we don't have to worry about %ebx. */
3104 "push %eax\n\t"
3105 /* Reserve a bit of stack space for arguments. */
3106 "sub $0x10,%esp\n\t"
3107 /* Copy "top" to the second argument position. (Note that
3108 we can't assume function won't scribble on its
3109 arguments, so don't try to restore from this.) */
3110 "mov %eax,4(%esp)\n\t"
3111 "mov %ebx,8(%esp)");
3112 /* Put the first argument on the stack. */
3113 buildaddr = current_insn_ptr;
3114 i = 0;
3115 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3116 buf[i++] = 0x04;
3117 buf[i++] = 0x24;
3118 memcpy (&buf[i], &arg1, sizeof (arg1));
3119 i += 4;
3120 append_insns (&buildaddr, i, buf);
3121 current_insn_ptr = buildaddr;
3122 i386_emit_call (fn);
3123 EMIT_ASM32 (i386_void_call_2_b,
3124 "lea 0x10(%esp),%esp\n\t"
3125 /* Restore original stack top. */
3126 "pop %eax");
3127 }
3128
3129
3130 void
3131 i386_emit_eq_goto (int *offset_p, int *size_p)
3132 {
3133 EMIT_ASM32 (eq,
3134 /* Check low half first, more likely to be decider */
3135 "cmpl %eax,(%esp)\n\t"
3136 "jne .Leq_fallthru\n\t"
3137 "cmpl %ebx,4(%esp)\n\t"
3138 "jne .Leq_fallthru\n\t"
3139 "lea 0x8(%esp),%esp\n\t"
3140 "pop %eax\n\t"
3141 "pop %ebx\n\t"
3142 /* jmp, but don't trust the assembler to choose the right jump */
3143 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3144 ".Leq_fallthru:\n\t"
3145 "lea 0x8(%esp),%esp\n\t"
3146 "pop %eax\n\t"
3147 "pop %ebx");
3148
3149 if (offset_p)
3150 *offset_p = 18;
3151 if (size_p)
3152 *size_p = 4;
3153 }
3154
3155 void
3156 i386_emit_ne_goto (int *offset_p, int *size_p)
3157 {
3158 EMIT_ASM32 (ne,
3159 /* Check low half first, more likely to be decider */
3160 "cmpl %eax,(%esp)\n\t"
3161 "jne .Lne_jump\n\t"
3162 "cmpl %ebx,4(%esp)\n\t"
3163 "je .Lne_fallthru\n\t"
3164 ".Lne_jump:\n\t"
3165 "lea 0x8(%esp),%esp\n\t"
3166 "pop %eax\n\t"
3167 "pop %ebx\n\t"
3168 /* jmp, but don't trust the assembler to choose the right jump */
3169 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3170 ".Lne_fallthru:\n\t"
3171 "lea 0x8(%esp),%esp\n\t"
3172 "pop %eax\n\t"
3173 "pop %ebx");
3174
3175 if (offset_p)
3176 *offset_p = 18;
3177 if (size_p)
3178 *size_p = 4;
3179 }
3180
3181 void
3182 i386_emit_lt_goto (int *offset_p, int *size_p)
3183 {
3184 EMIT_ASM32 (lt,
3185 "cmpl %ebx,4(%esp)\n\t"
3186 "jl .Llt_jump\n\t"
3187 "jne .Llt_fallthru\n\t"
3188 "cmpl %eax,(%esp)\n\t"
3189 "jnl .Llt_fallthru\n\t"
3190 ".Llt_jump:\n\t"
3191 "lea 0x8(%esp),%esp\n\t"
3192 "pop %eax\n\t"
3193 "pop %ebx\n\t"
3194 /* jmp, but don't trust the assembler to choose the right jump */
3195 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3196 ".Llt_fallthru:\n\t"
3197 "lea 0x8(%esp),%esp\n\t"
3198 "pop %eax\n\t"
3199 "pop %ebx");
3200
3201 if (offset_p)
3202 *offset_p = 20;
3203 if (size_p)
3204 *size_p = 4;
3205 }
3206
3207 void
3208 i386_emit_le_goto (int *offset_p, int *size_p)
3209 {
3210 EMIT_ASM32 (le,
3211 "cmpl %ebx,4(%esp)\n\t"
3212 "jle .Lle_jump\n\t"
3213 "jne .Lle_fallthru\n\t"
3214 "cmpl %eax,(%esp)\n\t"
3215 "jnle .Lle_fallthru\n\t"
3216 ".Lle_jump:\n\t"
3217 "lea 0x8(%esp),%esp\n\t"
3218 "pop %eax\n\t"
3219 "pop %ebx\n\t"
3220 /* jmp, but don't trust the assembler to choose the right jump */
3221 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3222 ".Lle_fallthru:\n\t"
3223 "lea 0x8(%esp),%esp\n\t"
3224 "pop %eax\n\t"
3225 "pop %ebx");
3226
3227 if (offset_p)
3228 *offset_p = 20;
3229 if (size_p)
3230 *size_p = 4;
3231 }
3232
3233 void
3234 i386_emit_gt_goto (int *offset_p, int *size_p)
3235 {
3236 EMIT_ASM32 (gt,
3237 "cmpl %ebx,4(%esp)\n\t"
3238 "jg .Lgt_jump\n\t"
3239 "jne .Lgt_fallthru\n\t"
3240 "cmpl %eax,(%esp)\n\t"
3241 "jng .Lgt_fallthru\n\t"
3242 ".Lgt_jump:\n\t"
3243 "lea 0x8(%esp),%esp\n\t"
3244 "pop %eax\n\t"
3245 "pop %ebx\n\t"
3246 /* jmp, but don't trust the assembler to choose the right jump */
3247 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3248 ".Lgt_fallthru:\n\t"
3249 "lea 0x8(%esp),%esp\n\t"
3250 "pop %eax\n\t"
3251 "pop %ebx");
3252
3253 if (offset_p)
3254 *offset_p = 20;
3255 if (size_p)
3256 *size_p = 4;
3257 }
3258
3259 void
3260 i386_emit_ge_goto (int *offset_p, int *size_p)
3261 {
3262 EMIT_ASM32 (ge,
3263 "cmpl %ebx,4(%esp)\n\t"
3264 "jge .Lge_jump\n\t"
3265 "jne .Lge_fallthru\n\t"
3266 "cmpl %eax,(%esp)\n\t"
3267 "jnge .Lge_fallthru\n\t"
3268 ".Lge_jump:\n\t"
3269 "lea 0x8(%esp),%esp\n\t"
3270 "pop %eax\n\t"
3271 "pop %ebx\n\t"
3272 /* jmp, but don't trust the assembler to choose the right jump */
3273 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3274 ".Lge_fallthru:\n\t"
3275 "lea 0x8(%esp),%esp\n\t"
3276 "pop %eax\n\t"
3277 "pop %ebx");
3278
3279 if (offset_p)
3280 *offset_p = 20;
3281 if (size_p)
3282 *size_p = 4;
3283 }
3284
3285 struct emit_ops i386_emit_ops =
3286 {
3287 i386_emit_prologue,
3288 i386_emit_epilogue,
3289 i386_emit_add,
3290 i386_emit_sub,
3291 i386_emit_mul,
3292 i386_emit_lsh,
3293 i386_emit_rsh_signed,
3294 i386_emit_rsh_unsigned,
3295 i386_emit_ext,
3296 i386_emit_log_not,
3297 i386_emit_bit_and,
3298 i386_emit_bit_or,
3299 i386_emit_bit_xor,
3300 i386_emit_bit_not,
3301 i386_emit_equal,
3302 i386_emit_less_signed,
3303 i386_emit_less_unsigned,
3304 i386_emit_ref,
3305 i386_emit_if_goto,
3306 i386_emit_goto,
3307 i386_write_goto_address,
3308 i386_emit_const,
3309 i386_emit_call,
3310 i386_emit_reg,
3311 i386_emit_pop,
3312 i386_emit_stack_flush,
3313 i386_emit_zero_ext,
3314 i386_emit_swap,
3315 i386_emit_stack_adjust,
3316 i386_emit_int_call_1,
3317 i386_emit_void_call_2,
3318 i386_emit_eq_goto,
3319 i386_emit_ne_goto,
3320 i386_emit_lt_goto,
3321 i386_emit_le_goto,
3322 i386_emit_gt_goto,
3323 i386_emit_ge_goto
3324 };
3325
3326
3327 static struct emit_ops *
3328 x86_emit_ops (void)
3329 {
3330 #ifdef __x86_64__
3331 if (is_64bit_tdesc ())
3332 return &amd64_emit_ops;
3333 else
3334 #endif
3335 return &i386_emit_ops;
3336 }
3337
3338 static int
3339 x86_supports_range_stepping (void)
3340 {
3341 return 1;
3342 }
3343
3344 /* This is initialized assuming an amd64 target.
3345 x86_arch_setup will correct it for i386 or amd64 targets. */
3346
3347 struct linux_target_ops the_low_target =
3348 {
3349 x86_arch_setup,
3350 x86_linux_regs_info,
3351 x86_cannot_fetch_register,
3352 x86_cannot_store_register,
3353 NULL, /* fetch_register */
3354 x86_get_pc,
3355 x86_set_pc,
3356 x86_breakpoint,
3357 x86_breakpoint_len,
3358 NULL,
3359 1,
3360 x86_breakpoint_at,
3361 x86_insert_point,
3362 x86_remove_point,
3363 x86_stopped_by_watchpoint,
3364 x86_stopped_data_address,
3365 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3366 native i386 case (no registers smaller than an xfer unit), and are not
3367 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3368 NULL,
3369 NULL,
3370 /* need to fix up i386 siginfo if host is amd64 */
3371 x86_siginfo_fixup,
3372 x86_linux_new_process,
3373 x86_linux_new_thread,
3374 x86_linux_prepare_to_resume,
3375 x86_linux_process_qsupported,
3376 x86_supports_tracepoints,
3377 x86_get_thread_area,
3378 x86_install_fast_tracepoint_jump_pad,
3379 x86_emit_ops,
3380 x86_get_min_fast_tracepoint_insn_len,
3381 x86_supports_range_stepping,
3382 };
3383
3384 void
3385 initialize_low_arch (void)
3386 {
3387 /* Initialize the Linux target descriptions. */
3388 #ifdef __x86_64__
3389 init_registers_amd64_linux ();
3390 init_registers_amd64_avx_linux ();
3391 init_registers_amd64_mpx_linux ();
3392
3393 init_registers_x32_linux ();
3394 init_registers_x32_avx_linux ();
3395
3396 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3397 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3398 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3399 #endif
3400 init_registers_i386_linux ();
3401 init_registers_i386_mmx_linux ();
3402 init_registers_i386_avx_linux ();
3403 init_registers_i386_mpx_linux ();
3404
3405 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3406 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3407 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3408
3409 initialize_regsets_info (&x86_regsets_info);
3410 }
This page took 0.146334 seconds and 4 git commands to generate.