/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

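/* Instruction templates used when installing fast tracepoint jumps
   below: jump_insn is a jmp with a 32-bit relative offset (opcode
   0xe9); small_jump_insn is a jmp with an operand-size prefix and a
   16-bit relative offset (0x66 0xe9).  The zeroed offset bytes get
   patched in when a jump pad or trampoline is wired up.  */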
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
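
/* These constants select which segment base a PTRACE_ARCH_PRCTL
   request operates on; e.g. ptrace (PTRACE_ARCH_PRCTL, pid, &base,
   ARCH_GET_FS) below reads the FS base of the traced thread.  */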

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So the code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

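    /* The data returned by PTRACE_GET_THREAD_AREA has the layout of
       struct user_desc; desc[1] is its base_addr field, i.e. the
       segment base address we are after.  */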
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* The GS selector's low 3 bits are
                                      the RPL and TI fields; shift them
                                      off to get the descriptor table
                                      index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update target_regsets accordingly, maybe by moving
   target_regsets to linux_target_ops and setting the right one there,
   rather than having to modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
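/* The x86 software breakpoint is the one-byte INT3 instruction,
   opcode 0xCC.  */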
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved by other means.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
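  /* TYPE is the Z-packet type: '0' is a software breakpoint, and '2',
     '3' and '4' are write, read and access watchpoints respectively.
     Hardware breakpoints ('1') are not supported here.  */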
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, i386_low_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
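
/* The kernel's 32-bit siginfo is 128 bytes; the _pad member above
   rounds the union up so the structure matches that size.  */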

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc, at least up to 2.3.2, doesn't have si_timerid or si_overrun;
   si_timer1 and si_timer2 are there in their place.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object to or from the siginfo layout
   of the inferior's architecture.  Returns true if any conversion was
   done; false otherwise.  If DIRECTION is 1, copy from INF to NATIVE.
   If DIRECTION is 0, copy from NATIVE to INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

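      /* Byte 464 is within the software-reserved tail of the fxsave
         area, which is where the Linux kernel stashes the XCR0 value
         for userspace to find.  */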
      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Assume gdb doesn't support XML unless it tells us otherwise.  If
     gdb sends "xmlRegisters=" with "i386" in the qSupported query, it
     supports x86 XML target descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  int use_64bit = linux_pid_exe_is_elf_64_file (pid);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

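/* Parse a string of hex opcode bytes into BUF and return the byte
   count; for example, push_opcode (buf, "48 89 e6") stores the three
   bytes 0x48 0x89 0xe6 (mov %rsp,%rsi) and returns 3.  */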
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction that jumps into the pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
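
/* The finished amd64 jump pad thus contains, in order: the GPR saves
   (stack pointer first), pushfq and the tpaddr push built above; the
   0x18-byte collecting_t object guarded by the cmpxchg spin-lock; the
   gdb_collect call with the saved-register block as its second
   argument; the lock release and stack cleanup; the register
   restores; the relocated original instruction; and the jump back to
   just past the tracepoint.  */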

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction that jumps into the pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which first appeared on the
     i486 and so leaves the original i386 behind.  If we cared about
     that, this could use xchg instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
                                                %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (in_process_agent_loaded ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter
             fast tracepoint is not possible, but at least make
             GDBserver mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
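
/* For example, EMIT_ASM (my_add, "add (%rsp),%rax") assembles that
   add into this function's body between the start_my_add and
   end_my_add labels; the leading jmp skips the block at run time,
   while add_insns copies the bytes between the labels into the
   inferior.  */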

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a direct call.  Load the address into
         a register and call through it.  The bytes below encode %rdx,
         which is call-clobbered and not used for either of the
         arguments we pass, so we don't have to push/pop it.  */
      buf[i++] = 0x48; /* movabs $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
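
/* For instance, amd64_emit_stack_adjust (2) emits the bytes
   48 8d 64 24 10, i.e. lea 0x10(%rsp),%rsp, discarding two 8-byte
   stack slots; the 8-bit displacement is what limits N to small
   adjustments.  */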
2045
2046 /* FN's prototype is `LONGEST(*fn)(int)'. */
2047
2048 static void
2049 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2050 {
2051 unsigned char buf[16];
2052 int i;
2053 CORE_ADDR buildaddr;
2054
2055 buildaddr = current_insn_ptr;
2056 i = 0;
2057 buf[i++] = 0xbf; /* movl $<n>,%edi */
2058 memcpy (&buf[i], &arg1, sizeof (arg1));
2059 i += 4;
2060 append_insns (&buildaddr, i, buf);
2061 current_insn_ptr = buildaddr;
2062 amd64_emit_call (fn);
2063 }
2064
2065 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2066
2067 static void
2068 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2069 {
2070 unsigned char buf[16];
2071 int i;
2072 CORE_ADDR buildaddr;
2073
2074 buildaddr = current_insn_ptr;
2075 i = 0;
2076 buf[i++] = 0xbf; /* movl $<n>,%edi */
2077 memcpy (&buf[i], &arg1, sizeof (arg1));
2078 i += 4;
2079 append_insns (&buildaddr, i, buf);
2080 current_insn_ptr = buildaddr;
2081 EMIT_ASM (amd64_void_call_2_a,
2082 /* Save away a copy of the stack top. */
2083 "push %rax\n\t"
2084 /* Also pass top as the second argument. */
2085 "mov %rax,%rsi");
2086 amd64_emit_call (fn);
2087 EMIT_ASM (amd64_void_call_2_b,
2088 /* Restore the stack top, %rax may have been trashed. */
2089 "pop %rax");
2090 }
2091
2092 void
2093 amd64_emit_eq_goto (int *offset_p, int *size_p)
2094 {
2095 EMIT_ASM (amd64_eq,
2096 "cmp %rax,(%rsp)\n\t"
2097 "jne .Lamd64_eq_fallthru\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2099 "pop %rax\n\t"
2100 /* jmp, but don't trust the assembler to choose the right jump */
2101 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2102 ".Lamd64_eq_fallthru:\n\t"
2103 "lea 0x8(%rsp),%rsp\n\t"
2104 "pop %rax");
2105
2106 if (offset_p)
2107 *offset_p = 13;
2108 if (size_p)
2109 *size_p = 4;
2110 }
2111
2112 void
2113 amd64_emit_ne_goto (int *offset_p, int *size_p)
2114 {
2115 EMIT_ASM (amd64_ne,
2116 "cmp %rax,(%rsp)\n\t"
2117 "je .Lamd64_ne_fallthru\n\t"
2118 "lea 0x8(%rsp),%rsp\n\t"
2119 "pop %rax\n\t"
2120 /* jmp, but don't trust the assembler to choose the right jump */
2121 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2122 ".Lamd64_ne_fallthru:\n\t"
2123 "lea 0x8(%rsp),%rsp\n\t"
2124 "pop %rax");
2125
2126 if (offset_p)
2127 *offset_p = 13;
2128 if (size_p)
2129 *size_p = 4;
2130 }
2131
2132 static void
2133 amd64_emit_lt_goto (int *offset_p, int *size_p)
2134 {
2135 EMIT_ASM (amd64_lt,
2136 "cmp %rax,(%rsp)\n\t"
2137 "jnl .Lamd64_lt_fallthru\n\t"
2138 "lea 0x8(%rsp),%rsp\n\t"
2139 "pop %rax\n\t"
2140 /* jmp, but don't trust the assembler to choose the right jump */
2141 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2142 ".Lamd64_lt_fallthru:\n\t"
2143 "lea 0x8(%rsp),%rsp\n\t"
2144 "pop %rax");
2145
2146 if (offset_p)
2147 *offset_p = 13;
2148 if (size_p)
2149 *size_p = 4;
2150 }
2151
2152 static void
2153 amd64_emit_le_goto (int *offset_p, int *size_p)
2154 {
2155 EMIT_ASM (amd64_le,
2156 "cmp %rax,(%rsp)\n\t"
2157 "jnle .Lamd64_le_fallthru\n\t"
2158 "lea 0x8(%rsp),%rsp\n\t"
2159 "pop %rax\n\t"
2160 /* jmp, but don't trust the assembler to choose the right jump */
2161 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2162 ".Lamd64_le_fallthru:\n\t"
2163 "lea 0x8(%rsp),%rsp\n\t"
2164 "pop %rax");
2165
2166 if (offset_p)
2167 *offset_p = 13;
2168 if (size_p)
2169 *size_p = 4;
2170 }
2171
2172 static void
2173 amd64_emit_gt_goto (int *offset_p, int *size_p)
2174 {
2175 EMIT_ASM (amd64_gt,
2176 "cmp %rax,(%rsp)\n\t"
2177 "jng .Lamd64_gt_fallthru\n\t"
2178 "lea 0x8(%rsp),%rsp\n\t"
2179 "pop %rax\n\t"
2180 /* jmp, but don't trust the assembler to choose the right jump */
2181 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2182 ".Lamd64_gt_fallthru:\n\t"
2183 "lea 0x8(%rsp),%rsp\n\t"
2184 "pop %rax");
2185
2186 if (offset_p)
2187 *offset_p = 13;
2188 if (size_p)
2189 *size_p = 4;
2190 }
2191
2192 static void
2193 amd64_emit_ge_goto (int *offset_p, int *size_p)
2194 {
2195 EMIT_ASM (amd64_ge,
2196 "cmp %rax,(%rsp)\n\t"
2197 "jnge .Lamd64_ge_fallthru\n\t"
2198 ".Lamd64_ge_jump:\n\t"
2199 "lea 0x8(%rsp),%rsp\n\t"
2200 "pop %rax\n\t"
2201 /* jmp, but don't trust the assembler to choose the right jump */
2202 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2203 ".Lamd64_ge_fallthru:\n\t"
2204 "lea 0x8(%rsp),%rsp\n\t"
2205 "pop %rax");
2206
2207 if (offset_p)
2208 *offset_p = 13;
2209 if (size_p)
2210 *size_p = 4;
2211 }
2212
2213 struct emit_ops amd64_emit_ops =
2214 {
2215 amd64_emit_prologue,
2216 amd64_emit_epilogue,
2217 amd64_emit_add,
2218 amd64_emit_sub,
2219 amd64_emit_mul,
2220 amd64_emit_lsh,
2221 amd64_emit_rsh_signed,
2222 amd64_emit_rsh_unsigned,
2223 amd64_emit_ext,
2224 amd64_emit_log_not,
2225 amd64_emit_bit_and,
2226 amd64_emit_bit_or,
2227 amd64_emit_bit_xor,
2228 amd64_emit_bit_not,
2229 amd64_emit_equal,
2230 amd64_emit_less_signed,
2231 amd64_emit_less_unsigned,
2232 amd64_emit_ref,
2233 amd64_emit_if_goto,
2234 amd64_emit_goto,
2235 amd64_write_goto_address,
2236 amd64_emit_const,
2237 amd64_emit_call,
2238 amd64_emit_reg,
2239 amd64_emit_pop,
2240 amd64_emit_stack_flush,
2241 amd64_emit_zero_ext,
2242 amd64_emit_swap,
2243 amd64_emit_stack_adjust,
2244 amd64_emit_int_call_1,
2245 amd64_emit_void_call_2,
2246 amd64_emit_eq_goto,
2247 amd64_emit_ne_goto,
2248 amd64_emit_lt_goto,
2249 amd64_emit_le_goto,
2250 amd64_emit_gt_goto,
2251 amd64_emit_ge_goto
2252 };
2253
2254 #endif /* __x86_64__ */
2255
2256 static void
2257 i386_emit_prologue (void)
2258 {
2259 EMIT_ASM32 (i386_prologue,
2260 "push %ebp\n\t"
2261 "mov %esp,%ebp\n\t"
2262 "push %ebx");
2263 /* At this point, the raw regs base address is at 8(%ebp), and the
2264 value pointer is at 12(%ebp). */
2265 }
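/* In the 32-bit emitters that follow, a 64-bit top-of-stack value
   is split across a register pair: low half in %eax, high half in
   %ebx.  A spilled value occupies two 4-byte stack slots, low word
   at the lower address, which is why i386_emit_add below pairs an
   `add' on the low halves with an `adc' on the high halves.  */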
2266
2267 static void
2268 i386_emit_epilogue (void)
2269 {
2270 EMIT_ASM32 (i386_epilogue,
2271 "mov 12(%ebp),%ecx\n\t"
2272 "mov %eax,(%ecx)\n\t"
2273 "mov %ebx,0x4(%ecx)\n\t"
2274 "xor %eax,%eax\n\t"
2275 "pop %ebx\n\t"
2276 "pop %ebp\n\t"
2277 "ret");
2278 }
2279
2280 static void
2281 i386_emit_add (void)
2282 {
2283 EMIT_ASM32 (i386_add,
2284 "add (%esp),%eax\n\t"
2285 "adc 0x4(%esp),%ebx\n\t"
2286 "lea 0x8(%esp),%esp");
2287 }
2288
2289 static void
2290 i386_emit_sub (void)
2291 {
2292 EMIT_ASM32 (i386_sub,
2293 "subl %eax,(%esp)\n\t"
2294 "sbbl %ebx,4(%esp)\n\t"
2295 "pop %eax\n\t"
2296 "pop %ebx\n\t");
2297 }
2298
2299 static void
2300 i386_emit_mul (void)
2301 {
2302 emit_error = 1;
2303 }
2304
2305 static void
2306 i386_emit_lsh (void)
2307 {
2308 emit_error = 1;
2309 }
2310
2311 static void
2312 i386_emit_rsh_signed (void)
2313 {
2314 emit_error = 1;
2315 }
2316
2317 static void
2318 i386_emit_rsh_unsigned (void)
2319 {
2320 emit_error = 1;
2321 }
2322
2323 static void
2324 i386_emit_ext (int arg)
2325 {
2326 switch (arg)
2327 {
2328 case 8:
2329 EMIT_ASM32 (i386_ext_8,
2330 "cbtw\n\t"
2331 "cwtl\n\t"
2332 "movl %eax,%ebx\n\t"
2333 "sarl $31,%ebx");
2334 break;
2335 case 16:
2336 EMIT_ASM32 (i386_ext_16,
2337 "cwtl\n\t"
2338 "movl %eax,%ebx\n\t"
2339 "sarl $31,%ebx");
2340 break;
2341 case 32:
2342 EMIT_ASM32 (i386_ext_32,
2343 "movl %eax,%ebx\n\t"
2344 "sarl $31,%ebx");
2345 break;
2346 default:
2347 emit_error = 1;
2348 }
2349 }
2350
2351 static void
2352 i386_emit_log_not (void)
2353 {
2354 EMIT_ASM32 (i386_log_not,
2355 "or %ebx,%eax\n\t"
2356 "test %eax,%eax\n\t"
2357 "sete %cl\n\t"
2358 "xor %ebx,%ebx\n\t"
2359 "movzbl %cl,%eax");
2360 }
2361
2362 static void
2363 i386_emit_bit_and (void)
2364 {
2365 EMIT_ASM32 (i386_and,
2366 "and (%esp),%eax\n\t"
2367 "and 0x4(%esp),%ebx\n\t"
2368 "lea 0x8(%esp),%esp");
2369 }
2370
2371 static void
2372 i386_emit_bit_or (void)
2373 {
2374 EMIT_ASM32 (i386_or,
2375 "or (%esp),%eax\n\t"
2376 "or 0x4(%esp),%ebx\n\t"
2377 "lea 0x8(%esp),%esp");
2378 }
2379
2380 static void
2381 i386_emit_bit_xor (void)
2382 {
2383 EMIT_ASM32 (i386_xor,
2384 "xor (%esp),%eax\n\t"
2385 "xor 0x4(%esp),%ebx\n\t"
2386 "lea 0x8(%esp),%esp");
2387 }
2388
2389 static void
2390 i386_emit_bit_not (void)
2391 {
2392 EMIT_ASM32 (i386_bit_not,
2393 "xor $0xffffffff,%eax\n\t"
2394 "xor $0xffffffff,%ebx\n\t");
2395 }
2396
2397 static void
2398 i386_emit_equal (void)
2399 {
2400 EMIT_ASM32 (i386_equal,
2401 "cmpl %ebx,4(%esp)\n\t"
2402 "jne .Li386_equal_false\n\t"
2403 "cmpl %eax,(%esp)\n\t"
2404 "je .Li386_equal_true\n\t"
2405 ".Li386_equal_false:\n\t"
2406 "xor %eax,%eax\n\t"
2407 "jmp .Li386_equal_end\n\t"
2408 ".Li386_equal_true:\n\t"
2409 "mov $1,%eax\n\t"
2410 ".Li386_equal_end:\n\t"
2411 "xor %ebx,%ebx\n\t"
2412 "lea 0x8(%esp),%esp");
2413 }
2414
2415 static void
2416 i386_emit_less_signed (void)
2417 {
2418 EMIT_ASM32 (i386_less_signed,
2419 "cmpl %ebx,4(%esp)\n\t"
2420 "jl .Li386_less_signed_true\n\t"
2421 "jne .Li386_less_signed_false\n\t"
2422 "cmpl %eax,(%esp)\n\t"
2423 "jl .Li386_less_signed_true\n\t"
2424 ".Li386_less_signed_false:\n\t"
2425 "xor %eax,%eax\n\t"
2426 "jmp .Li386_less_signed_end\n\t"
2427 ".Li386_less_signed_true:\n\t"
2428 "mov $1,%eax\n\t"
2429 ".Li386_less_signed_end:\n\t"
2430 "xor %ebx,%ebx\n\t"
2431 "lea 0x8(%esp),%esp");
2432 }
2433
2434 static void
2435 i386_emit_less_unsigned (void)
2436 {
2437 EMIT_ASM32 (i386_less_unsigned,
2438 "cmpl %ebx,4(%esp)\n\t"
2439 "jb .Li386_less_unsigned_true\n\t"
2440 "jne .Li386_less_unsigned_false\n\t"
2441 "cmpl %eax,(%esp)\n\t"
2442 "jb .Li386_less_unsigned_true\n\t"
2443 ".Li386_less_unsigned_false:\n\t"
2444 "xor %eax,%eax\n\t"
2445 "jmp .Li386_less_unsigned_end\n\t"
2446 ".Li386_less_unsigned_true:\n\t"
2447 "mov $1,%eax\n\t"
2448 ".Li386_less_unsigned_end:\n\t"
2449 "xor %ebx,%ebx\n\t"
2450 "lea 0x8(%esp),%esp");
2451 }
2452
2453 static void
2454 i386_emit_ref (int size)
2455 {
2456 switch (size)
2457 {
2458 case 1:
2459 EMIT_ASM32 (i386_ref1,
2460 "movb (%eax),%al");
2461 break;
2462 case 2:
2463 EMIT_ASM32 (i386_ref2,
2464 "movw (%eax),%ax");
2465 break;
2466 case 4:
2467 EMIT_ASM32 (i386_ref4,
2468 "movl (%eax),%eax");
2469 break;
2470 case 8:
2471 EMIT_ASM32 (i386_ref8,
2472 "movl 4(%eax),%ebx\n\t"
2473 "movl (%eax),%eax");
2474 break;
2475 }
2476 }
2477
2478 static void
2479 i386_emit_if_goto (int *offset_p, int *size_p)
2480 {
2481 EMIT_ASM32 (i386_if_goto,
2482 "mov %eax,%ecx\n\t"
2483 "or %ebx,%ecx\n\t"
2484 "pop %eax\n\t"
2485 "pop %ebx\n\t"
2486 "cmpl $0,%ecx\n\t"
2487 /* Don't trust the assembler to choose the right jump */
2488 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2489
2490 if (offset_p)
2491 *offset_p = 11; /* 9 bytes of insns above plus the 2-byte 0x0f 0x85 opcode */
2492 if (size_p)
2493 *size_p = 4;
2494 }
2495
2496 static void
2497 i386_emit_goto (int *offset_p, int *size_p)
2498 {
2499 EMIT_ASM32 (i386_goto,
2500 /* Don't trust the assembler to choose the right jump */
2501 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2502 if (offset_p)
2503 *offset_p = 1;
2504 if (size_p)
2505 *size_p = 4;
2506 }
2507
2508 static void
2509 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2510 {
2511 int diff = (to - (from + size));
2512 unsigned char buf[sizeof (int)];
2513
2514 /* We're only doing 4-byte sizes at the moment. */
2515 if (size != 4)
2516 {
2517 emit_error = 1;
2518 return;
2519 }
2520
2521 memcpy (buf, &diff, sizeof (int));
2522 write_inferior_memory (from, buf, sizeof (int));
2523 }
2524
2525 static void
2526 i386_emit_const (LONGEST num)
2527 {
2528 unsigned char buf[16];
2529 int i, hi, lo;
2530 CORE_ADDR buildaddr = current_insn_ptr;
2531
2532 i = 0;
2533 buf[i++] = 0xb8; /* mov $<n>,%eax */
2534 lo = num & 0xffffffff;
2535 memcpy (&buf[i], &lo, sizeof (lo));
2536 i += 4;
2537 hi = ((num >> 32) & 0xffffffff);
2538 if (hi)
2539 {
2540 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2541 memcpy (&buf[i], &hi, sizeof (hi));
2542 i += 4;
2543 }
2544 else
2545 {
2546 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2547 }
2548 append_insns (&buildaddr, i, buf);
2549 current_insn_ptr = buildaddr;
2550 }
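/* For illustration: loading the constant 0x123456789 would emit

       b8 89 67 45 23	mov $0x23456789,%eax
       bb 01 00 00 00	mov $0x1,%ebx

   while a constant whose high half is zero gets the shorter
   `xor %ebx,%ebx' instead of the second mov.  */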
2551
2552 static void
2553 i386_emit_call (CORE_ADDR fn)
2554 {
2555 unsigned char buf[16];
2556 int i, offset;
2557 CORE_ADDR buildaddr;
2558
2559 buildaddr = current_insn_ptr;
2560 i = 0;
2561 buf[i++] = 0xe8; /* call <reladdr> */
2562 offset = ((int) fn) - (buildaddr + 5);
2563 memcpy (buf + 1, &offset, 4);
2564 append_insns (&buildaddr, 5, buf);
2565 current_insn_ptr = buildaddr;
2566 }
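/* The displacement of an `e8' call is relative to the end of the
   5-byte instruction, hence `buildaddr + 5' above; the rel32 fields
   patched into the 0xe9 jumps by i386_write_goto_address follow the
   same end-of-instruction convention.  */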
2567
2568 static void
2569 i386_emit_reg (int reg)
2570 {
2571 unsigned char buf[16];
2572 int i;
2573 CORE_ADDR buildaddr;
2574
2575 EMIT_ASM32 (i386_reg_a,
2576 "sub $0x8,%esp");
2577 buildaddr = current_insn_ptr;
2578 i = 0;
2579 buf[i++] = 0xb8; /* mov $<n>,%eax */
2580 memcpy (&buf[i], &reg, sizeof (reg));
2581 i += 4;
2582 append_insns (&buildaddr, i, buf);
2583 current_insn_ptr = buildaddr;
2584 EMIT_ASM32 (i386_reg_b,
2585 "mov %eax,4(%esp)\n\t"
2586 "mov 8(%ebp),%eax\n\t"
2587 "mov %eax,(%esp)");
2588 i386_emit_call (get_raw_reg_func_addr ());
2589 EMIT_ASM32 (i386_reg_c,
2590 "xor %ebx,%ebx\n\t"
2591 "lea 0x8(%esp),%esp");
2592 }
2593
2594 static void
2595 i386_emit_pop (void)
2596 {
2597 EMIT_ASM32 (i386_pop,
2598 "pop %eax\n\t"
2599 "pop %ebx");
2600 }
2601
2602 static void
2603 i386_emit_stack_flush (void)
2604 {
2605 EMIT_ASM32 (i386_stack_flush,
2606 "push %ebx\n\t"
2607 "push %eax");
2608 }
2609
2610 static void
2611 i386_emit_zero_ext (int arg)
2612 {
2613 switch (arg)
2614 {
2615 case 8:
2616 EMIT_ASM32 (i386_zero_ext_8,
2617 "and $0xff,%eax\n\t"
2618 "xor %ebx,%ebx");
2619 break;
2620 case 16:
2621 EMIT_ASM32 (i386_zero_ext_16,
2622 "and $0xffff,%eax\n\t"
2623 "xor %ebx,%ebx");
2624 break;
2625 case 32:
2626 EMIT_ASM32 (i386_zero_ext_32,
2627 "xor %ebx,%ebx");
2628 break;
2629 default:
2630 emit_error = 1;
2631 }
2632 }
2633
2634 static void
2635 i386_emit_swap (void)
2636 {
2637 EMIT_ASM32 (i386_swap,
2638 "mov %eax,%ecx\n\t"
2639 "mov %ebx,%edx\n\t"
2640 "pop %eax\n\t"
2641 "pop %ebx\n\t"
2642 "push %edx\n\t"
2643 "push %ecx");
2644 }
2645
2646 static void
2647 i386_emit_stack_adjust (int n)
2648 {
2649 unsigned char buf[16];
2650 int i;
2651 CORE_ADDR buildaddr = current_insn_ptr;
2652
2653 i = 0;
2654 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2655 buf[i++] = 0x64;
2656 buf[i++] = 0x24;
2657 buf[i++] = n * 8;
2658 append_insns (&buildaddr, i, buf);
2659 current_insn_ptr = buildaddr;
2660 }
2661
2662 /* FN's prototype is `LONGEST(*fn)(int)'. */
2663
2664 static void
2665 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2666 {
2667 unsigned char buf[16];
2668 int i;
2669 CORE_ADDR buildaddr;
2670
2671 EMIT_ASM32 (i386_int_call_1_a,
2672 /* Reserve a bit of stack space. */
2673 "sub $0x8,%esp");
2674 /* Put the one argument on the stack. */
2675 buildaddr = current_insn_ptr;
2676 i = 0;
2677 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2678 buf[i++] = 0x04;
2679 buf[i++] = 0x24;
2680 memcpy (&buf[i], &arg1, sizeof (arg1));
2681 i += 4;
2682 append_insns (&buildaddr, i, buf);
2683 current_insn_ptr = buildaddr;
2684 i386_emit_call (fn);
2685 EMIT_ASM32 (i386_int_call_1_c,
2686 "mov %edx,%ebx\n\t"
2687 "lea 0x8(%esp),%esp");
2688 }
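/* The trailing `mov %edx,%ebx' relies on the i386 convention for
   64-bit return values: FN hands back its LONGEST result in the
   %edx:%eax pair, and moving the high half into %ebx re-establishes
   the %ebx:%eax pair the other emitters expect.  */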
2689
2690 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2691
2692 static void
2693 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2694 {
2695 unsigned char buf[16];
2696 int i;
2697 CORE_ADDR buildaddr;
2698
2699 EMIT_ASM32 (i386_void_call_2_a,
2700 /* Preserve %eax only; we don't have to worry about %ebx. */
2701 "push %eax\n\t"
2702 /* Reserve a bit of stack space for arguments. */
2703 "sub $0x10,%esp\n\t"
2704 /* Copy "top" to the second argument position. (Note that
2705 we can't assume the function won't scribble on its
2706 arguments, so don't try to restore from this.) */
2707 "mov %eax,4(%esp)\n\t"
2708 "mov %ebx,8(%esp)");
2709 /* Put the first argument on the stack. */
2710 buildaddr = current_insn_ptr;
2711 i = 0;
2712 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2713 buf[i++] = 0x04;
2714 buf[i++] = 0x24;
2715 memcpy (&buf[i], &arg1, sizeof (arg1));
2716 i += 4;
2717 append_insns (&buildaddr, i, buf);
2718 current_insn_ptr = buildaddr;
2719 i386_emit_call (fn);
2720 EMIT_ASM32 (i386_void_call_2_b,
2721 "lea 0x10(%esp),%esp\n\t"
2722 /* Restore original stack top. */
2723 "pop %eax");
2724 }
2725
2726
2727 static void
2728 i386_emit_eq_goto (int *offset_p, int *size_p)
2729 {
2730 EMIT_ASM32 (eq,
2731 /* Check the low half first; it's more likely to be the decider. */
2732 "cmpl %eax,(%esp)\n\t"
2733 "jne .Leq_fallthru\n\t"
2734 "cmpl %ebx,4(%esp)\n\t"
2735 "jne .Leq_fallthru\n\t"
2736 "lea 0x8(%esp),%esp\n\t"
2737 "pop %eax\n\t"
2738 "pop %ebx\n\t"
2739 /* jmp, but don't trust the assembler to choose the right jump */
2740 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2741 ".Leq_fallthru:\n\t"
2742 "lea 0x8(%esp),%esp\n\t"
2743 "pop %eax\n\t"
2744 "pop %ebx");
2745
2746 if (offset_p)
2747 *offset_p = 18;
2748 if (size_p)
2749 *size_p = 4;
2750 }
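/* As on amd64, the i386 comparison emitters report where the rel32
   of the hand-encoded 0xe9 jump lives: 18 here, i.e. 17 bytes of
   instructions (3-byte cmp, 2-byte jne, 4-byte cmp, 2-byte jne,
   4-byte lea, two 1-byte pops) plus the 0xe9 opcode itself.  */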
2751
2752 static void
2753 i386_emit_ne_goto (int *offset_p, int *size_p)
2754 {
2755 EMIT_ASM32 (ne,
2756 /* Check the low half first; it's more likely to be the decider. */
2757 "cmpl %eax,(%esp)\n\t"
2758 "jne .Lne_jump\n\t"
2759 "cmpl %ebx,4(%esp)\n\t"
2760 "je .Lne_fallthru\n\t"
2761 ".Lne_jump:\n\t"
2762 "lea 0x8(%esp),%esp\n\t"
2763 "pop %eax\n\t"
2764 "pop %ebx\n\t"
2765 /* jmp, but don't trust the assembler to choose the right jump */
2766 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2767 ".Lne_fallthru:\n\t"
2768 "lea 0x8(%esp),%esp\n\t"
2769 "pop %eax\n\t"
2770 "pop %ebx");
2771
2772 if (offset_p)
2773 *offset_p = 18;
2774 if (size_p)
2775 *size_p = 4;
2776 }
2777
2778 static void
2779 i386_emit_lt_goto (int *offset_p, int *size_p)
2780 {
2781 EMIT_ASM32 (lt,
2782 "cmpl %ebx,4(%esp)\n\t"
2783 "jl .Llt_jump\n\t"
2784 "jne .Llt_fallthru\n\t"
2785 "cmpl %eax,(%esp)\n\t"
2786 "jnl .Llt_fallthru\n\t"
2787 ".Llt_jump:\n\t"
2788 "lea 0x8(%esp),%esp\n\t"
2789 "pop %eax\n\t"
2790 "pop %ebx\n\t"
2791 /* jmp, but don't trust the assembler to choose the right jump */
2792 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2793 ".Llt_fallthru:\n\t"
2794 "lea 0x8(%esp),%esp\n\t"
2795 "pop %eax\n\t"
2796 "pop %ebx");
2797
2798 if (offset_p)
2799 *offset_p = 20;
2800 if (size_p)
2801 *size_p = 4;
2802 }
2803
2804 static void
2805 i386_emit_le_goto (int *offset_p, int *size_p)
2806 {
2807 EMIT_ASM32 (le,
2808 "cmpl %ebx,4(%esp)\n\t"
2809 "jle .Lle_jump\n\t"
2810 "jne .Lle_fallthru\n\t"
2811 "cmpl %eax,(%esp)\n\t"
2812 "jnle .Lle_fallthru\n\t"
2813 ".Lle_jump:\n\t"
2814 "lea 0x8(%esp),%esp\n\t"
2815 "pop %eax\n\t"
2816 "pop %ebx\n\t"
2817 /* jmp, but don't trust the assembler to choose the right jump */
2818 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2819 ".Lle_fallthru:\n\t"
2820 "lea 0x8(%esp),%esp\n\t"
2821 "pop %eax\n\t"
2822 "pop %ebx");
2823
2824 if (offset_p)
2825 *offset_p = 20;
2826 if (size_p)
2827 *size_p = 4;
2828 }
2829
2830 static void
2831 i386_emit_gt_goto (int *offset_p, int *size_p)
2832 {
2833 EMIT_ASM32 (gt,
2834 "cmpl %ebx,4(%esp)\n\t"
2835 "jg .Lgt_jump\n\t"
2836 "jne .Lgt_fallthru\n\t"
2837 "cmpl %eax,(%esp)\n\t"
2838 "jng .Lgt_fallthru\n\t"
2839 ".Lgt_jump:\n\t"
2840 "lea 0x8(%esp),%esp\n\t"
2841 "pop %eax\n\t"
2842 "pop %ebx\n\t"
2843 /* jmp, but don't trust the assembler to choose the right jump */
2844 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2845 ".Lgt_fallthru:\n\t"
2846 "lea 0x8(%esp),%esp\n\t"
2847 "pop %eax\n\t"
2848 "pop %ebx");
2849
2850 if (offset_p)
2851 *offset_p = 20;
2852 if (size_p)
2853 *size_p = 4;
2854 }
2855
2856 static void
2857 i386_emit_ge_goto (int *offset_p, int *size_p)
2858 {
2859 EMIT_ASM32 (ge,
2860 "cmpl %ebx,4(%esp)\n\t"
2861 "jge .Lge_jump\n\t"
2862 "jne .Lge_fallthru\n\t"
2863 "cmpl %eax,(%esp)\n\t"
2864 "jnge .Lge_fallthru\n\t"
2865 ".Lge_jump:\n\t"
2866 "lea 0x8(%esp),%esp\n\t"
2867 "pop %eax\n\t"
2868 "pop %ebx\n\t"
2869 /* jmp, but don't trust the assembler to choose the right jump */
2870 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2871 ".Lge_fallthru:\n\t"
2872 "lea 0x8(%esp),%esp\n\t"
2873 "pop %eax\n\t"
2874 "pop %ebx");
2875
2876 if (offset_p)
2877 *offset_p = 20;
2878 if (size_p)
2879 *size_p = 4;
2880 }
2881
2882 struct emit_ops i386_emit_ops =
2883 {
2884 i386_emit_prologue,
2885 i386_emit_epilogue,
2886 i386_emit_add,
2887 i386_emit_sub,
2888 i386_emit_mul,
2889 i386_emit_lsh,
2890 i386_emit_rsh_signed,
2891 i386_emit_rsh_unsigned,
2892 i386_emit_ext,
2893 i386_emit_log_not,
2894 i386_emit_bit_and,
2895 i386_emit_bit_or,
2896 i386_emit_bit_xor,
2897 i386_emit_bit_not,
2898 i386_emit_equal,
2899 i386_emit_less_signed,
2900 i386_emit_less_unsigned,
2901 i386_emit_ref,
2902 i386_emit_if_goto,
2903 i386_emit_goto,
2904 i386_write_goto_address,
2905 i386_emit_const,
2906 i386_emit_call,
2907 i386_emit_reg,
2908 i386_emit_pop,
2909 i386_emit_stack_flush,
2910 i386_emit_zero_ext,
2911 i386_emit_swap,
2912 i386_emit_stack_adjust,
2913 i386_emit_int_call_1,
2914 i386_emit_void_call_2,
2915 i386_emit_eq_goto,
2916 i386_emit_ne_goto,
2917 i386_emit_lt_goto,
2918 i386_emit_le_goto,
2919 i386_emit_gt_goto,
2920 i386_emit_ge_goto
2921 };
2922
2923
2924 static struct emit_ops *
2925 x86_emit_ops (void)
2926 {
2927 #ifdef __x86_64__
2928 int use_64bit = register_size (0) == 8;
2929
2930 if (use_64bit)
2931 return &amd64_emit_ops;
2932 else
2933 #endif
2934 return &i386_emit_ops;
2935 }
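/* A sketch of how the selected table might be driven by the
   bytecode compiler (illustrative only, hence not compiled; the
   field names are assumed from the initializer order above, and
   `regnum' stands in for a real argument).  Compiling the agent
   expression `reg + 42' would go roughly like this:  */
#if 0
  {
    struct emit_ops *ops = x86_emit_ops ();

    ops->emit_prologue ();
    ops->emit_reg (regnum);	/* top of stack = raw register */
    ops->emit_stack_flush ();	/* spill it below the new top */
    ops->emit_const (42);	/* top of stack = 42 */
    ops->emit_add ();		/* pop the spill and add */
    ops->emit_epilogue ();	/* store the result and return */
  }
#endif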
2936
2937 /* This is initialized assuming an amd64 target.
2938 x86_arch_setup will correct it at run time if the target is i386. */
2939
2940 struct linux_target_ops the_low_target =
2941 {
2942 x86_arch_setup,
2943 -1,
2944 NULL,
2945 NULL,
2946 NULL,
2947 NULL,
2948 x86_get_pc,
2949 x86_set_pc,
2950 x86_breakpoint,
2951 x86_breakpoint_len,
2952 NULL,
2953 1,
2954 x86_breakpoint_at,
2955 x86_insert_point,
2956 x86_remove_point,
2957 x86_stopped_by_watchpoint,
2958 x86_stopped_data_address,
2959 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2960 native i386 case (no registers smaller than an xfer unit), and are not
2961 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2962 NULL,
2963 NULL,
2964 /* need to fix up i386 siginfo if host is amd64 */
2965 x86_siginfo_fixup,
2966 x86_linux_new_process,
2967 x86_linux_new_thread,
2968 x86_linux_prepare_to_resume,
2969 x86_linux_process_qsupported,
2970 x86_supports_tracepoints,
2971 x86_get_thread_area,
2972 x86_install_fast_tracepoint_jump_pad,
2973 x86_emit_ops,
2974 x86_get_min_fast_tracepoint_insn_len,
2975 };