/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    *(int *)base = desc[1];
    return PS_OK;
  }
}
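
/* A note on the 32-bit branch above: PTRACE_GET_THREAD_AREA fills in
   a GDT entry descriptor (a struct user_desc, viewed here as four
   32-bit words), and the word at index 1 is the segment's base
   address, which is why desc[1] is handed back as the thread area.  */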

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}



static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update target_regsets accordingly, maybe by
   moving target_regsets to linux_target_ops and setting the right one
   there, rather than having to modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
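
/* Note that an entry whose size field is 0 is skipped by the generic
   regset code in linux-low.c; the routines below use that to disable
   PTRACE_GETREGSET or PTRACE_GETFPXREGS at runtime on kernels that
   lack them.  */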

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later, just before resuming the lwp;
         here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}

/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1, si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
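
/* A sketch of why the field-by-field conversion below is needed
   (layout reasoning, not definitive kernel documentation): in the
   32-bit ABI, pointers and clock values are 4 bytes wide, so union
   members such as si_ptr and si_addr live at different offsets than
   in the native 64-bit siginfo_t, even though both objects occupy
   the same 128 bytes overall (which x86_siginfo_fixup checks below).
   The si_code/si_signo tests select which union member is live.  */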

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}

static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];
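      /* (Offset 464 falls in the sw_reserved area of the FXSAVE
         region, where the Linux kernel records the XCR0 value for
         ptrace clients; the constant reflects that kernel-layout
         assumption rather than a header definition here.)  */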

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
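
/* A usage sketch: push_opcode (&buf[i], "48 89 e6") appends the
   three bytes 0x48 0x89 0xe6 (mov %rsp,%rsi) to BUF and returns 3,
   letting the jump-pad builders below write instruction sequences as
   readable hex strings.  */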

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at
   the tracepoint address.  */
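
/* An overview of the pad the code below builds (stack offsets such
   as 0x18 are taken from the emitted instructions themselves):

     1. Push all general-purpose registers, the flags, and the
        tracepoint PC, forming the saved-register block.
     2. Reserve 0x18 bytes of stack for a collecting_t object holding
        the tracepoint pointer and this thread's identifier (%fs:0x0).
     3. Spin on `lock cmpxchg' against LOCKADDR until the lock is
        taken.
     4. Call the collector with the tracepoint and the register block
        as arguments.
     5. Release the lock, restore the saved registers, execute the
        relocated original instruction, and jump back to the
        instruction following the tracepoint.  */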

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in a shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at
   the tracepoint address.  */
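
/* This is the 32-bit counterpart of the pad above.  As the code
   below shows, the differences are that the whole register file is
   saved via pushad plus explicit segment-register pushes, the
   arguments to gdb_collect are passed on the stack instead of in
   registers, and, when the traced instruction is only 4 bytes long,
   a separate trampoline is created and reached through a
   2-byte-offset jump, because the normal 5-byte jump would not fit
   over the original instruction.  */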

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (in_process_agent_loaded ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter
             fast tracepoint is not possible, but at least make
             GDBserver mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)                                           \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ ("jmp end_" #NAME "\n"                                    \
               "\t" "start_" #NAME ":"                                  \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":");                                  \
    } while (0)
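
/* For example (an illustrative expansion with a hypothetical name),
   EMIT_ASM (my_pop, "pop %rax") assembles the `pop' between labels
   start_my_pop and end_my_pop inside the enclosing function, jumps
   over those bytes at run time, and add_insns copies everything
   between the two labels into the inferior at current_insn_ptr.  */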

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)                                          \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ (".code32\n"                                              \
               "\t" "jmp end_" #NAME "\n"                               \
               "\t" "start_" #NAME ":\n"                                \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":\n"                                  \
               ".code64\n");                                            \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in a shared library, may be more
     than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a direct call.  Use an indirect callq
         through a call-clobbered register, so we don't have to
         push/pop it.  (Note the bytes below encode %rdx: 0x48 0xba is
         movabs into %rdx, and 0xff 0xd2 is callq *%rdx.)  */
      buf[i++] = 0x48; /* movabs $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */
      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
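
/* A worked example of the displacement test above (addresses made up
   for illustration): with buildaddr at 0x7ffff7ff7000 and FN at
   0x7ffff7a12000, offset64 = FN - (buildaddr + 5) = -0x5e5005, which
   fits in 32 bits, so the 5-byte e8 form is used; destinations more
   than +/-2GB away take the indirect-register path instead.  */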
1957
1958 static void
1959 amd64_emit_reg (int reg)
1960 {
1961 unsigned char buf[16];
1962 int i;
1963 CORE_ADDR buildaddr;
1964
1965 /* Assume raw_regs is still in %rdi. */
1966 buildaddr = current_insn_ptr;
1967 i = 0;
1968 buf[i++] = 0xbe; /* mov $<n>,%esi */
1969 memcpy (&buf[i], &reg, sizeof (reg));
1970 i += 4;
1971 append_insns (&buildaddr, i, buf);
1972 current_insn_ptr = buildaddr;
1973 amd64_emit_call (get_raw_reg_func_addr ());
1974 }
1975
1976 static void
1977 amd64_emit_pop (void)
1978 {
1979 EMIT_ASM (amd64_pop,
1980 "pop %rax");
1981 }
1982
1983 static void
1984 amd64_emit_stack_flush (void)
1985 {
1986 EMIT_ASM (amd64_stack_flush,
1987 "push %rax");
1988 }
1989
1990 static void
1991 amd64_emit_zero_ext (int arg)
1992 {
1993 switch (arg)
1994 {
1995 case 8:
1996 EMIT_ASM (amd64_zero_ext_8,
1997 "and $0xff,%rax");
1998 break;
1999 case 16:
2000 EMIT_ASM (amd64_zero_ext_16,
2001 "and $0xffff,%rax");
2002 break;
2003 case 32:
2004 EMIT_ASM (amd64_zero_ext_32,
2005 "mov $0xffffffff,%rcx\n\t"
2006 "and %rcx,%rax");
2007 break;
2008 default:
2009 emit_error = 1;
2010 }
2011 }
2012
2013 static void
2014 amd64_emit_swap (void)
2015 {
2016 EMIT_ASM (amd64_swap,
2017 "mov %rax,%rcx\n\t"
2018 "pop %rax\n\t"
2019 "push %rcx");
2020 }
2021
2022 static void
2023 amd64_emit_stack_adjust (int n)
2024 {
2025 unsigned char buf[16];
2026 int i;
2027 CORE_ADDR buildaddr = current_insn_ptr;
2028
2029 i = 0;
2030 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2031 buf[i++] = 0x8d;
2032 buf[i++] = 0x64;
2033 buf[i++] = 0x24;
2034 /* This only handles adjustments up to 16, but we don't expect any more. */
2035 buf[i++] = n * 8;
2036 append_insns (&buildaddr, i, buf);
2037 current_insn_ptr = buildaddr;
2038 }
2039
2040 /* FN's prototype is `LONGEST(*fn)(int)'. */
2041
2042 static void
2043 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2044 {
2045 unsigned char buf[16];
2046 int i;
2047 CORE_ADDR buildaddr;
2048
2049 buildaddr = current_insn_ptr;
2050 i = 0;
2051 buf[i++] = 0xbf; /* movl $<n>,%edi */
2052 memcpy (&buf[i], &arg1, sizeof (arg1));
2053 i += 4;
2054 append_insns (&buildaddr, i, buf);
2055 current_insn_ptr = buildaddr;
2056 amd64_emit_call (fn);
2057 }
2058
2059 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2060
2061 static void
2062 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2063 {
2064 unsigned char buf[16];
2065 int i;
2066 CORE_ADDR buildaddr;
2067
2068 buildaddr = current_insn_ptr;
2069 i = 0;
2070 buf[i++] = 0xbf; /* movl $<n>,%edi */
2071 memcpy (&buf[i], &arg1, sizeof (arg1));
2072 i += 4;
2073 append_insns (&buildaddr, i, buf);
2074 current_insn_ptr = buildaddr;
2075 EMIT_ASM (amd64_void_call_2_a,
2076 /* Save away a copy of the stack top. */
2077 "push %rax\n\t"
2078 /* Also pass top as the second argument. */
2079 "mov %rax,%rsi");
2080 amd64_emit_call (fn);
2081 EMIT_ASM (amd64_void_call_2_b,
2082 /* Restore the stack top, %rax may have been trashed. */
2083 "pop %rax");
2084 }
2085
2086 void
2087 amd64_emit_eq_goto (int *offset_p, int *size_p)
2088 {
2089 EMIT_ASM (amd64_eq,
2090 "cmp %rax,(%rsp)\n\t"
2091 "jne .Lamd64_eq_fallthru\n\t"
2092 "lea 0x8(%rsp),%rsp\n\t"
2093 "pop %rax\n\t"
2094 /* jmp, but don't trust the assembler to choose the right jump */
2095 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2096 ".Lamd64_eq_fallthru:\n\t"
2097 "lea 0x8(%rsp),%rsp\n\t"
2098 "pop %rax");
2099
2100 if (offset_p)
2101 *offset_p = 13;
2102 if (size_p)
2103 *size_p = 4;
2104 }
2105
2106 void
2107 amd64_emit_ne_goto (int *offset_p, int *size_p)
2108 {
2109 EMIT_ASM (amd64_ne,
2110 "cmp %rax,(%rsp)\n\t"
2111 "je .Lamd64_ne_fallthru\n\t"
2112 "lea 0x8(%rsp),%rsp\n\t"
2113 "pop %rax\n\t"
2114 /* jmp, but don't trust the assembler to choose the right jump */
2115 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2116 ".Lamd64_ne_fallthru:\n\t"
2117 "lea 0x8(%rsp),%rsp\n\t"
2118 "pop %rax");
2119
2120 if (offset_p)
2121 *offset_p = 13;
2122 if (size_p)
2123 *size_p = 4;
2124 }
2125
2126 void
2127 amd64_emit_lt_goto (int *offset_p, int *size_p)
2128 {
2129 EMIT_ASM (amd64_lt,
2130 "cmp %rax,(%rsp)\n\t"
2131 "jnl .Lamd64_lt_fallthru\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2133 "pop %rax\n\t"
2134 /* jmp, but don't trust the assembler to choose the right jump */
2135 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2136 ".Lamd64_lt_fallthru:\n\t"
2137 "lea 0x8(%rsp),%rsp\n\t"
2138 "pop %rax");
2139
2140 if (offset_p)
2141 *offset_p = 13;
2142 if (size_p)
2143 *size_p = 4;
2144 }
2145
2146 void
2147 amd64_emit_le_goto (int *offset_p, int *size_p)
2148 {
2149 EMIT_ASM (amd64_le,
2150 "cmp %rax,(%rsp)\n\t"
2151 "jnle .Lamd64_le_fallthru\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2153 "pop %rax\n\t"
2154 /* jmp, but don't trust the assembler to choose the right jump */
2155 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2156 ".Lamd64_le_fallthru:\n\t"
2157 "lea 0x8(%rsp),%rsp\n\t"
2158 "pop %rax");
2159
2160 if (offset_p)
2161 *offset_p = 13;
2162 if (size_p)
2163 *size_p = 4;
2164 }
2165
2166 void
2167 amd64_emit_gt_goto (int *offset_p, int *size_p)
2168 {
2169 EMIT_ASM (amd64_gt,
2170 "cmp %rax,(%rsp)\n\t"
2171 "jng .Lamd64_gt_fallthru\n\t"
2172 "lea 0x8(%rsp),%rsp\n\t"
2173 "pop %rax\n\t"
2174 /* jmp, but don't trust the assembler to choose the right jump */
2175 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2176 ".Lamd64_gt_fallthru:\n\t"
2177 "lea 0x8(%rsp),%rsp\n\t"
2178 "pop %rax");
2179
2180 if (offset_p)
2181 *offset_p = 13;
2182 if (size_p)
2183 *size_p = 4;
2184 }
2185
2186 void
2187 amd64_emit_ge_goto (int *offset_p, int *size_p)
2188 {
2189 EMIT_ASM (amd64_ge,
2190 "cmp %rax,(%rsp)\n\t"
2191 "jnge .Lamd64_ge_fallthru\n\t"
2192 ".Lamd64_ge_jump:\n\t"
2193 "lea 0x8(%rsp),%rsp\n\t"
2194 "pop %rax\n\t"
2195 /* jmp, but don't trust the assembler to choose the right jump */
2196 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2197 ".Lamd64_ge_fallthru:\n\t"
2198 "lea 0x8(%rsp),%rsp\n\t"
2199 "pop %rax");
2200
2201 if (offset_p)
2202 *offset_p = 13;
2203 if (size_p)
2204 *size_p = 4;
2205 }
2206
2207 struct emit_ops amd64_emit_ops =
2208 {
2209 amd64_emit_prologue,
2210 amd64_emit_epilogue,
2211 amd64_emit_add,
2212 amd64_emit_sub,
2213 amd64_emit_mul,
2214 amd64_emit_lsh,
2215 amd64_emit_rsh_signed,
2216 amd64_emit_rsh_unsigned,
2217 amd64_emit_ext,
2218 amd64_emit_log_not,
2219 amd64_emit_bit_and,
2220 amd64_emit_bit_or,
2221 amd64_emit_bit_xor,
2222 amd64_emit_bit_not,
2223 amd64_emit_equal,
2224 amd64_emit_less_signed,
2225 amd64_emit_less_unsigned,
2226 amd64_emit_ref,
2227 amd64_emit_if_goto,
2228 amd64_emit_goto,
2229 amd64_write_goto_address,
2230 amd64_emit_const,
2231 amd64_emit_call,
2232 amd64_emit_reg,
2233 amd64_emit_pop,
2234 amd64_emit_stack_flush,
2235 amd64_emit_zero_ext,
2236 amd64_emit_swap,
2237 amd64_emit_stack_adjust,
2238 amd64_emit_int_call_1,
2239 amd64_emit_void_call_2,
2240 amd64_emit_eq_goto,
2241 amd64_emit_ne_goto,
2242 amd64_emit_lt_goto,
2243 amd64_emit_le_goto,
2244 amd64_emit_gt_goto,
2245 amd64_emit_ge_goto
2246 };
2247
2248 #endif /* __x86_64__ */
2249
2250 static void
2251 i386_emit_prologue (void)
2252 {
2253 EMIT_ASM32 (i386_prologue,
2254 "push %ebp\n\t"
2255 "mov %esp,%ebp\n\t"
2256 "push %ebx");
2257 /* At this point, the raw regs base address is at 8(%ebp), and the
2258 value pointer is at 12(%ebp). */
2259 }
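
/* Throughout the i386 emitters the 64-bit value on top of the
   expression stack is cached in registers, %eax holding the low word
   and %ebx the high word; deeper entries live on the machine stack as
   pairs of 4-byte words, low word at the lower address.  That is why,
   for instance, the add below pairs `add' on (%esp) with `adc' on
   4(%esp).  */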
2260
2261 static void
2262 i386_emit_epilogue (void)
2263 {
2264 EMIT_ASM32 (i386_epilogue,
2265 "mov 12(%ebp),%ecx\n\t"
2266 "mov %eax,(%ecx)\n\t"
2267 "mov %ebx,0x4(%ecx)\n\t"
2268 "xor %eax,%eax\n\t"
2269 "pop %ebx\n\t"
2270 "pop %ebp\n\t"
2271 "ret");
2272 }
2273
2274 static void
2275 i386_emit_add (void)
2276 {
2277 EMIT_ASM32 (i386_add,
2278 "add (%esp),%eax\n\t"
2279 "adc 0x4(%esp),%ebx\n\t"
2280 "lea 0x8(%esp),%esp");
2281 }
2282
2283 static void
2284 i386_emit_sub (void)
2285 {
2286 EMIT_ASM32 (i386_sub,
2287 "subl %eax,(%esp)\n\t"
2288 "sbbl %ebx,4(%esp)\n\t"
2289 "pop %eax\n\t"
2290 "pop %ebx\n\t");
2291 }
2292
2293 static void
2294 i386_emit_mul (void)
2295 {
2296 emit_error = 1;
2297 }
2298
2299 static void
2300 i386_emit_lsh (void)
2301 {
2302 emit_error = 1;
2303 }
2304
2305 static void
2306 i386_emit_rsh_signed (void)
2307 {
2308 emit_error = 1;
2309 }
2310
2311 static void
2312 i386_emit_rsh_unsigned (void)
2313 {
2314 emit_error = 1;
2315 }
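
/* Sixty-four-bit multiply and shifts are awkward to synthesize in
   32-bit code, so the four emitters above simply flag emit_error; the
   expectation is that the caller then gives up on compiling this
   agent expression and falls back on interpreting it.  */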
2316
2317 static void
2318 i386_emit_ext (int arg)
2319 {
2320 switch (arg)
2321 {
2322 case 8:
2323 EMIT_ASM32 (i386_ext_8,
2324 "cbtw\n\t"
2325 "cwtl\n\t"
2326 "movl %eax,%ebx\n\t"
2327 "sarl $31,%ebx");
2328 break;
2329 case 16:
2330 EMIT_ASM32 (i386_ext_16,
2331 "cwtl\n\t"
2332 "movl %eax,%ebx\n\t"
2333 "sarl $31,%ebx");
2334 break;
2335 case 32:
2336 EMIT_ASM32 (i386_ext_32,
2337 "movl %eax,%ebx\n\t"
2338 "sarl $31,%ebx");
2339 break;
2340 default:
2341 emit_error = 1;
2342 }
2343 }
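
/* The chain above sign-extends entirely within registers: cbtw widens
   %al to %ax, cwtl widens %ax to %eax, and the final `sarl $31'
   broadcasts the resulting sign bit through %ebx to form the high
   word.  */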
2344
2345 static void
2346 i386_emit_log_not (void)
2347 {
2348 EMIT_ASM32 (i386_log_not,
2349 "or %ebx,%eax\n\t"
2350 "test %eax,%eax\n\t"
2351 "sete %cl\n\t"
2352 "xor %ebx,%ebx\n\t"
2353 "movzbl %cl,%eax");
2354 }
2355
2356 static void
2357 i386_emit_bit_and (void)
2358 {
2359 EMIT_ASM32 (i386_and,
2360 "and (%esp),%eax\n\t"
2361 "and 0x4(%esp),%ebx\n\t"
2362 "lea 0x8(%esp),%esp");
2363 }
2364
2365 static void
2366 i386_emit_bit_or (void)
2367 {
2368 EMIT_ASM32 (i386_or,
2369 "or (%esp),%eax\n\t"
2370 "or 0x4(%esp),%ebx\n\t"
2371 "lea 0x8(%esp),%esp");
2372 }
2373
2374 static void
2375 i386_emit_bit_xor (void)
2376 {
2377 EMIT_ASM32 (i386_xor,
2378 "xor (%esp),%eax\n\t"
2379 "xor 0x4(%esp),%ebx\n\t"
2380 "lea 0x8(%esp),%esp");
2381 }
2382
2383 static void
2384 i386_emit_bit_not (void)
2385 {
2386 EMIT_ASM32 (i386_bit_not,
2387 "xor $0xffffffff,%eax\n\t"
2388 "xor $0xffffffff,%ebx\n\t");
2389 }
2390
2391 static void
2392 i386_emit_equal (void)
2393 {
2394 EMIT_ASM32 (i386_equal,
2395 "cmpl %ebx,4(%esp)\n\t"
2396 "jne .Li386_equal_false\n\t"
2397 "cmpl %eax,(%esp)\n\t"
2398 "je .Li386_equal_true\n\t"
2399 ".Li386_equal_false:\n\t"
2400 "xor %eax,%eax\n\t"
2401 "jmp .Li386_equal_end\n\t"
2402 ".Li386_equal_true:\n\t"
2403 "mov $1,%eax\n\t"
2404 ".Li386_equal_end:\n\t"
2405 "xor %ebx,%ebx\n\t"
2406 "lea 0x8(%esp),%esp");
2407 }
2408
2409 static void
2410 i386_emit_less_signed (void)
2411 {
2412 EMIT_ASM32 (i386_less_signed,
2413 "cmpl %ebx,4(%esp)\n\t"
2414 "jl .Li386_less_signed_true\n\t"
2415 "jne .Li386_less_signed_false\n\t"
2416 "cmpl %eax,(%esp)\n\t"
2417 "jl .Li386_less_signed_true\n\t"
2418 ".Li386_less_signed_false:\n\t"
2419 "xor %eax,%eax\n\t"
2420 "jmp .Li386_less_signed_end\n\t"
2421 ".Li386_less_signed_true:\n\t"
2422 "mov $1,%eax\n\t"
2423 ".Li386_less_signed_end:\n\t"
2424 "xor %ebx,%ebx\n\t"
2425 "lea 0x8(%esp),%esp");
2426 }
2427
2428 static void
2429 i386_emit_less_unsigned (void)
2430 {
2431 EMIT_ASM32 (i386_less_unsigned,
2432 "cmpl %ebx,4(%esp)\n\t"
2433 "jb .Li386_less_unsigned_true\n\t"
2434 "jne .Li386_less_unsigned_false\n\t"
2435 "cmpl %eax,(%esp)\n\t"
2436 "jb .Li386_less_unsigned_true\n\t"
2437 ".Li386_less_unsigned_false:\n\t"
2438 "xor %eax,%eax\n\t"
2439 "jmp .Li386_less_unsigned_end\n\t"
2440 ".Li386_less_unsigned_true:\n\t"
2441 "mov $1,%eax\n\t"
2442 ".Li386_less_unsigned_end:\n\t"
2443 "xor %ebx,%ebx\n\t"
2444 "lea 0x8(%esp),%esp");
2445 }
2446
2447 static void
2448 i386_emit_ref (int size)
2449 {
2450 switch (size)
2451 {
2452 case 1:
2453 EMIT_ASM32 (i386_ref1,
2454 "movb (%eax),%al");
2455 break;
2456 case 2:
2457 EMIT_ASM32 (i386_ref2,
2458 "movw (%eax),%ax");
2459 break;
2460 case 4:
2461 EMIT_ASM32 (i386_ref4,
2462 "movl (%eax),%eax");
2463 break;
2464 case 8:
2465 EMIT_ASM32 (i386_ref8,
2466 "movl 4(%eax),%ebx\n\t"
2467 "movl (%eax),%eax");
2468 break;
2469 }
2470 }
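
/* In the size-8 case above the high word is loaded first: the second
   movl overwrites the pointer held in %eax, so it must be the last
   use of it.  */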
2471
2472 static void
2473 i386_emit_if_goto (int *offset_p, int *size_p)
2474 {
2475 EMIT_ASM32 (i386_if_goto,
2476 "mov %eax,%ecx\n\t"
2477 "or %ebx,%ecx\n\t"
2478 "pop %eax\n\t"
2479 "pop %ebx\n\t"
2480 "cmpl $0,%ecx\n\t"
2481 /* Don't trust the assembler to choose the right jump */
2482 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2483
2484 if (offset_p)
2485 *offset_p = 11; /* Be sure that this matches the sequence above.  */
2486 if (size_p)
2487 *size_p = 4;
2488 }
2489
2490 static void
2491 i386_emit_goto (int *offset_p, int *size_p)
2492 {
2493 EMIT_ASM32 (i386_goto,
2494 /* Don't trust the assembler to choose the right jump */
2495 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2496 if (offset_p)
2497 *offset_p = 1;
2498 if (size_p)
2499 *size_p = 4;
2500 }
2501
2502 static void
2503 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2504 {
2505 int diff = (to - (from + size));
2506 unsigned char buf[sizeof (int)];
2507
2508 /* We're only doing 4-byte sizes at the moment. */
2509 if (size != 4)
2510 {
2511 emit_error = 1;
2512 return;
2513 }
2514
2515 memcpy (buf, &diff, sizeof (int));
2516 write_inferior_memory (from, buf, sizeof (int));
2517 }
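
/* A minimal sketch of how the goto emitters and this patching routine
   fit together; the variables and the surrounding control flow here
   are hypothetical, only the emitter calls are real:

       int offset, size;
       CORE_ADDR from = current_insn_ptr;
       i386_emit_goto (&offset, &size);     (emits e9 00 00 00 00)
       ... later, once the jump target TO is known ...
       i386_write_goto_address (from + offset, to, size);

   FROM + OFFSET is the address of the placeholder displacement, and
   the value written is relative to the end of that 4-byte field,
   which is the CPU's rule for rel32 jumps.  */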
2518
2519 static void
2520 i386_emit_const (LONGEST num)
2521 {
2522 unsigned char buf[16];
2523 int i, hi, lo;
2524 CORE_ADDR buildaddr = current_insn_ptr;
2525
2526 i = 0;
2527 buf[i++] = 0xb8; /* mov $<n>,%eax */
2528 lo = num & 0xffffffff;
2529 memcpy (&buf[i], &lo, sizeof (lo));
2530 i += 4;
2531 hi = ((num >> 32) & 0xffffffff);
2532 if (hi)
2533 {
2534 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2535 memcpy (&buf[i], &hi, sizeof (hi));
2536 i += 4;
2537 }
2538 else
2539 {
2540 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2541 }
2542 append_insns (&buildaddr, i, buf);
2543 current_insn_ptr = buildaddr;
2544 }
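
/* The constant is split to match the %eax/%ebx pairing: the low word
   always goes through `mov $<n>,%eax', while the high word gets the
   shorter two-byte `xor %ebx,%ebx' whenever it is zero.  */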
2545
2546 static void
2547 i386_emit_call (CORE_ADDR fn)
2548 {
2549 unsigned char buf[16];
2550 int i, offset;
2551 CORE_ADDR buildaddr;
2552
2553 buildaddr = current_insn_ptr;
2554 i = 0;
2555 buf[i++] = 0xe8; /* call <reladdr> */
2556 offset = ((int) fn) - (buildaddr + 5);
2557 memcpy (buf + 1, &offset, 4);
2558 append_insns (&buildaddr, 5, buf);
2559 current_insn_ptr = buildaddr;
2560 }
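
/* The displacement of an e8 call is taken relative to the first byte
   after the 5-byte instruction, hence the `buildaddr + 5' in the
   offset computation above.  */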
2561
2562 static void
2563 i386_emit_reg (int reg)
2564 {
2565 unsigned char buf[16];
2566 int i;
2567 CORE_ADDR buildaddr;
2568
2569 EMIT_ASM32 (i386_reg_a,
2570 "sub $0x8,%esp");
2571 buildaddr = current_insn_ptr;
2572 i = 0;
2573 buf[i++] = 0xb8; /* mov $<n>,%eax */
2574 memcpy (&buf[i], &reg, sizeof (reg));
2575 i += 4;
2576 append_insns (&buildaddr, i, buf);
2577 current_insn_ptr = buildaddr;
2578 EMIT_ASM32 (i386_reg_b,
2579 "mov %eax,4(%esp)\n\t"
2580 "mov 8(%ebp),%eax\n\t"
2581 "mov %eax,(%esp)");
2582 i386_emit_call (get_raw_reg_func_addr ());
2583 EMIT_ASM32 (i386_reg_c,
2584 "xor %ebx,%ebx\n\t"
2585 "lea 0x8(%esp),%esp");
2586 }
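
/* Assuming get_raw_reg returns a 64-bit value, its low half arrives
   in %eax and its high half in %edx under the usual 32-bit
   convention; the `xor %ebx,%ebx' above zeroes the cached high word
   instead of copying %edx, presumably because raw i386 registers fit
   in 32 bits.  */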
2587
2588 static void
2589 i386_emit_pop (void)
2590 {
2591 EMIT_ASM32 (i386_pop,
2592 "pop %eax\n\t"
2593 "pop %ebx");
2594 }
2595
2596 static void
2597 i386_emit_stack_flush (void)
2598 {
2599 EMIT_ASM32 (i386_stack_flush,
2600 "push %ebx\n\t"
2601 "push %eax");
2602 }
2603
2604 static void
2605 i386_emit_zero_ext (int arg)
2606 {
2607 switch (arg)
2608 {
2609 case 8:
2610 EMIT_ASM32 (i386_zero_ext_8,
2611 "and $0xff,%eax\n\t"
2612 "xor %ebx,%ebx");
2613 break;
2614 case 16:
2615 EMIT_ASM32 (i386_zero_ext_16,
2616 "and $0xffff,%eax\n\t"
2617 "xor %ebx,%ebx");
2618 break;
2619 case 32:
2620 EMIT_ASM32 (i386_zero_ext_32,
2621 "xor %ebx,%ebx");
2622 break;
2623 default:
2624 emit_error = 1;
2625 }
2626 }
2627
2628 static void
2629 i386_emit_swap (void)
2630 {
2631 EMIT_ASM32 (i386_swap,
2632 "mov %eax,%ecx\n\t"
2633 "mov %ebx,%edx\n\t"
2634 "pop %eax\n\t"
2635 "pop %ebx\n\t"
2636 "push %edx\n\t"
2637 "push %ecx");
2638 }
2639
2640 static void
2641 i386_emit_stack_adjust (int n)
2642 {
2643 unsigned char buf[16];
2644 int i;
2645 CORE_ADDR buildaddr = current_insn_ptr;
2646
2647 i = 0;
2648 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2649 buf[i++] = 0x64;
2650 buf[i++] = 0x24;
2651 buf[i++] = n * 8;
2652 append_insns (&buildaddr, i, buf);
2653 current_insn_ptr = buildaddr;
2654 }
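
/* Same single-instruction trick as the amd64 version, minus the REX
   prefix: the adjustment is encoded as the one-byte displacement of
   `lea n*8(%esp),%esp', so only small values of N can be handled.  */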
2655
2656 /* FN's prototype is `LONGEST(*fn)(int)'. */
2657
2658 static void
2659 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2660 {
2661 unsigned char buf[16];
2662 int i;
2663 CORE_ADDR buildaddr;
2664
2665 EMIT_ASM32 (i386_int_call_1_a,
2666 /* Reserve a bit of stack space. */
2667 "sub $0x8,%esp");
2668 /* Put the one argument on the stack. */
2669 buildaddr = current_insn_ptr;
2670 i = 0;
2671 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2672 buf[i++] = 0x04;
2673 buf[i++] = 0x24;
2674 memcpy (&buf[i], &arg1, sizeof (arg1));
2675 i += 4;
2676 append_insns (&buildaddr, i, buf);
2677 current_insn_ptr = buildaddr;
2678 i386_emit_call (fn);
2679 EMIT_ASM32 (i386_int_call_1_c,
2680 "mov %edx,%ebx\n\t"
2681 "lea 0x8(%esp),%esp");
2682 }
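
/* Under the 32-bit calling convention the argument travels on the
   stack and the LONGEST result comes back in %edx:%eax; the
   `mov %edx,%ebx' above moves the high half into place to restore the
   %eax/%ebx top-of-stack pairing.  */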
2683
2684 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2685
2686 static void
2687 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2688 {
2689 unsigned char buf[16];
2690 int i;
2691 CORE_ADDR buildaddr;
2692
2693 EMIT_ASM32 (i386_void_call_2_a,
2694 /* Preserve %eax only; we don't have to worry about %ebx. */
2695 "push %eax\n\t"
2696 /* Reserve a bit of stack space for arguments. */
2697 "sub $0x10,%esp\n\t"
2698 /* Copy "top" to the second argument position.  (Note that
2699 we can't assume the called function won't scribble on its
2700 arguments, so don't try to restore the top from these slots.)  */
2701 "mov %eax,4(%esp)\n\t"
2702 "mov %ebx,8(%esp)");
2703 /* Put the first argument on the stack. */
2704 buildaddr = current_insn_ptr;
2705 i = 0;
2706 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2707 buf[i++] = 0x04;
2708 buf[i++] = 0x24;
2709 memcpy (&buf[i], &arg1, sizeof (arg1));
2710 i += 4;
2711 append_insns (&buildaddr, i, buf);
2712 current_insn_ptr = buildaddr;
2713 i386_emit_call (fn);
2714 EMIT_ASM32 (i386_void_call_2_b,
2715 "lea 0x10(%esp),%esp\n\t"
2716 /* Restore original stack top. */
2717 "pop %eax");
2718 }
2719
2720
2721 void
2722 i386_emit_eq_goto (int *offset_p, int *size_p)
2723 {
2724 EMIT_ASM32 (eq,
2725 /* Check the low half first; it is more likely to decide the comparison.  */
2726 "cmpl %eax,(%esp)\n\t"
2727 "jne .Leq_fallthru\n\t"
2728 "cmpl %ebx,4(%esp)\n\t"
2729 "jne .Leq_fallthru\n\t"
2730 "lea 0x8(%esp),%esp\n\t"
2731 "pop %eax\n\t"
2732 "pop %ebx\n\t"
2733 /* jmp, but don't trust the assembler to choose the right jump */
2734 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2735 ".Leq_fallthru:\n\t"
2736 "lea 0x8(%esp),%esp\n\t"
2737 "pop %eax\n\t"
2738 "pop %ebx");
2739
2740 if (offset_p)
2741 *offset_p = 18;
2742 if (size_p)
2743 *size_p = 4;
2744 }
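
/* As on amd64, the offset of 18 follows from the encoded lengths of
   the sequence above, assuming short (rel8) conditional jumps:
   3 (cmpl %eax,(%esp)) + 2 (jne) + 4 (cmpl %ebx,4(%esp)) + 2 (jne)
   + 4 (lea) + 1 + 1 (the two pops) = 17 bytes, so the 0xe9 opcode
   lands at offset 17 and its patchable displacement at 18.
   i386_emit_ne_goto below has the same layout; the lt/le/gt/ge
   variants start with the 4-byte cmpl and carry one extra short jump,
   which is why they report 20 instead.  */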
2745
2746 void
2747 i386_emit_ne_goto (int *offset_p, int *size_p)
2748 {
2749 EMIT_ASM32 (ne,
2750 /* Check the low half first; it is more likely to decide the comparison.  */
2751 "cmpl %eax,(%esp)\n\t"
2752 "jne .Lne_jump\n\t"
2753 "cmpl %ebx,4(%esp)\n\t"
2754 "je .Lne_fallthru\n\t"
2755 ".Lne_jump:\n\t"
2756 "lea 0x8(%esp),%esp\n\t"
2757 "pop %eax\n\t"
2758 "pop %ebx\n\t"
2759 /* jmp, but don't trust the assembler to choose the right jump */
2760 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2761 ".Lne_fallthru:\n\t"
2762 "lea 0x8(%esp),%esp\n\t"
2763 "pop %eax\n\t"
2764 "pop %ebx");
2765
2766 if (offset_p)
2767 *offset_p = 18;
2768 if (size_p)
2769 *size_p = 4;
2770 }
2771
2772 void
2773 i386_emit_lt_goto (int *offset_p, int *size_p)
2774 {
2775 EMIT_ASM32 (lt,
2776 "cmpl %ebx,4(%esp)\n\t"
2777 "jl .Llt_jump\n\t"
2778 "jne .Llt_fallthru\n\t"
2779 "cmpl %eax,(%esp)\n\t"
2780 "jnl .Llt_fallthru\n\t"
2781 ".Llt_jump:\n\t"
2782 "lea 0x8(%esp),%esp\n\t"
2783 "pop %eax\n\t"
2784 "pop %ebx\n\t"
2785 /* jmp, but don't trust the assembler to choose the right jump */
2786 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2787 ".Llt_fallthru:\n\t"
2788 "lea 0x8(%esp),%esp\n\t"
2789 "pop %eax\n\t"
2790 "pop %ebx");
2791
2792 if (offset_p)
2793 *offset_p = 20;
2794 if (size_p)
2795 *size_p = 4;
2796 }
2797
2798 void
2799 i386_emit_le_goto (int *offset_p, int *size_p)
2800 {
2801 EMIT_ASM32 (le,
2802 "cmpl %ebx,4(%esp)\n\t"
2803 "jle .Lle_jump\n\t"
2804 "jne .Lle_fallthru\n\t"
2805 "cmpl %eax,(%esp)\n\t"
2806 "jnle .Lle_fallthru\n\t"
2807 ".Lle_jump:\n\t"
2808 "lea 0x8(%esp),%esp\n\t"
2809 "pop %eax\n\t"
2810 "pop %ebx\n\t"
2811 /* jmp, but don't trust the assembler to choose the right jump */
2812 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2813 ".Lle_fallthru:\n\t"
2814 "lea 0x8(%esp),%esp\n\t"
2815 "pop %eax\n\t"
2816 "pop %ebx");
2817
2818 if (offset_p)
2819 *offset_p = 20;
2820 if (size_p)
2821 *size_p = 4;
2822 }
2823
2824 void
2825 i386_emit_gt_goto (int *offset_p, int *size_p)
2826 {
2827 EMIT_ASM32 (gt,
2828 "cmpl %ebx,4(%esp)\n\t"
2829 "jg .Lgt_jump\n\t"
2830 "jne .Lgt_fallthru\n\t"
2831 "cmpl %eax,(%esp)\n\t"
2832 "jng .Lgt_fallthru\n\t"
2833 ".Lgt_jump:\n\t"
2834 "lea 0x8(%esp),%esp\n\t"
2835 "pop %eax\n\t"
2836 "pop %ebx\n\t"
2837 /* jmp, but don't trust the assembler to choose the right jump */
2838 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2839 ".Lgt_fallthru:\n\t"
2840 "lea 0x8(%esp),%esp\n\t"
2841 "pop %eax\n\t"
2842 "pop %ebx");
2843
2844 if (offset_p)
2845 *offset_p = 20;
2846 if (size_p)
2847 *size_p = 4;
2848 }
2849
2850 void
2851 i386_emit_ge_goto (int *offset_p, int *size_p)
2852 {
2853 EMIT_ASM32 (ge,
2854 "cmpl %ebx,4(%esp)\n\t"
2855 "jge .Lge_jump\n\t"
2856 "jne .Lge_fallthru\n\t"
2857 "cmpl %eax,(%esp)\n\t"
2858 "jnge .Lge_fallthru\n\t"
2859 ".Lge_jump:\n\t"
2860 "lea 0x8(%esp),%esp\n\t"
2861 "pop %eax\n\t"
2862 "pop %ebx\n\t"
2863 /* jmp, but don't trust the assembler to choose the right jump */
2864 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2865 ".Lge_fallthru:\n\t"
2866 "lea 0x8(%esp),%esp\n\t"
2867 "pop %eax\n\t"
2868 "pop %ebx");
2869
2870 if (offset_p)
2871 *offset_p = 20;
2872 if (size_p)
2873 *size_p = 4;
2874 }
2875
2876 struct emit_ops i386_emit_ops =
2877 {
2878 i386_emit_prologue,
2879 i386_emit_epilogue,
2880 i386_emit_add,
2881 i386_emit_sub,
2882 i386_emit_mul,
2883 i386_emit_lsh,
2884 i386_emit_rsh_signed,
2885 i386_emit_rsh_unsigned,
2886 i386_emit_ext,
2887 i386_emit_log_not,
2888 i386_emit_bit_and,
2889 i386_emit_bit_or,
2890 i386_emit_bit_xor,
2891 i386_emit_bit_not,
2892 i386_emit_equal,
2893 i386_emit_less_signed,
2894 i386_emit_less_unsigned,
2895 i386_emit_ref,
2896 i386_emit_if_goto,
2897 i386_emit_goto,
2898 i386_write_goto_address,
2899 i386_emit_const,
2900 i386_emit_call,
2901 i386_emit_reg,
2902 i386_emit_pop,
2903 i386_emit_stack_flush,
2904 i386_emit_zero_ext,
2905 i386_emit_swap,
2906 i386_emit_stack_adjust,
2907 i386_emit_int_call_1,
2908 i386_emit_void_call_2,
2909 i386_emit_eq_goto,
2910 i386_emit_ne_goto,
2911 i386_emit_lt_goto,
2912 i386_emit_le_goto,
2913 i386_emit_gt_goto,
2914 i386_emit_ge_goto
2915 };
2916
2917
2918 static struct emit_ops *
2919 x86_emit_ops (void)
2920 {
2921 #ifdef __x86_64__
2922 int use_64bit = register_size (0) == 8;
2923
2924 if (use_64bit)
2925 return &amd64_emit_ops;
2926 else
2927 #endif
2928 return &i386_emit_ops;
2929 }
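
/* register_size (0) is the size of register number 0 under the layout
   chosen by x86_arch_setup, 8 bytes for %rax on amd64 and 4 for %eax
   on i386, so it doubles as a 64-bit vs. 32-bit test.  */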
2930
2931 /* This is initialized assuming an amd64 target.
2932 x86_arch_setup will adjust it to match the actual target, i386 or amd64.  */
2933
2934 struct linux_target_ops the_low_target =
2935 {
2936 x86_arch_setup,
2937 -1,
2938 NULL,
2939 NULL,
2940 NULL,
2941 x86_get_pc,
2942 x86_set_pc,
2943 x86_breakpoint,
2944 x86_breakpoint_len,
2945 NULL,
2946 1,
2947 x86_breakpoint_at,
2948 x86_insert_point,
2949 x86_remove_point,
2950 x86_stopped_by_watchpoint,
2951 x86_stopped_data_address,
2952 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2953 native i386 case (no registers smaller than an xfer unit), and are not
2954 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2955 NULL,
2956 NULL,
2957 /* Need to fix up i386 siginfo if host is amd64.  */
2958 x86_siginfo_fixup,
2959 x86_linux_new_process,
2960 x86_linux_new_thread,
2961 x86_linux_prepare_to_resume,
2962 x86_linux_process_qsupported,
2963 x86_supports_tracepoints,
2964 x86_get_thread_area,
2965 x86_install_fast_tracepoint_jump_pad,
2966 x86_emit_ops,
2967 x86_get_min_fast_tracepoint_insn_len,
2968 };