struct siginfo vs. siginfo_t
gdb/gdbserver/linux-x86-low.c
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"
#include "agent.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

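/* Templates for the jump instructions the fast-tracepoint code below
   patches in: jump_insn is a 5-byte "jmp rel32" (opcode 0xe9), and
   small_jump_insn is a 4-byte "jmp rel16" (0xe9 with an operand-size
   prefix).  The zero bytes are placeholders for the displacement.  */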
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

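/* The leading '@' marks the string as a verbatim target description
   document rather than the name of an XML file; see get_features_xml
   in server.c.  */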
static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
/* So the code below doesn't have to care whether it is i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
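/* A -1 entry above marks a GDB register (the x87 and SSE state
   registers) that has no slot in the general-purpose register block
   and is transferred through a different regset instead.  */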

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

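    /* desc[] has the layout of struct user_desc; desc[1] is its
       base_addr field, which is all we need here.  */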
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* Shift off a selector's low 3 bits
                                      (RPL and table indicator) to get
                                      the GDT entry index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update target_regsets accordingly, maybe by moving
   target_regsets to linux_target_ops and setting the right one there,
   rather than having to modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
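/* Note: a regset whose size is 0 is treated as disabled by the generic
   linux-low code; x86_linux_update_xmltarget below sets sizes to 0 (or
   to the real XSAVE size) to disable or enable entries at runtime.  */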

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
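/* 0xCC is the one-byte "int3" software breakpoint instruction.  */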
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved by some other means.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, i386_low_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the
   same as debugging it with a 32-bit GDBSERVER, we do the 32-bit <->
   64-bit conversion in-place ourselves.  */

/* The types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
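/* Note: the _pad array sizes the union so that the whole struct is
   128 bytes (the kernel's SI_MAX_SIZE), matching the 32-bit ABI.  */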

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc, at least up to 2.3.2, doesn't have si_timerid or si_overrun;
   in their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo layout
   of the inferior's architecture.  Returns true if any conversion was
   done; false otherwise.  If DIRECTION is 1, then copy from INF to
   NATIVE.  If DIRECTION is 0, copy from NATIVE to INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  int use_64bit = linux_pid_exe_is_elf_64_file (pid);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
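/* For example, push_opcode (buf, "48 89 e6") writes the three bytes
   0x48 0x89 0xe6 ("mov %rsp,%rsi") into BUF and returns 3.  */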

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction that is to be placed at the
   tracepoint address into JJUMP_PAD_INSN; the caller is responsible
   for writing it there.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* mov $<tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* mov $<lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* mov $<tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 2^31 bytes away from the jump pad, so call it indirectly.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction that is to be placed at the
   tracepoint address into JJUMP_PAD_INSN; the caller is responsible
   for writing it there.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386; if we cared about that, this could use xchg
     instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
                                                %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel through which to explain to the user
             why a shorter fast tracepoint is not possible, but at
             least make GDBserver mention that something has gone
             awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)                                           \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ ("jmp end_" #NAME "\n"                                    \
               "\t" "start_" #NAME ":"                                  \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":");                                  \
    } while (0)
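/* For example, a hypothetical EMIT_ASM (my_pop, "pop %rax") assembles
   "pop %rax" into this function's body between the start_my_pop and
   end_my_pop labels (jumped over at run time), then copies those
   bytes into the inferior at current_insn_ptr via add_insns.  */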

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)                                          \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ (".code32\n"                                              \
               "\t" "jmp end_" #NAME "\n"                               \
               "\t" "start_" #NAME ":\n"                                \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":\n"                                  \
               ".code64\n");                                            \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
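/* Note: the .byte sequence above encodes "jne rel32"; its 4-byte
   displacement starts 10 bytes into the emitted code (hence
   *offset_p = 10) and is patched later by amd64_write_goto_address.  */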

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 2^31 bytes away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* The offset is too large for a direct call.  Fall back to an
         indirect callq, which requires a register; use %r10, which is
         call-clobbered, so we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */
      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16 words: N * 8 is emitted as
     an 8-bit displacement, so it must fit in a signed byte.  We don't
     expect any more.  */
2065 buf[i++] = n * 8;
2066 append_insns (&buildaddr, i, buf);
2067 current_insn_ptr = buildaddr;
2068 }
2069
2070 /* FN's prototype is `LONGEST(*fn)(int)'. */
2071
2072 static void
2073 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2074 {
2075 unsigned char buf[16];
2076 int i;
2077 CORE_ADDR buildaddr;
2078
2079 buildaddr = current_insn_ptr;
2080 i = 0;
2081 buf[i++] = 0xbf; /* movl $<n>,%edi */
2082 memcpy (&buf[i], &arg1, sizeof (arg1));
2083 i += 4;
2084 append_insns (&buildaddr, i, buf);
2085 current_insn_ptr = buildaddr;
2086 amd64_emit_call (fn);
2087 }
2088
2089 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2090
2091 static void
2092 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2093 {
2094 unsigned char buf[16];
2095 int i;
2096 CORE_ADDR buildaddr;
2097
2098 buildaddr = current_insn_ptr;
2099 i = 0;
2100 buf[i++] = 0xbf; /* movl $<n>,%edi */
2101 memcpy (&buf[i], &arg1, sizeof (arg1));
2102 i += 4;
2103 append_insns (&buildaddr, i, buf);
2104 current_insn_ptr = buildaddr;
2105 EMIT_ASM (amd64_void_call_2_a,
2106 /* Save away a copy of the stack top. */
2107 "push %rax\n\t"
2108 /* Also pass top as the second argument. */
2109 "mov %rax,%rsi");
2110 amd64_emit_call (fn);
2111 EMIT_ASM (amd64_void_call_2_b,
2112 /* Restore the stack top, %rax may have been trashed. */
2113 "pop %rax");
2114 }
2115
2116 void
2117 amd64_emit_eq_goto (int *offset_p, int *size_p)
2118 {
2119 EMIT_ASM (amd64_eq,
2120 "cmp %rax,(%rsp)\n\t"
2121 "jne .Lamd64_eq_fallthru\n\t"
2122 "lea 0x8(%rsp),%rsp\n\t"
2123 "pop %rax\n\t"
2124 /* jmp, but don't trust the assembler to choose the right jump */
2125 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2126 ".Lamd64_eq_fallthru:\n\t"
2127 "lea 0x8(%rsp),%rsp\n\t"
2128 "pop %rax");
2129
2130 if (offset_p)
2131 *offset_p = 13;
2132 if (size_p)
2133 *size_p = 4;
2134 }
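
/* Where 13 comes from: "cmp %rax,(%rsp)" (4 bytes), "jne" rel8 (2),
   "lea 0x8(%rsp),%rsp" (5) and "pop %rax" (1) add up to 12 bytes, so
   the e9 opcode lands at offset 12 and its rel32 field -- the part
   amd64_write_goto_address later patches -- at offset 13.  The same
   layout holds for the ne/lt/le/gt/ge variants below.  */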
2135
2136 static void
2137 amd64_emit_ne_goto (int *offset_p, int *size_p)
2138 {
2139 EMIT_ASM (amd64_ne,
2140 "cmp %rax,(%rsp)\n\t"
2141 "je .Lamd64_ne_fallthru\n\t"
2142 "lea 0x8(%rsp),%rsp\n\t"
2143 "pop %rax\n\t"
2144 /* jmp, but don't trust the assembler to choose the right jump */
2145 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2146 ".Lamd64_ne_fallthru:\n\t"
2147 "lea 0x8(%rsp),%rsp\n\t"
2148 "pop %rax");
2149
2150 if (offset_p)
2151 *offset_p = 13;
2152 if (size_p)
2153 *size_p = 4;
2154 }
2155
2156 static void
2157 amd64_emit_lt_goto (int *offset_p, int *size_p)
2158 {
2159 EMIT_ASM (amd64_lt,
2160 "cmp %rax,(%rsp)\n\t"
2161 "jnl .Lamd64_lt_fallthru\n\t"
2162 "lea 0x8(%rsp),%rsp\n\t"
2163 "pop %rax\n\t"
2164 /* jmp, but don't trust the assembler to choose the right jump */
2165 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2166 ".Lamd64_lt_fallthru:\n\t"
2167 "lea 0x8(%rsp),%rsp\n\t"
2168 "pop %rax");
2169
2170 if (offset_p)
2171 *offset_p = 13;
2172 if (size_p)
2173 *size_p = 4;
2174 }
2175
2176 static void
2177 amd64_emit_le_goto (int *offset_p, int *size_p)
2178 {
2179 EMIT_ASM (amd64_le,
2180 "cmp %rax,(%rsp)\n\t"
2181 "jnle .Lamd64_le_fallthru\n\t"
2182 "lea 0x8(%rsp),%rsp\n\t"
2183 "pop %rax\n\t"
2184 /* jmp, but don't trust the assembler to choose the right jump */
2185 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2186 ".Lamd64_le_fallthru:\n\t"
2187 "lea 0x8(%rsp),%rsp\n\t"
2188 "pop %rax");
2189
2190 if (offset_p)
2191 *offset_p = 13;
2192 if (size_p)
2193 *size_p = 4;
2194 }
2195
2196 static void
2197 amd64_emit_gt_goto (int *offset_p, int *size_p)
2198 {
2199 EMIT_ASM (amd64_gt,
2200 "cmp %rax,(%rsp)\n\t"
2201 "jng .Lamd64_gt_fallthru\n\t"
2202 "lea 0x8(%rsp),%rsp\n\t"
2203 "pop %rax\n\t"
2204 /* jmp, but don't trust the assembler to choose the right jump */
2205 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2206 ".Lamd64_gt_fallthru:\n\t"
2207 "lea 0x8(%rsp),%rsp\n\t"
2208 "pop %rax");
2209
2210 if (offset_p)
2211 *offset_p = 13;
2212 if (size_p)
2213 *size_p = 4;
2214 }
2215
2216 static void
2217 amd64_emit_ge_goto (int *offset_p, int *size_p)
2218 {
2219 EMIT_ASM (amd64_ge,
2220 "cmp %rax,(%rsp)\n\t"
2221 "jnge .Lamd64_ge_fallthru\n\t"
2222 ".Lamd64_ge_jump:\n\t"
2223 "lea 0x8(%rsp),%rsp\n\t"
2224 "pop %rax\n\t"
2225 /* jmp, but don't trust the assembler to choose the right jump */
2226 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2227 ".Lamd64_ge_fallthru:\n\t"
2228 "lea 0x8(%rsp),%rsp\n\t"
2229 "pop %rax");
2230
2231 if (offset_p)
2232 *offset_p = 13;
2233 if (size_p)
2234 *size_p = 4;
2235 }
2236
2237 struct emit_ops amd64_emit_ops =
2238 {
2239 amd64_emit_prologue,
2240 amd64_emit_epilogue,
2241 amd64_emit_add,
2242 amd64_emit_sub,
2243 amd64_emit_mul,
2244 amd64_emit_lsh,
2245 amd64_emit_rsh_signed,
2246 amd64_emit_rsh_unsigned,
2247 amd64_emit_ext,
2248 amd64_emit_log_not,
2249 amd64_emit_bit_and,
2250 amd64_emit_bit_or,
2251 amd64_emit_bit_xor,
2252 amd64_emit_bit_not,
2253 amd64_emit_equal,
2254 amd64_emit_less_signed,
2255 amd64_emit_less_unsigned,
2256 amd64_emit_ref,
2257 amd64_emit_if_goto,
2258 amd64_emit_goto,
2259 amd64_write_goto_address,
2260 amd64_emit_const,
2261 amd64_emit_call,
2262 amd64_emit_reg,
2263 amd64_emit_pop,
2264 amd64_emit_stack_flush,
2265 amd64_emit_zero_ext,
2266 amd64_emit_swap,
2267 amd64_emit_stack_adjust,
2268 amd64_emit_int_call_1,
2269 amd64_emit_void_call_2,
2270 amd64_emit_eq_goto,
2271 amd64_emit_ne_goto,
2272 amd64_emit_lt_goto,
2273 amd64_emit_le_goto,
2274 amd64_emit_gt_goto,
2275 amd64_emit_ge_goto
2276 };
2277
2278 #endif /* __x86_64__ */
2279
2280 static void
2281 i386_emit_prologue (void)
2282 {
2283 EMIT_ASM32 (i386_prologue,
2284 "push %ebp\n\t"
2285 "mov %esp,%ebp\n\t"
2286 "push %ebx");
2287 /* At this point, the raw regs base address is at 8(%ebp), and the
2288 value pointer is at 12(%ebp). */
2289 }
2290
2291 static void
2292 i386_emit_epilogue (void)
2293 {
2294 EMIT_ASM32 (i386_epilogue,
2295 "mov 12(%ebp),%ecx\n\t"
2296 "mov %eax,(%ecx)\n\t"
2297 "mov %ebx,0x4(%ecx)\n\t"
2298 "xor %eax,%eax\n\t"
2299 "pop %ebx\n\t"
2300 "pop %ebp\n\t"
2301 "ret");
2302 }
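
/* Throughout the 32-bit emitters a 64-bit value lives in the
   %eax/%ebx pair: %eax holds the low word and %ebx the high word,
   which is why the epilogue above stores them to (%ecx) and
   0x4(%ecx) respectively.  */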
2303
2304 static void
2305 i386_emit_add (void)
2306 {
2307 EMIT_ASM32 (i386_add,
2308 "add (%esp),%eax\n\t"
2309 "adc 0x4(%esp),%ebx\n\t"
2310 "lea 0x8(%esp),%esp");
2311 }
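
/* 64-bit addition on the pair: add the low words first, then fold
   the carry into the high words with "adc".  */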
2312
2313 static void
2314 i386_emit_sub (void)
2315 {
2316 EMIT_ASM32 (i386_sub,
2317 "subl %eax,(%esp)\n\t"
2318 "sbbl %ebx,4(%esp)\n\t"
2319 "pop %eax\n\t"
2320 "pop %ebx\n\t");
2321 }
2322
2323 static void
2324 i386_emit_mul (void)
2325 {
2326 emit_error = 1;
2327 }
2328
2329 static void
2330 i386_emit_lsh (void)
2331 {
2332 emit_error = 1;
2333 }
2334
2335 static void
2336 i386_emit_rsh_signed (void)
2337 {
2338 emit_error = 1;
2339 }
2340
2341 static void
2342 i386_emit_rsh_unsigned (void)
2343 {
2344 emit_error = 1;
2345 }
2346
2347 static void
2348 i386_emit_ext (int arg)
2349 {
2350 switch (arg)
2351 {
2352 case 8:
2353 EMIT_ASM32 (i386_ext_8,
2354 "cbtw\n\t"
2355 "cwtl\n\t"
2356 "movl %eax,%ebx\n\t"
2357 "sarl $31,%ebx");
2358 break;
2359 case 16:
2360 EMIT_ASM32 (i386_ext_16,
2361 "cwtl\n\t"
2362 "movl %eax,%ebx\n\t"
2363 "sarl $31,%ebx");
2364 break;
2365 case 32:
2366 EMIT_ASM32 (i386_ext_32,
2367 "movl %eax,%ebx\n\t"
2368 "sarl $31,%ebx");
2369 break;
2370 default:
2371 emit_error = 1;
2372 }
2373 }
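
/* cbtw/cwtl are the AT&T spellings of cbw/cwde (sign-extend
   %al -> %ax -> %eax); the trailing "sarl $31,%ebx" then broadcasts
   the sign bit into the high word of the pair.  */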
2374
2375 static void
2376 i386_emit_log_not (void)
2377 {
2378 EMIT_ASM32 (i386_log_not,
2379 "or %ebx,%eax\n\t"
2380 "test %eax,%eax\n\t"
2381 "sete %cl\n\t"
2382 "xor %ebx,%ebx\n\t"
2383 "movzbl %cl,%eax");
2384 }
2385
2386 static void
2387 i386_emit_bit_and (void)
2388 {
2389 EMIT_ASM32 (i386_and,
2390 "and (%esp),%eax\n\t"
2391 "and 0x4(%esp),%ebx\n\t"
2392 "lea 0x8(%esp),%esp");
2393 }
2394
2395 static void
2396 i386_emit_bit_or (void)
2397 {
2398 EMIT_ASM32 (i386_or,
2399 "or (%esp),%eax\n\t"
2400 "or 0x4(%esp),%ebx\n\t"
2401 "lea 0x8(%esp),%esp");
2402 }
2403
2404 static void
2405 i386_emit_bit_xor (void)
2406 {
2407 EMIT_ASM32 (i386_xor,
2408 "xor (%esp),%eax\n\t"
2409 "xor 0x4(%esp),%ebx\n\t"
2410 "lea 0x8(%esp),%esp");
2411 }
2412
2413 static void
2414 i386_emit_bit_not (void)
2415 {
2416 EMIT_ASM32 (i386_bit_not,
2417 "xor $0xffffffff,%eax\n\t"
2418 "xor $0xffffffff,%ebx\n\t");
2419 }
2420
2421 static void
2422 i386_emit_equal (void)
2423 {
2424 EMIT_ASM32 (i386_equal,
2425 "cmpl %ebx,4(%esp)\n\t"
2426 "jne .Li386_equal_false\n\t"
2427 "cmpl %eax,(%esp)\n\t"
2428 "je .Li386_equal_true\n\t"
2429 ".Li386_equal_false:\n\t"
2430 "xor %eax,%eax\n\t"
2431 "jmp .Li386_equal_end\n\t"
2432 ".Li386_equal_true:\n\t"
2433 "mov $1,%eax\n\t"
2434 ".Li386_equal_end:\n\t"
2435 "xor %ebx,%ebx\n\t"
2436 "lea 0x8(%esp),%esp");
2437 }
2438
2439 static void
2440 i386_emit_less_signed (void)
2441 {
2442 EMIT_ASM32 (i386_less_signed,
2443 "cmpl %ebx,4(%esp)\n\t"
2444 "jl .Li386_less_signed_true\n\t"
2445 "jne .Li386_less_signed_false\n\t"
2446 "cmpl %eax,(%esp)\n\t"
2447 "jl .Li386_less_signed_true\n\t"
2448 ".Li386_less_signed_false:\n\t"
2449 "xor %eax,%eax\n\t"
2450 "jmp .Li386_less_signed_end\n\t"
2451 ".Li386_less_signed_true:\n\t"
2452 "mov $1,%eax\n\t"
2453 ".Li386_less_signed_end:\n\t"
2454 "xor %ebx,%ebx\n\t"
2455 "lea 0x8(%esp),%esp");
2456 }
2457
2458 static void
2459 i386_emit_less_unsigned (void)
2460 {
2461 EMIT_ASM32 (i386_less_unsigned,
2462 "cmpl %ebx,4(%esp)\n\t"
2463 "jb .Li386_less_unsigned_true\n\t"
2464 "jne .Li386_less_unsigned_false\n\t"
2465 "cmpl %eax,(%esp)\n\t"
2466 "jb .Li386_less_unsigned_true\n\t"
2467 ".Li386_less_unsigned_false:\n\t"
2468 "xor %eax,%eax\n\t"
2469 "jmp .Li386_less_unsigned_end\n\t"
2470 ".Li386_less_unsigned_true:\n\t"
2471 "mov $1,%eax\n\t"
2472 ".Li386_less_unsigned_end:\n\t"
2473 "xor %ebx,%ebx\n\t"
2474 "lea 0x8(%esp),%esp");
2475 }
2476
2477 static void
2478 i386_emit_ref (int size)
2479 {
2480 switch (size)
2481 {
2482 case 1:
2483 EMIT_ASM32 (i386_ref1,
2484 "movb (%eax),%al");
2485 break;
2486 case 2:
2487 EMIT_ASM32 (i386_ref2,
2488 "movw (%eax),%ax");
2489 break;
2490 case 4:
2491 EMIT_ASM32 (i386_ref4,
2492 "movl (%eax),%eax");
2493 break;
2494 case 8:
2495 EMIT_ASM32 (i386_ref8,
2496 "movl 4(%eax),%ebx\n\t"
2497 "movl (%eax),%eax");
2498 break;
2499 }
2500 }
2501
2502 static void
2503 i386_emit_if_goto (int *offset_p, int *size_p)
2504 {
2505 EMIT_ASM32 (i386_if_goto,
2506 "mov %eax,%ecx\n\t"
2507 "or %ebx,%ecx\n\t"
2508 "pop %eax\n\t"
2509 "pop %ebx\n\t"
2510 "cmpl $0,%ecx\n\t"
2511 /* Don't trust the assembler to choose the right jump */
2512 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2513
2514 if (offset_p)
2515 *offset_p = 11; /* Be sure that this matches the sequence above.  */
2516 if (size_p)
2517 *size_p = 4;
2518 }
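
/* Offset 11 above: "mov %eax,%ecx" (2 bytes), "or %ebx,%ecx" (2),
   "pop %eax" (1), "pop %ebx" (1) and "cmpl $0,%ecx" (3) make 9
   bytes; the 0f 85 jne opcode occupies offsets 9-10 and the rel32
   field to patch starts at offset 11.  */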
2519
2520 static void
2521 i386_emit_goto (int *offset_p, int *size_p)
2522 {
2523 EMIT_ASM32 (i386_goto,
2524 /* Don't trust the assembler to choose the right jump */
2525 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2526 if (offset_p)
2527 *offset_p = 1;
2528 if (size_p)
2529 *size_p = 4;
2530 }
2531
2532 static void
2533 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2534 {
2535 int diff = (to - (from + size));
2536 unsigned char buf[sizeof (int)];
2537
2538 /* We're only doing 4-byte sizes at the moment. */
2539 if (size != 4)
2540 {
2541 emit_error = 1;
2542 return;
2543 }
2544
2545 memcpy (buf, &diff, sizeof (int));
2546 write_inferior_memory (from, buf, sizeof (int));
2547 }
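
/* FROM points at the rel32 field itself (callers add the *offset_p
   reported by the emitters above), so the displacement is relative
   to FROM + 4, the end of the jump instruction.  A hedged usage
   sketch (never compiled; the flow is a stand-in for what the
   bytecode compiler does):  */
#if 0
{
  CORE_ADDR bcaddr = current_insn_ptr;	/* Start of the jump sequence.  */
  CORE_ADDR target;
  int offset, size;

  i386_emit_if_goto (&offset, &size);	/* Test plus placeholder jne.  */
  target = current_insn_ptr;		/* Suppose the jump skips to here.  */
  i386_write_goto_address (bcaddr + offset, target, size);
}
#endif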
2548
2549 static void
2550 i386_emit_const (LONGEST num)
2551 {
2552 unsigned char buf[16];
2553 int i, hi, lo;
2554 CORE_ADDR buildaddr = current_insn_ptr;
2555
2556 i = 0;
2557 buf[i++] = 0xb8; /* mov $<n>,%eax */
2558 lo = num & 0xffffffff;
2559 memcpy (&buf[i], &lo, sizeof (lo));
2560 i += 4;
2561 hi = ((num >> 32) & 0xffffffff);
2562 if (hi)
2563 {
2564 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2565 memcpy (&buf[i], &hi, sizeof (hi));
2566 i += 4;
2567 }
2568 else
2569 {
2570 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2571 }
2572 append_insns (&buildaddr, i, buf);
2573 current_insn_ptr = buildaddr;
2574 }
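
/* The LONGEST constant is split into 32-bit halves: the low word
   always goes into %eax, while the high word is loaded into %ebx
   only when non-zero (otherwise a 2-byte "xor %ebx,%ebx" zeroes
   it).  */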
2575
2576 static void
2577 i386_emit_call (CORE_ADDR fn)
2578 {
2579 unsigned char buf[16];
2580 int i, offset;
2581 CORE_ADDR buildaddr;
2582
2583 buildaddr = current_insn_ptr;
2584 i = 0;
2585 buf[i++] = 0xe8; /* call <reladdr> */
2586 offset = ((int) fn) - (buildaddr + 5);
2587 memcpy (buf + 1, &offset, 4);
2588 append_insns (&buildaddr, 5, buf);
2589 current_insn_ptr = buildaddr;
2590 }
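
/* As in the amd64 version, the rel32 operand is measured from the
   end of the 5-byte call.  The (int) cast assumes a 32-bit inferior,
   where only the low 32 bits of the difference are meaningful.  */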
2591
2592 static void
2593 i386_emit_reg (int reg)
2594 {
2595 unsigned char buf[16];
2596 int i;
2597 CORE_ADDR buildaddr;
2598
2599 EMIT_ASM32 (i386_reg_a,
2600 "sub $0x8,%esp");
2601 buildaddr = current_insn_ptr;
2602 i = 0;
2603 buf[i++] = 0xb8; /* mov $<n>,%eax */
2604 memcpy (&buf[i], &reg, sizeof (reg));
2605 i += 4;
2606 append_insns (&buildaddr, i, buf);
2607 current_insn_ptr = buildaddr;
2608 EMIT_ASM32 (i386_reg_b,
2609 "mov %eax,4(%esp)\n\t"
2610 "mov 8(%ebp),%eax\n\t"
2611 "mov %eax,(%esp)");
2612 i386_emit_call (get_raw_reg_func_addr ());
2613 EMIT_ASM32 (i386_reg_c,
2614 "xor %ebx,%ebx\n\t"
2615 "lea 0x8(%esp),%esp");
2616 }
2617
2618 static void
2619 i386_emit_pop (void)
2620 {
2621 EMIT_ASM32 (i386_pop,
2622 "pop %eax\n\t"
2623 "pop %ebx");
2624 }
2625
2626 static void
2627 i386_emit_stack_flush (void)
2628 {
2629 EMIT_ASM32 (i386_stack_flush,
2630 "push %ebx\n\t"
2631 "push %eax");
2632 }
2633
2634 static void
2635 i386_emit_zero_ext (int arg)
2636 {
2637 switch (arg)
2638 {
2639 case 8:
2640 EMIT_ASM32 (i386_zero_ext_8,
2641 "and $0xff,%eax\n\t"
2642 "xor %ebx,%ebx");
2643 break;
2644 case 16:
2645 EMIT_ASM32 (i386_zero_ext_16,
2646 "and $0xffff,%eax\n\t"
2647 "xor %ebx,%ebx");
2648 break;
2649 case 32:
2650 EMIT_ASM32 (i386_zero_ext_32,
2651 "xor %ebx,%ebx");
2652 break;
2653 default:
2654 emit_error = 1;
2655 }
2656 }
2657
2658 static void
2659 i386_emit_swap (void)
2660 {
2661 EMIT_ASM32 (i386_swap,
2662 "mov %eax,%ecx\n\t"
2663 "mov %ebx,%edx\n\t"
2664 "pop %eax\n\t"
2665 "pop %ebx\n\t"
2666 "push %edx\n\t"
2667 "push %ecx");
2668 }
2669
2670 static void
2671 i386_emit_stack_adjust (int n)
2672 {
2673 unsigned char buf[16];
2674 int i;
2675 CORE_ADDR buildaddr = current_insn_ptr;
2676
2677 i = 0;
2678 buf[i++] = 0x8d; /* lea <n>(%esp),%esp */
2679 buf[i++] = 0x64;
2680 buf[i++] = 0x24;
2681 buf[i++] = n * 8; /* Must fit in a signed 8-bit displacement.  */
2682 append_insns (&buildaddr, i, buf);
2683 current_insn_ptr = buildaddr;
2684 }
2685
2686 /* FN's prototype is `LONGEST(*fn)(int)'. */
2687
2688 static void
2689 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2690 {
2691 unsigned char buf[16];
2692 int i;
2693 CORE_ADDR buildaddr;
2694
2695 EMIT_ASM32 (i386_int_call_1_a,
2696 /* Reserve a bit of stack space. */
2697 "sub $0x8,%esp");
2698 /* Put the one argument on the stack. */
2699 buildaddr = current_insn_ptr;
2700 i = 0;
2701 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2702 buf[i++] = 0x04;
2703 buf[i++] = 0x24;
2704 memcpy (&buf[i], &arg1, sizeof (arg1));
2705 i += 4;
2706 append_insns (&buildaddr, i, buf);
2707 current_insn_ptr = buildaddr;
2708 i386_emit_call (fn);
2709 EMIT_ASM32 (i386_int_call_1_c,
2710 "mov %edx,%ebx\n\t"
2711 "lea 0x8(%esp),%esp");
2712 }
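
/* Under the i386 cdecl convention FN returns its LONGEST result in
   %edx:%eax, so only the high word needs moving ("mov %edx,%ebx")
   to land the value in the emitters' %eax/%ebx pair.  */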
2713
2714 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2715
2716 static void
2717 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2718 {
2719 unsigned char buf[16];
2720 int i;
2721 CORE_ADDR buildaddr;
2722
2723 EMIT_ASM32 (i386_void_call_2_a,
2724 /* Preserve %eax only; we don't have to worry about %ebx. */
2725 "push %eax\n\t"
2726 /* Reserve a bit of stack space for arguments. */
2727 "sub $0x10,%esp\n\t"
2728 /* Copy "top" to the second argument position. (Note that
2729 we can't assume the function won't scribble on its
2730 arguments, so don't try to restore from this.) */
2731 "mov %eax,4(%esp)\n\t"
2732 "mov %ebx,8(%esp)");
2733 /* Put the first argument on the stack. */
2734 buildaddr = current_insn_ptr;
2735 i = 0;
2736 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2737 buf[i++] = 0x04;
2738 buf[i++] = 0x24;
2739 memcpy (&buf[i], &arg1, sizeof (arg1));
2740 i += 4;
2741 append_insns (&buildaddr, i, buf);
2742 current_insn_ptr = buildaddr;
2743 i386_emit_call (fn);
2744 EMIT_ASM32 (i386_void_call_2_b,
2745 "lea 0x10(%esp),%esp\n\t"
2746 /* Restore original stack top. */
2747 "pop %eax");
2748 }
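
/* Layout of the 16 reserved bytes: ARG1 at (%esp) and the LONGEST
   top-of-stack value at 4(%esp)/8(%esp), matching a cdecl
   `void fn (int, LONGEST)' frame.  */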
2749
2750
2751 static void
2752 i386_emit_eq_goto (int *offset_p, int *size_p)
2753 {
2754 EMIT_ASM32 (eq,
2755 /* Check the low half first; it is the more likely decider.  */
2756 "cmpl %eax,(%esp)\n\t"
2757 "jne .Leq_fallthru\n\t"
2758 "cmpl %ebx,4(%esp)\n\t"
2759 "jne .Leq_fallthru\n\t"
2760 "lea 0x8(%esp),%esp\n\t"
2761 "pop %eax\n\t"
2762 "pop %ebx\n\t"
2763 /* jmp, but don't trust the assembler to choose the right jump */
2764 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2765 ".Leq_fallthru:\n\t"
2766 "lea 0x8(%esp),%esp\n\t"
2767 "pop %eax\n\t"
2768 "pop %ebx");
2769
2770 if (offset_p)
2771 *offset_p = 18;
2772 if (size_p)
2773 *size_p = 4;
2774 }
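
/* Offset 18: "cmpl %eax,(%esp)" (3 bytes), "jne" rel8 (2),
   "cmpl %ebx,4(%esp)" (4), "jne" rel8 (2), "lea 0x8(%esp),%esp" (4),
   "pop %eax" (1) and "pop %ebx" (1) total 17 bytes, putting the e9
   opcode at offset 17 and its rel32 at 18.  The ne variant below has
   the same byte count; lt/le/gt/ge each add one more 2-byte jcc,
   hence their offset of 20.  */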
2775
2776 static void
2777 i386_emit_ne_goto (int *offset_p, int *size_p)
2778 {
2779 EMIT_ASM32 (ne,
2780 /* Check the low half first; it is the more likely decider.  */
2781 "cmpl %eax,(%esp)\n\t"
2782 "jne .Lne_jump\n\t"
2783 "cmpl %ebx,4(%esp)\n\t"
2784 "je .Lne_fallthru\n\t"
2785 ".Lne_jump:\n\t"
2786 "lea 0x8(%esp),%esp\n\t"
2787 "pop %eax\n\t"
2788 "pop %ebx\n\t"
2789 /* jmp, but don't trust the assembler to choose the right jump */
2790 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2791 ".Lne_fallthru:\n\t"
2792 "lea 0x8(%esp),%esp\n\t"
2793 "pop %eax\n\t"
2794 "pop %ebx");
2795
2796 if (offset_p)
2797 *offset_p = 18;
2798 if (size_p)
2799 *size_p = 4;
2800 }
2801
2802 static void
2803 i386_emit_lt_goto (int *offset_p, int *size_p)
2804 {
2805 EMIT_ASM32 (lt,
2806 "cmpl %ebx,4(%esp)\n\t"
2807 "jl .Llt_jump\n\t"
2808 "jne .Llt_fallthru\n\t"
2809 "cmpl %eax,(%esp)\n\t"
2810 "jnl .Llt_fallthru\n\t"
2811 ".Llt_jump:\n\t"
2812 "lea 0x8(%esp),%esp\n\t"
2813 "pop %eax\n\t"
2814 "pop %ebx\n\t"
2815 /* jmp, but don't trust the assembler to choose the right jump */
2816 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2817 ".Llt_fallthru:\n\t"
2818 "lea 0x8(%esp),%esp\n\t"
2819 "pop %eax\n\t"
2820 "pop %ebx");
2821
2822 if (offset_p)
2823 *offset_p = 20;
2824 if (size_p)
2825 *size_p = 4;
2826 }
2827
2828 static void
2829 i386_emit_le_goto (int *offset_p, int *size_p)
2830 {
2831 EMIT_ASM32 (le,
2832 "cmpl %ebx,4(%esp)\n\t"
2833 "jle .Lle_jump\n\t"
2834 "jne .Lle_fallthru\n\t"
2835 "cmpl %eax,(%esp)\n\t"
2836 "jnle .Lle_fallthru\n\t"
2837 ".Lle_jump:\n\t"
2838 "lea 0x8(%esp),%esp\n\t"
2839 "pop %eax\n\t"
2840 "pop %ebx\n\t"
2841 /* jmp, but don't trust the assembler to choose the right jump */
2842 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2843 ".Lle_fallthru:\n\t"
2844 "lea 0x8(%esp),%esp\n\t"
2845 "pop %eax\n\t"
2846 "pop %ebx");
2847
2848 if (offset_p)
2849 *offset_p = 20;
2850 if (size_p)
2851 *size_p = 4;
2852 }
2853
2854 static void
2855 i386_emit_gt_goto (int *offset_p, int *size_p)
2856 {
2857 EMIT_ASM32 (gt,
2858 "cmpl %ebx,4(%esp)\n\t"
2859 "jg .Lgt_jump\n\t"
2860 "jne .Lgt_fallthru\n\t"
2861 "cmpl %eax,(%esp)\n\t"
2862 "jng .Lgt_fallthru\n\t"
2863 ".Lgt_jump:\n\t"
2864 "lea 0x8(%esp),%esp\n\t"
2865 "pop %eax\n\t"
2866 "pop %ebx\n\t"
2867 /* jmp, but don't trust the assembler to choose the right jump */
2868 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2869 ".Lgt_fallthru:\n\t"
2870 "lea 0x8(%esp),%esp\n\t"
2871 "pop %eax\n\t"
2872 "pop %ebx");
2873
2874 if (offset_p)
2875 *offset_p = 20;
2876 if (size_p)
2877 *size_p = 4;
2878 }
2879
2880 static void
2881 i386_emit_ge_goto (int *offset_p, int *size_p)
2882 {
2883 EMIT_ASM32 (ge,
2884 "cmpl %ebx,4(%esp)\n\t"
2885 "jge .Lge_jump\n\t"
2886 "jne .Lge_fallthru\n\t"
2887 "cmpl %eax,(%esp)\n\t"
2888 "jnge .Lge_fallthru\n\t"
2889 ".Lge_jump:\n\t"
2890 "lea 0x8(%esp),%esp\n\t"
2891 "pop %eax\n\t"
2892 "pop %ebx\n\t"
2893 /* jmp, but don't trust the assembler to choose the right jump */
2894 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2895 ".Lge_fallthru:\n\t"
2896 "lea 0x8(%esp),%esp\n\t"
2897 "pop %eax\n\t"
2898 "pop %ebx");
2899
2900 if (offset_p)
2901 *offset_p = 20;
2902 if (size_p)
2903 *size_p = 4;
2904 }
2905
2906 struct emit_ops i386_emit_ops =
2907 {
2908 i386_emit_prologue,
2909 i386_emit_epilogue,
2910 i386_emit_add,
2911 i386_emit_sub,
2912 i386_emit_mul,
2913 i386_emit_lsh,
2914 i386_emit_rsh_signed,
2915 i386_emit_rsh_unsigned,
2916 i386_emit_ext,
2917 i386_emit_log_not,
2918 i386_emit_bit_and,
2919 i386_emit_bit_or,
2920 i386_emit_bit_xor,
2921 i386_emit_bit_not,
2922 i386_emit_equal,
2923 i386_emit_less_signed,
2924 i386_emit_less_unsigned,
2925 i386_emit_ref,
2926 i386_emit_if_goto,
2927 i386_emit_goto,
2928 i386_write_goto_address,
2929 i386_emit_const,
2930 i386_emit_call,
2931 i386_emit_reg,
2932 i386_emit_pop,
2933 i386_emit_stack_flush,
2934 i386_emit_zero_ext,
2935 i386_emit_swap,
2936 i386_emit_stack_adjust,
2937 i386_emit_int_call_1,
2938 i386_emit_void_call_2,
2939 i386_emit_eq_goto,
2940 i386_emit_ne_goto,
2941 i386_emit_lt_goto,
2942 i386_emit_le_goto,
2943 i386_emit_gt_goto,
2944 i386_emit_ge_goto
2945 };
2946
2947
2948 static struct emit_ops *
2949 x86_emit_ops (void)
2950 {
2951 #ifdef __x86_64__
2952 int use_64bit = register_size (0) == 8;
2953
2954 if (use_64bit)
2955 return &amd64_emit_ops;
2956 else
2957 #endif
2958 return &i386_emit_ops;
2959 }
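
/* register_size (0) reports the size of register number 0 (%eax
   vs. %rax) from the register layout selected by the arch setup, so
   an 8-byte answer identifies a 64-bit inferior.  */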
2960
2961 /* This is initialized assuming an amd64 target.
2962 x86_arch_setup will correct it as needed for the actual (i386 or amd64) target.  */
2963
2964 struct linux_target_ops the_low_target =
2965 {
2966 x86_arch_setup,
2967 -1,
2968 NULL,
2969 NULL,
2970 NULL,
2971 NULL,
2972 x86_get_pc,
2973 x86_set_pc,
2974 x86_breakpoint,
2975 x86_breakpoint_len,
2976 NULL,
2977 1,
2978 x86_breakpoint_at,
2979 x86_insert_point,
2980 x86_remove_point,
2981 x86_stopped_by_watchpoint,
2982 x86_stopped_data_address,
2983 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2984 native i386 case (no registers smaller than an xfer unit), and are not
2985 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2986 NULL,
2987 NULL,
2988 /* Need to fix up i386 siginfo if the host is amd64.  */
2989 x86_siginfo_fixup,
2990 x86_linux_new_process,
2991 x86_linux_new_thread,
2992 x86_linux_prepare_to_resume,
2993 x86_linux_process_qsupported,
2994 x86_supports_tracepoints,
2995 x86_get_thread_area,
2996 x86_install_fast_tracepoint_jump_pad,
2997 x86_emit_ops,
2998 x86_get_min_fast_tracepoint_insn_len,
2999 };