/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

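/* A 5-byte relative jump: 0xe9 is the opcode of the x86 `jmp rel32'
   instruction, and the four zero bytes are a placeholder for the
   32-bit displacement that is patched in whenever a jump to or from
   a jump pad is written out.  */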
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

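/* In the map below, -1 marks GDB register numbers that have no slot
   in the `struct user' layout (the x87 and SSE registers); those are
   transferred by the floating-point and xstate regsets instead, and
   the fill/store routines skip them.  */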
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

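    /* PTRACE_GET_THREAD_AREA fills in a struct user_desc; its second
       word is the segment's base address, which is all that
       libthread_db needs here.  */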
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

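    /* A segment selector's low three bits hold the requested
       privilege level and the table indicator; shifting them off
       leaves the descriptor-table entry number that
       PTRACE_GET_THREAD_AREA expects.  */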
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update target_regsets accordingly, maybe by moving
   target_regsets to linux_target_ops and setting the right one there,
   rather than having to modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

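/* 0xCC is the single-byte INT3 instruction, the standard x86
   software breakpoint; its one-byte encoding lets it overwrite the
   start of any instruction.  */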
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later, just before resuming the
         lwp; here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved in some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the inferior's DR6 debug status register.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Breakpoint/Watchpoint support.  */

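/* The TYPE argument of the routines below is the Z-packet type
   character from the remote protocol: '0' is a software breakpoint,
   '2' a write watchpoint, '3' a read watchpoint, and '4' an access
   watchpoint.  */
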
static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

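  /* The processor sets bits in DR6 when a watchpoint or hardware
     breakpoint triggers, but never clears them on its own, so clear
     the status register before resuming; otherwise a stale hit could
     be reported again later.  */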
  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}

/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
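    /* The kernel pads siginfo out to 128 bytes; si_signo, si_errno
       and si_code account for the three ints subtracted below.  */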
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid and si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}

static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

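      /* The kernel stashes a copy of XCR0 in the software-usable
         bytes of the FXSAVE region inside the XSAVE buffer, which
         start at byte offset 464; that is where the magic constant
         below comes from.  */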
      /* Get XCR0 from the XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process the qSupported query, "xmlRegisters=".  Update the buffer
   size for PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in the qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

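/* Translate a string of hexadecimal byte values such as "48 89 e6"
   into raw machine code at BUF, returning the number of bytes
   emitted.  This keeps the hand-assembled sequences in the jump-pad
   builders below readable.  */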
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction for the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

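  /* Take the tracepoint's spin lock.  %rax holds the expected free
     value (zero) and %rcx the address of this thread's collecting_t;
     `lock cmpxchg' installs %rcx at *lockaddr only if *lockaddr is
     still zero, and the jne loops back until that succeeds.  */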
  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction for the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves the original
     i386 behind (cmpxchg first appeared on the 486).  If we cared
     about that, this could use xchg instead.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end);
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":"); \
    } while (0)
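
/* For example, EMIT_ASM (foo, "pop %rax") assembles the instruction
   between the start_foo and end_foo labels inside the enclosing
   function's body (the leading jmp keeps it from ever executing
   there), and add_insns then copies those bytes into the buffer at
   current_insn_ptr.  */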

#ifdef __x86_64__

#define EMIT_ASM32(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
               "\t" "jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":\n" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":\n" \
               ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME, INSNS) EMIT_ASM (NAME, INSNS)

#endif

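/* Both the amd64 and i386 emitters below follow the agent-expression
   compiler's stack discipline: the top of the evaluation stack is
   cached in %rax (in %ebx:%eax for 64-bit values on i386), with the
   remaining entries on the machine stack, so binary operations
   combine %rax with (%rsp) and then pop.  */
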
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

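/* In the goto emitters below, *OFFSET_P receives the offset of the
   32-bit displacement within the emitted sequence and *SIZE_P its
   width, so that amd64_write_goto_address can patch the branch target
   in later.  The raw .byte sequences pin down the rel32 encodings
   (0x0f 0x85 is `jne', 0xe9 is `jmp').  */
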
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
         a register, so avoid it if possible.  Use r10, since it is
         call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */
      buf[i++] = 0xe8; /* call <reladdr>; the opcode byte must be
                          emitted before the displacement, as the
                          offset computation above accounts for it.  */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            ".Lamd64_ge_jump:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2116
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

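/* In the 32-bit emitters below, a 64-bit bytecode value is kept split
   across a register pair: %eax holds the low word and %ebx the high
   word (note the adc/sbbl carry propagation in the add and sub
   emitters), with deeper stack entries spilled to the machine stack
   in the same low-word-first layout.  */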
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp). */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t");
}

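/* The next three emitters deliberately open-code nothing: 64-bit
   multiply and shifts are awkward on i386, so they just set
   emit_error, which aborts bytecode compilation for this expression
   (the expression can then presumably be handled by the agent
   expression interpreter instead).  */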
static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

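/* Patch the 4-byte displacement of a jump previously emitted at FROM
   so that it lands on TO.  SIZE is the size of the displacement field
   reported by the goto emitter; the displacement is relative to the
   first byte after that field, hence TO - (FROM + SIZE).  */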
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment. */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'. */

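/* Unlike the amd64 version, this follows the 32-bit cdecl convention:
   ARG1 is stored directly into the stack slot at (%esp), and FN's
   LONGEST result comes back in the %eax/%edx pair, so the high word
   is moved into %ebx afterwards to match the bytecode stack layout.  */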
static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space. */
              "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'. */

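/* Here the LONGEST second argument is the current top-of-stack value,
   passed as its two 32-bit halves (%eax low, %ebx high).  %eax is
   caller-saved and so must be preserved explicitly around the call,
   while %ebx is callee-saved under the i386 calling convention, which
   is why the code below doesn't have to worry about it.  */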
static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx. */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments. */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume the function won't scribble on its
                 arguments, so don't try to restore from this.) */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top. */
              "pop %eax");
}

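/* The 32-bit conditional-goto emitters below mirror the amd64 ones,
   but each 64-bit comparison must be done as two 32-bit compares:
   eq/ne check the low words first (more likely to differ), while the
   ordered comparisons check the high words first.  As above,
   *OFFSET_P and *SIZE_P describe the jump displacement to be patched
   by i386_write_goto_address.  */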
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

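/* Pick the emitter table matching the inferior: in a biarch build,
   a register size of 8 for register 0 identifies a 64-bit inferior;
   otherwise the i386 emitters are used.  */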
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* Need to fix up i386 siginfo if host is amd64.  */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops
};