PR gdb/14290:
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
0b302171 3 Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
aa5ca48f 20#include <stddef.h>
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e
L
28#include "i386-xstate.h"
29#include "elf/common.h"
d0722149
DE
30
31#include "gdb_proc_service.h"
58b4daa5 32#include "agent.h"
d0722149 33
90884b2b 34/* Defined in auto-generated file i386-linux.c. */
d0722149 35void init_registers_i386_linux (void);
90884b2b
L
36/* Defined in auto-generated file amd64-linux.c. */
37void init_registers_amd64_linux (void);
1570b33e
L
38/* Defined in auto-generated file i386-avx-linux.c. */
39void init_registers_i386_avx_linux (void);
40/* Defined in auto-generated file amd64-avx-linux.c. */
41void init_registers_amd64_avx_linux (void);
3a13a53b
L
42/* Defined in auto-generated file i386-mmx-linux.c. */
43void init_registers_i386_mmx_linux (void);
4d47af5c
L
44/* Defined in auto-generated file x32-linux.c. */
45void init_registers_x32_linux (void);
46/* Defined in auto-generated file x32-avx-linux.c. */
47void init_registers_x32_avx_linux (void);
1570b33e 48
fa593d66 49static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 50static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 51
1570b33e
L
52/* Backward compatibility for gdb without XML support. */
53
54static const char *xmltarget_i386_linux_no_xml = "@<target>\
55<architecture>i386</architecture>\
56<osabi>GNU/Linux</osabi>\
57</target>";
f6d1620c
L
58
59#ifdef __x86_64__
1570b33e
L
60static const char *xmltarget_amd64_linux_no_xml = "@<target>\
61<architecture>i386:x86-64</architecture>\
62<osabi>GNU/Linux</osabi>\
63</target>";
f6d1620c 64#endif
d0722149
DE
65
66#include <sys/reg.h>
67#include <sys/procfs.h>
68#include <sys/ptrace.h>
1570b33e
L
69#include <sys/uio.h>
70
71#ifndef PTRACE_GETREGSET
72#define PTRACE_GETREGSET 0x4204
73#endif
74
75#ifndef PTRACE_SETREGSET
76#define PTRACE_SETREGSET 0x4205
77#endif
78
d0722149
DE
79
80#ifndef PTRACE_GET_THREAD_AREA
81#define PTRACE_GET_THREAD_AREA 25
82#endif
83
84/* This definition comes from prctl.h, but some kernels may not have it. */
85#ifndef PTRACE_ARCH_PRCTL
86#define PTRACE_ARCH_PRCTL 30
87#endif
88
89/* The following definitions come from prctl.h, but may be absent
90 for certain configurations. */
91#ifndef ARCH_GET_FS
92#define ARCH_SET_GS 0x1001
93#define ARCH_SET_FS 0x1002
94#define ARCH_GET_FS 0x1003
95#define ARCH_GET_GS 0x1004
96#endif
97
aa5ca48f
DE
98/* Per-process arch-specific data we want to keep. */
99
100struct arch_process_info
101{
102 struct i386_debug_reg_state debug_reg_state;
103};
104
105/* Per-thread arch-specific data we want to keep. */
106
107struct arch_lwp_info
108{
109 /* Non-zero if our copy differs from what's recorded in the thread. */
110 int debug_registers_changed;
111};
112
d0722149
DE
113#ifdef __x86_64__
114
115/* Mapping between the general-purpose registers in `struct user'
116 format and GDB's register array layout.
117 Note that the transfer layout uses 64-bit regs. */
118static /*const*/ int i386_regmap[] =
119{
120 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
121 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
122 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
123 DS * 8, ES * 8, FS * 8, GS * 8
124};
125
126#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
127
128/* So code below doesn't have to care, i386 or amd64. */
129#define ORIG_EAX ORIG_RAX
130
131static const int x86_64_regmap[] =
132{
133 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
134 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
135 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
136 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
137 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
138 DS * 8, ES * 8, FS * 8, GS * 8,
139 -1, -1, -1, -1, -1, -1, -1, -1,
140 -1, -1, -1, -1, -1, -1, -1, -1,
141 -1, -1, -1, -1, -1, -1, -1, -1,
142 -1, -1, -1, -1, -1, -1, -1, -1, -1,
143 ORIG_RAX * 8
144};
145
146#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
147
148#else /* ! __x86_64__ */
149
150/* Mapping between the general-purpose registers in `struct user'
151 format and GDB's register array layout. */
152static /*const*/ int i386_regmap[] =
153{
154 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
155 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
156 EIP * 4, EFL * 4, CS * 4, SS * 4,
157 DS * 4, ES * 4, FS * 4, GS * 4
158};
159
160#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
161
162#endif
163\f
164/* Called by libthread_db. */
165
166ps_err_e
167ps_get_thread_area (const struct ps_prochandle *ph,
168 lwpid_t lwpid, int idx, void **base)
169{
170#ifdef __x86_64__
171 int use_64bit = register_size (0) == 8;
172
173 if (use_64bit)
174 {
175 switch (idx)
176 {
177 case FS:
178 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
179 return PS_OK;
180 break;
181 case GS:
182 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
183 return PS_OK;
184 break;
185 default:
186 return PS_BADADDR;
187 }
188 return PS_ERR;
189 }
190#endif
191
192 {
193 unsigned int desc[4];
194
195 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
196 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
197 return PS_ERR;
198
199 *(int *)base = desc[1];
200 return PS_OK;
201 }
202}
fa593d66
PA
203
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      /* 64-bit inferior: the per-thread address is the FS base.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    /* 32-bit inferior: the GS selector's upper bits are the GDT index
       of the thread-area entry (selector >> 3 drops the RPL/TI bits).  */
    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the base-address word of the returned user_desc
       structure (see set_thread_area(2)).  */
    *addr = desc[1];
    return 0;
  }
}
249
250
d0722149
DE
251\f
252static int
253i386_cannot_store_register (int regno)
254{
255 return regno >= I386_NUM_REGS;
256}
257
258static int
259i386_cannot_fetch_register (int regno)
260{
261 return regno >= I386_NUM_REGS;
262}
263
264static void
442ea881 265x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
266{
267 int i;
268
269#ifdef __x86_64__
270 if (register_size (0) == 8)
271 {
272 for (i = 0; i < X86_64_NUM_REGS; i++)
273 if (x86_64_regmap[i] != -1)
442ea881 274 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
275 return;
276 }
277#endif
278
279 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 280 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 281
442ea881
PA
282 collect_register_by_name (regcache, "orig_eax",
283 ((char *) buf) + ORIG_EAX * 4);
d0722149
DE
284}
285
286static void
442ea881 287x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
288{
289 int i;
290
291#ifdef __x86_64__
292 if (register_size (0) == 8)
293 {
294 for (i = 0; i < X86_64_NUM_REGS; i++)
295 if (x86_64_regmap[i] != -1)
442ea881 296 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
297 return;
298 }
299#endif
300
301 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 302 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 303
442ea881
PA
304 supply_register_by_name (regcache, "orig_eax",
305 ((char *) buf) + ORIG_EAX * 4);
d0722149
DE
306}
307
/* Fill BUF (an i387 fsave/fxsave area) from the register cache.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifndef __x86_64__
  i387_cache_to_fsave (regcache, buf);
#else
  i387_cache_to_fxsave (regcache, buf);
#endif
}
317
/* Store BUF (an i387 fsave/fxsave area) into the register cache.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifndef __x86_64__
  i387_fsave_to_cache (regcache, buf);
#else
  i387_fxsave_to_cache (regcache, buf);
#endif
}
327
328#ifndef __x86_64__
329
/* Fill BUF (an FXSAVE area) from the register cache.  Compiled only
   for 32-bit; the 64-bit build handles FXSAVE via x86_fill_fpregset.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
335
/* Store BUF (an FXSAVE area) into the register cache.  Compiled only
   for 32-bit; the 64-bit build handles FXSAVE via x86_store_fpregset.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
341
342#endif
343
1570b33e
L
/* Fill BUF (an XSAVE extended-state area) from the register cache.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
349
/* Store BUF (an XSAVE extended-state area) into the register cache.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
355
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  /* General-purpose registers.  */
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* XSAVE extended state.  Size starts at 0 (disabled) and is set at
     runtime once XCR0 is known; see x86_linux_update_xmltarget.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  /* FXSAVE (SSE) registers, 32-bit only.  May be disabled at runtime
     by zeroing the size if PTRACE_GETFPXREGS turns out not to work.  */
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  /* Classic i387 FPU registers.  */
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  /* End-of-table marker.  */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
386
387static CORE_ADDR
442ea881 388x86_get_pc (struct regcache *regcache)
d0722149
DE
389{
390 int use_64bit = register_size (0) == 8;
391
392 if (use_64bit)
393 {
394 unsigned long pc;
442ea881 395 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
396 return (CORE_ADDR) pc;
397 }
398 else
399 {
400 unsigned int pc;
442ea881 401 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
402 return (CORE_ADDR) pc;
403 }
404}
405
406static void
442ea881 407x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149
DE
408{
409 int use_64bit = register_size (0) == 8;
410
411 if (use_64bit)
412 {
413 unsigned long newpc = pc;
442ea881 414 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
415 }
416 else
417 {
418 unsigned int newpc = pc;
442ea881 419 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
420 }
421}
422\f
423static const unsigned char x86_breakpoint[] = { 0xCC };
424#define x86_breakpoint_len 1
425
426static int
427x86_breakpoint_at (CORE_ADDR pc)
428{
429 unsigned char c;
430
fc7238bb 431 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
432 if (c == 0xCC)
433 return 1;
434
435 return 0;
436}
437\f
aa5ca48f
DE
438/* Support for debug registers. */
439
440static unsigned long
441x86_linux_dr_get (ptid_t ptid, int regnum)
442{
443 int tid;
444 unsigned long value;
445
446 tid = ptid_get_lwp (ptid);
447
448 errno = 0;
449 value = ptrace (PTRACE_PEEKUSER, tid,
450 offsetof (struct user, u_debugreg[regnum]), 0);
451 if (errno != 0)
452 error ("Couldn't read debug register");
453
454 return value;
455}
456
457static void
458x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
459{
460 int tid;
461
462 tid = ptid_get_lwp (ptid);
463
464 errno = 0;
465 ptrace (PTRACE_POKEUSER, tid,
466 offsetof (struct user, u_debugreg[regnum]), value);
467 if (errno != 0)
468 error ("Couldn't write debug register");
469}
470
964e4306
PA
/* find_inferior callback: mark each LWP of the process whose pid is
   *PID_P as needing a debug-register refresh, stopping it if
   necessary so the refresh can happen.  Always returns 0 so the walk
   visits every LWP.  */

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
493
aa5ca48f
DE
494/* Update the inferior's debug register REGNUM from STATE. */
495
496void
497i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
498{
964e4306 499 /* Only update the threads of this process. */
aa5ca48f
DE
500 int pid = pid_of (get_thread_lwp (current_inferior));
501
502 if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
503 fatal ("Invalid debug register %d", regnum);
504
964e4306
PA
505 find_inferior (&all_lwps, update_debug_registers_callback, &pid);
506}
aa5ca48f 507
964e4306 508/* Return the inferior's debug register REGNUM. */
aa5ca48f 509
964e4306
PA
510CORE_ADDR
511i386_dr_low_get_addr (int regnum)
512{
513 struct lwp_info *lwp = get_thread_lwp (current_inferior);
514 ptid_t ptid = ptid_of (lwp);
515
516 /* DR6 and DR7 are retrieved with some other way. */
0a5b1e09 517 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306
PA
518
519 return x86_linux_dr_get (ptid, regnum);
aa5ca48f
DE
520}
521
522/* Update the inferior's DR7 debug control register from STATE. */
523
524void
525i386_dr_low_set_control (const struct i386_debug_reg_state *state)
526{
964e4306 527 /* Only update the threads of this process. */
aa5ca48f
DE
528 int pid = pid_of (get_thread_lwp (current_inferior));
529
964e4306
PA
530 find_inferior (&all_lwps, update_debug_registers_callback, &pid);
531}
aa5ca48f 532
964e4306
PA
533/* Return the inferior's DR7 debug control register. */
534
535unsigned
536i386_dr_low_get_control (void)
537{
538 struct lwp_info *lwp = get_thread_lwp (current_inferior);
539 ptid_t ptid = ptid_of (lwp);
540
541 return x86_linux_dr_get (ptid, DR_CONTROL);
aa5ca48f
DE
542}
543
544/* Get the value of the DR6 debug status register from the inferior
545 and record it in STATE. */
546
964e4306
PA
547unsigned
548i386_dr_low_get_status (void)
aa5ca48f
DE
549{
550 struct lwp_info *lwp = get_thread_lwp (current_inferior);
551 ptid_t ptid = ptid_of (lwp);
552
964e4306 553 return x86_linux_dr_get (ptid, DR_STATUS);
aa5ca48f
DE
554}
555\f
90d74c30 556/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
557
558static int
559x86_insert_point (char type, CORE_ADDR addr, int len)
560{
561 struct process_info *proc = current_process ();
562 switch (type)
563 {
961bd387 564 case '0': /* software-breakpoint */
90d74c30
PA
565 {
566 int ret;
567
568 ret = prepare_to_access_memory ();
569 if (ret)
570 return -1;
571 ret = set_gdb_breakpoint_at (addr);
0146f85b 572 done_accessing_memory ();
90d74c30
PA
573 return ret;
574 }
961bd387
ME
575 case '1': /* hardware-breakpoint */
576 case '2': /* write watchpoint */
577 case '3': /* read watchpoint */
578 case '4': /* access watchpoint */
aa5ca48f
DE
579 return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
580 type, addr, len);
961bd387 581
aa5ca48f
DE
582 default:
583 /* Unsupported. */
584 return 1;
585 }
586}
587
588static int
589x86_remove_point (char type, CORE_ADDR addr, int len)
590{
591 struct process_info *proc = current_process ();
592 switch (type)
593 {
961bd387 594 case '0': /* software-breakpoint */
90d74c30
PA
595 {
596 int ret;
597
598 ret = prepare_to_access_memory ();
599 if (ret)
600 return -1;
601 ret = delete_gdb_breakpoint_at (addr);
0146f85b 602 done_accessing_memory ();
90d74c30
PA
603 return ret;
604 }
961bd387
ME
605 case '1': /* hardware-breakpoint */
606 case '2': /* write watchpoint */
607 case '3': /* read watchpoint */
608 case '4': /* access watchpoint */
aa5ca48f
DE
609 return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
610 type, addr, len);
611 default:
612 /* Unsupported. */
613 return 1;
614 }
615}
616
617static int
618x86_stopped_by_watchpoint (void)
619{
620 struct process_info *proc = current_process ();
621 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
622}
623
624static CORE_ADDR
625x86_stopped_data_address (void)
626{
627 struct process_info *proc = current_process ();
628 CORE_ADDR addr;
629 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
630 &addr))
631 return addr;
632 return 0;
633}
634\f
635/* Called when a new process is created. */
636
637static struct arch_process_info *
638x86_linux_new_process (void)
639{
640 struct arch_process_info *info = xcalloc (1, sizeof (*info));
641
642 i386_low_init_dregs (&info->debug_reg_state);
643
644 return info;
645}
646
647/* Called when a new thread is detected. */
648
649static struct arch_lwp_info *
650x86_linux_new_thread (void)
651{
652 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
653
654 info->debug_registers_changed = 1;
655
656 return info;
657}
658
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      /* Write only the address registers that are in use; unused
	 slots are left with whatever value the thread had.  */
      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      /* DR_CONTROL is written after the addresses -- NOTE(review):
	 this ordering appears deliberate (enable bits should see
	 valid addresses); preserve it.  */
      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  /* Clear stale DR6 status, both when we just rewrote watchpoints and
     when the previous stop was itself a watchpoint hit.  */
  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
696\f
d0722149
DE
697/* When GDBSERVER is built as a 64-bit application on linux, the
698 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
699 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
700 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
701 conversion in-place ourselves. */
702
703/* These types below (compat_*) define a siginfo type that is layout
704 compatible with the siginfo type exported by the 32-bit userspace
705 support. */
706
707#ifdef __x86_64__
708
709typedef int compat_int_t;
710typedef unsigned int compat_uptr_t;
711
712typedef int compat_time_t;
713typedef int compat_timer_t;
714typedef int compat_clock_t;
715
716struct compat_timeval
717{
718 compat_time_t tv_sec;
719 int tv_usec;
720};
721
722typedef union compat_sigval
723{
724 compat_int_t sival_int;
725 compat_uptr_t sival_ptr;
726} compat_sigval_t;
727
728typedef struct compat_siginfo
729{
730 int si_signo;
731 int si_errno;
732 int si_code;
733
734 union
735 {
736 int _pad[((128 / sizeof (int)) - 3)];
737
738 /* kill() */
739 struct
740 {
741 unsigned int _pid;
742 unsigned int _uid;
743 } _kill;
744
745 /* POSIX.1b timers */
746 struct
747 {
748 compat_timer_t _tid;
749 int _overrun;
750 compat_sigval_t _sigval;
751 } _timer;
752
753 /* POSIX.1b signals */
754 struct
755 {
756 unsigned int _pid;
757 unsigned int _uid;
758 compat_sigval_t _sigval;
759 } _rt;
760
761 /* SIGCHLD */
762 struct
763 {
764 unsigned int _pid;
765 unsigned int _uid;
766 int _status;
767 compat_clock_t _utime;
768 compat_clock_t _stime;
769 } _sigchld;
770
771 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
772 struct
773 {
774 unsigned int _addr;
775 } _sigfault;
776
777 /* SIGPOLL */
778 struct
779 {
780 int _band;
781 int _fd;
782 } _sigpoll;
783 } _sifields;
784} compat_siginfo_t;
785
c92b5177
L
786/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
787typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
788
789typedef struct compat_x32_siginfo
790{
791 int si_signo;
792 int si_errno;
793 int si_code;
794
795 union
796 {
797 int _pad[((128 / sizeof (int)) - 3)];
798
799 /* kill() */
800 struct
801 {
802 unsigned int _pid;
803 unsigned int _uid;
804 } _kill;
805
806 /* POSIX.1b timers */
807 struct
808 {
809 compat_timer_t _tid;
810 int _overrun;
811 compat_sigval_t _sigval;
812 } _timer;
813
814 /* POSIX.1b signals */
815 struct
816 {
817 unsigned int _pid;
818 unsigned int _uid;
819 compat_sigval_t _sigval;
820 } _rt;
821
822 /* SIGCHLD */
823 struct
824 {
825 unsigned int _pid;
826 unsigned int _uid;
827 int _status;
828 compat_x32_clock_t _utime;
829 compat_x32_clock_t _stime;
830 } _sigchld;
831
832 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
833 struct
834 {
835 unsigned int _addr;
836 } _sigfault;
837
838 /* SIGPOLL */
839 struct
840 {
841 int _band;
842 int _fd;
843 } _sigpoll;
844 } _sifields;
845} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
846
d0722149
DE
847#define cpt_si_pid _sifields._kill._pid
848#define cpt_si_uid _sifields._kill._uid
849#define cpt_si_timerid _sifields._timer._tid
850#define cpt_si_overrun _sifields._timer._overrun
851#define cpt_si_status _sifields._sigchld._status
852#define cpt_si_utime _sifields._sigchld._utime
853#define cpt_si_stime _sifields._sigchld._stime
854#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
855#define cpt_si_addr _sifields._sigfault._addr
856#define cpt_si_band _sifields._sigpoll._band
857#define cpt_si_fd _sifields._sigpoll._fd
858
859/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
860 In their place is si_timer1,si_timer2. */
861#ifndef si_timerid
862#define si_timerid si_timer1
863#endif
864#ifndef si_overrun
865#define si_overrun si_timer2
866#endif
867
/* Convert the native (64-bit) siginfo FROM into the 32-bit compat
   layout TO.  Which union members are meaningful is chosen first by
   si_code (SI_TIMER, SI_USER, negative user-send codes) and otherwise
   by si_signo.  Pointer-valued fields are narrowed via intptr_t.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-generated signal (SI_QUEUE etc.).  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
923
/* Convert the 32-bit compat siginfo FROM back into the native
   (64-bit) layout TO.  Mirror image of compat_siginfo_from_siginfo;
   pointer-valued fields are widened via intptr_t.  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-generated signal (SI_QUEUE etc.).  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
979
c92b5177
L
/* Convert the native siginfo FROM into the x32 compat layout TO.
   Same dispatch as compat_siginfo_from_siginfo; the x32 layout
   differs only in the _sigchld clock_t alignment (see
   compat_x32_clock_t above).  */

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-generated signal (SI_QUEUE etc.).  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1036
/* Convert the x32 compat siginfo FROM back into the native layout TO.
   Mirror image of compat_x32_siginfo_from_siginfo.  */

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-generated signal (SI_QUEUE etc.).  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1093
1094/* Is this process 64-bit? */
1095static int linux_is_elf64;
d0722149
DE
1096#endif /* __x86_64__ */
1097
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      /* The conversions assume the two layouts are the same total
	 size; bail out loudly if the build says otherwise.  */
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!linux_is_elf64 && sizeof (void *) == 8)
    {
      /* x32 inferior under a 64-bit gdbserver: use the x32 layout,
	 which differs from the i386 one in clock_t alignment.  */
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  /* Same layout on both sides; no conversion needed.  */
  return 0;
}
1140\f
1570b33e
L
/* Nonzero if GDB announced (via the qSupported "xmlRegisters=" value)
   that it understands x86 XML target descriptions.  Set by
   x86_linux_process_qsupported below.  */
static int use_xml;

/* Update gdbserver_xmltarget.

   Selects the register set layout (i386 / amd64 / x32, with or
   without AVX, or MMX-only when FPX regs are unavailable) to match
   the current inferior and the features the kernel exposes, and
   points gdbserver_xmltarget at the matching description.  Flushes
   the regcache first, since the register layout may change.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  /* Probe results are cached across calls: XCR0 of the inferior, and
     whether PTRACE_GETREGSET is usable (-1 = not yet probed).  */
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  /* -1 = not yet probed whether PTRACE_GETFPXREGS works.  */
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  /* 8 XMM registers means a 32-bit inferior; otherwise distinguish
     native amd64 (ELF64) from x32.  */
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else if (linux_is_elf64)
    init_registers_amd64_linux ();
  else
    init_registers_x32_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    /* Kernel has no FPX support: fall back to x87-only.  */
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		{
		  regset->size = 0;
		  break;
		}
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      /* Without XML we can only describe up to SSE.  */
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  Size the XSTATE
	 regset to match XCR0, and disable all other non-GP regsets.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else if (linux_is_elf64)
	    init_registers_amd64_avx_linux ();
	  else
	    init_registers_x32_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}
1276
1277/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1278 PTRACE_GETREGSET. */
1279
1280static void
1281x86_linux_process_qsupported (const char *query)
1282{
1283 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1284 with "i386" in qSupported query, it supports x86 XML target
1285 descriptions. */
1286 use_xml = 0;
1287 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1288 {
1289 char *copy = xstrdup (query + 13);
1290 char *p;
1291
1292 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1293 {
1294 if (strcmp (p, "i386") == 0)
1295 {
1296 use_xml = 1;
1297 break;
1298 }
1299 }
1300
1301 free (copy);
1302 }
1303
1304 x86_linux_update_xmltarget ();
1305}
1306
/* Initialize gdbserver for the architecture of the inferior.

   Inspects /proc/<pid>/exe to decide between an i386, amd64 or x32
   inferior, errors out on impossible combinations (64-bit inferior
   under a 32-bit gdbserver), and configures the_low_target and the
   XML target description accordingly.  */

static void
x86_arch_setup (void)
{
  int pid = pid_of (get_thread_lwp (current_inferior));
  unsigned int machine;
  /* > 0: ELF64 executable; 0: ELF32; < 0: could not read the exe.  */
  int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#ifdef __x86_64__
  if (is_elf64 < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (machine == EM_X86_64)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      /* Record ELF64 vs x32 so the right tdesc is chosen later.  */
      linux_is_elf64 = is_elf64;
      x86_linux_update_xmltarget ();
      return;
    }

  linux_is_elf64 = 0;
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}
1364
/* Tracepoints are unconditionally supported on this target (the
   jump-pad builders below implement them for both i386 and amd64).  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1370
fa593d66
PA
1371static void
1372append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1373{
1374 write_inferior_memory (*to, buf, len);
1375 *to += len;
1376}
1377
/* Decode an ASCII string of hexadecimal byte values, e.g. "48 89 e6",
   into raw bytes stored at BUF.  Parsing stops at the first character
   sequence that is not a valid hex number (including the terminating
   NUL).  Returns the number of bytes written.  The caller must ensure
   BUF is large enough.

   OP is only read, so it is const-qualified; all callers pass string
   literals.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* strtoul consumed nothing: end of the byte list.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1397
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   TPOINT is the address of the tracepoint object, TPADDR the address
   the tracepoint is set at, COLLECTOR the collection function to
   call, LOCKADDR the address of the spin-lock word, and ORIG_SIZE the
   length of the instruction being replaced.  On entry *JUMP_ENTRY is
   where the pad may be built; on exit it is advanced past the pad.
   *ADJUSTED_INSN_ADDR .. *ADJUSTED_INSN_ADDR_END delimit the
   relocated original instruction inside the pad.  TRAMPOLINE /
   TRAMPOLINE_SIZE are unused on amd64 (5-byte jumps always fit).
   Returns 0 on success, or 1 with an "E." message in ERR when a jump
   displacement does not fit in 32 bits.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1588
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   i386 counterpart of amd64_install_fast_tracepoint_jump_pad; see
   that function for the parameter contract.  Additionally, when the
   instruction being replaced is only 4 bytes (too small for a 5-byte
   jump), a trampoline is allocated and a 4-byte 16-bit-offset jump is
   used, returned via TRAMPOLINE / TRAMPOLINE_SIZE.  Returns 0 on
   success, 1 with an "E." message in ERR on failure.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Relative call to the collector (jump_insn is also 5 bytes, the
     size of an e8 call).  */
  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the pushes above.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1776
/* Install a fast tracepoint jump pad, dispatching to the amd64 or
   i386 builder depending on the inferior's register size (8 bytes
   means a 64-bit inferior).  Returns 0 on success, nonzero on failure
   with an error message in ERR.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1814
1815/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1816 architectures. */
1817
1818static int
1819x86_get_min_fast_tracepoint_insn_len (void)
1820{
1821 static int warned_about_fast_tracepoints = 0;
1822
1823#ifdef __x86_64__
1824 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1825 used for fast tracepoints. */
1826 if (register_size (0) == 8)
1827 return 5;
1828#endif
1829
58b4daa5 1830 if (agent_loaded_p ())
405f8e94
SS
1831 {
1832 char errbuf[IPA_BUFSIZ];
1833
1834 errbuf[0] = '\0';
1835
1836 /* On x86, if trampolines are available, then 4-byte jump instructions
1837 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1838 with a 4-byte offset are used instead. */
1839 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1840 return 4;
1841 else
1842 {
1843 /* GDB has no channel to explain to user why a shorter fast
1844 tracepoint is not possible, but at least make GDBserver
1845 mention that something has gone awry. */
1846 if (!warned_about_fast_tracepoints)
1847 {
1848 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1849 warned_about_fast_tracepoints = 1;
1850 }
1851 return 5;
1852 }
1853 }
1854 else
1855 {
1856 /* Indicate that the minimum length is currently unknown since the IPA
1857 has not loaded yet. */
1858 return 0;
1859 }
fa593d66
PA
1860}
1861
6a271cae
PA
1862static void
1863add_insns (unsigned char *start, int len)
1864{
1865 CORE_ADDR buildaddr = current_insn_ptr;
1866
1867 if (debug_threads)
1868 fprintf (stderr, "Adding %d bytes of insn at %s\n",
1869 len, paddress (buildaddr));
1870
1871 append_insns (&buildaddr, len, start);
1872 current_insn_ptr = buildaddr;
1873}
1874
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Copy the inline-asm fragment INSNS (bracketed by the start_NAME /
   end_NAME labels that the asm itself defines) into the jump pad via
   add_insns.  The leading jmp skips the fragment so it is never
   executed in gdbserver itself.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Like EMIT_ASM, but assemble INSNS as 32-bit code even in a 64-bit
   gdbserver (.code32/.code64 around the fragment), for compiling
   bytecode targeting 32-bit inferiors.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1912
#ifdef __x86_64__

/* The amd64_emit_* routines below compile agent-expression operations
   to native amd64 code.  The convention visible in these fragments:
   the expression stack's top value is kept in %rax, deeper entries on
   the hardware stack.  */

/* Function entry: set up a frame and save the two incoming arguments
   (%rdi, %rsi) at -8(%rbp) and -16(%rbp).  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Function exit: store the result (%rax) through the pointer saved at
   -16(%rbp), return 0.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* top += next; pop.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* top = next - top; pop.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply is not implemented; flag a compile error.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; flag a compile error.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic right shift is not implemented; flag a compile error.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical right shift is not implemented; flag a compile error.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
1977
/* Sign-extend the low ARG bits of %rax to the full 64 bits.  Only 8,
   16 and 32 are supported; anything else flags a compile error.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* top = !top (logical negation: 1 if zero, else 0).  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* top &= next; pop.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* top |= next; pop.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* top ^= next; pop.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2035
/* top = ~top (bitwise complement, via xor with all-ones).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* top = (next == top); pop one entry.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* top = (next < top), signed compare; pop one entry.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* top = (next < top), unsigned compare; pop one entry.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Dereference: load SIZE bytes from the address in %rax into %rax's
   low bytes.  SIZE must be 1, 2, 4 or 8; others emit nothing.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
2108
/* Conditional branch: pop the top value and jump if it is nonzero.
   Emits a jne (0x0f 0x85) with a zero placeholder displacement; tells
   the caller the displacement field starts 10 bytes into the fragment
   and is 4 bytes wide, so it can be patched later.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Unconditional jmp (0xe9) with a zero placeholder displacement at
   byte 1, 4 bytes wide, to be patched by the caller.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2133
2134static void
2135amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2136{
2137 int diff = (to - (from + size));
2138 unsigned char buf[sizeof (int)];
2139
2140 if (size != 4)
2141 {
2142 emit_error = 1;
2143 return;
2144 }
2145
2146 memcpy (buf, &diff, sizeof (int));
2147 write_inferior_memory (from, buf, sizeof (int));
2148}
2149
/* Push constant: emit "movabs $NUM,%rax" (0x48 0xb8 + 8 immediate
   bytes) so NUM becomes the new top of the expression stack.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2164
2165static void
2166amd64_emit_call (CORE_ADDR fn)
2167{
2168 unsigned char buf[16];
2169 int i;
2170 CORE_ADDR buildaddr;
4e29fb54 2171 LONGEST offset64;
6a271cae
PA
2172
2173 /* The destination function being in the shared library, may be
2174 >31-bits away off the compiled code pad. */
2175
2176 buildaddr = current_insn_ptr;
2177
2178 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2179
2180 i = 0;
2181
2182 if (offset64 > INT_MAX || offset64 < INT_MIN)
2183 {
2184 /* Offset is too large for a call. Use callq, but that requires
2185 a register, so avoid it if possible. Use r10, since it is
2186 call-clobbered, we don't have to push/pop it. */
2187 buf[i++] = 0x48; /* mov $fn,%r10 */
2188 buf[i++] = 0xba;
2189 memcpy (buf + i, &fn, 8);
2190 i += 8;
2191 buf[i++] = 0xff; /* callq *%r10 */
2192 buf[i++] = 0xd2;
2193 }
2194 else
2195 {
2196 int offset32 = offset64; /* we know we can't overflow here. */
2197 memcpy (buf + i, &offset32, 4);
2198 i += 4;
2199 }
2200
2201 append_insns (&buildaddr, i, buf);
2202 current_insn_ptr = buildaddr;
2203}
2204
/* Push the value of raw register REG: load the register number into
   %esi (0xbe = mov imm32,%esi) and call the get_raw_reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
2222
/* Discard the top of the expression stack into %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Spill the top-of-stack value (%rax) to the hardware stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Zero-extend the low ARG bits of %rax; 8, 16 and 32 bits supported,
   anything else flags a compile error.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Exchange the top two stack entries.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
2268
/* Drop N 8-byte entries from the stack via "lea N*8(%rsp),%rsp"
   (8-bit signed displacement form).  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2286
/* FN's prototype is `LONGEST(*fn)(int)'.

   Emit code to call FN with the constant ARG1 as its single argument:
   load ARG1 into %edi (first integer argument register) and call.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
2305
/* FN's prototype is `void(*fn)(int,LONGEST)'.

   Emit code to call FN with ARG1 in %edi and the expression stack's
   top value (%rax) as the second argument in %rsi, preserving the
   top value across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2332
/* The following six emitters implement compare-and-branch for the
   agent expression "goto if" operations.  Each compares the next
   stack entry against the top (%rax), pops both, and falls into a
   hand-assembled "jmp rel32" (0xe9) when the condition holds.  The
   rel32 field is left zero here; *OFFSET_P/*SIZE_P report its byte
   offset (13) and size (4) within the sequence so the caller can
   patch in the real target later.  */

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if the two top stack entries are unequal.  */

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if (second entry) < (top), signed.  */

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if (second entry) <= (top), signed.  */

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if (second entry) > (top), signed.  */

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if (second entry) >= (top), signed.  */

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2453
/* Vector of bytecode-compilation emitters for 64-bit (amd64)
   inferiors; selected by x86_emit_ops when registers are 8 bytes.
   The slot order must match struct emit_ops.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
2496
/* Emit the i386 function prologue for a compiled agent expression:
   set up a frame pointer and save %ebx, which the compiled code uses
   as the high half of the 64-bit top-of-stack value.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp). */
}
2507
/* Emit the i386 epilogue: store the 64-bit result (%eax low, %ebx
   high) through the value pointer at 12(%ebp), return 0 in %eax, and
   restore the saved registers.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2520
/* 64-bit add: add the memory-stack entry to the %eax/%ebx pair (with
   carry into the high half) and drop the memory entry.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2529
/* 64-bit subtract: (second entry) - (top).  The subtraction is done
   in place on the memory entry (with borrow into its high word), then
   the result is popped into %eax/%ebx.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2539
/* 64-bit multiply is not implemented for i386; flag a compile error
   so the expression falls back to interpretation.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

/* 64-bit left shift: not implemented on i386.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

/* 64-bit arithmetic right shift: not implemented on i386.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* 64-bit logical right shift: not implemented on i386.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2563
/* Sign-extend the low ARG bits of the top of stack to the full 64-bit
   %eax/%ebx pair.  Only 8, 16 and 32 are supported; anything else
   sets emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      /* cbtw/cwtl widen %al to %eax, then replicate the sign bit
	 into %ebx.  */
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2591
/* Logical NOT of the 64-bit top of stack: result is 1 if both halves
   are zero, else 0.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2602
/* 64-bit bitwise AND of the top two stack entries; pops the memory
   entry.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* 64-bit bitwise OR of the top two stack entries.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* 64-bit bitwise XOR of the top two stack entries.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2629
/* 64-bit bitwise NOT of the top of stack (complement both halves).  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2637
/* 64-bit equality test: push 1 if the top two stack entries are
   equal, else 0.  Compares high halves first, pops the memory entry
   at the end.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2655
/* 64-bit signed less-than: push 1 if (second entry) < (top), else 0.
   High halves decide unless equal, in which case the low halves are
   compared.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* 64-bit unsigned less-than; same structure as the signed variant but
   using the below/jb conditions.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2693
/* Dereference the top of stack as an address and replace it with the
   SIZE-byte value read from inferior memory.  For sizes below 8 only
   the low part of %eax is written; the 8-byte case also fills %ebx
   with the high word.  Unsupported sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2718
/* Pop the 64-bit top of stack and branch if it is non-zero.  The jne
   rel32 is hand-assembled (0x0f 0x85) so the 4-byte displacement is
   at a fixed position; *OFFSET_P/*SIZE_P tell the caller where to
   patch the target in.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

/* Unconditional branch, emitted as a raw "jmp rel32" (0xe9) with the
   displacement left for the caller to patch at offset 1.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2748
2749static void
2750i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2751{
2752 int diff = (to - (from + size));
2753 unsigned char buf[sizeof (int)];
2754
2755 /* We're only doing 4-byte sizes at the moment. */
2756 if (size != 4)
2757 {
2758 emit_error = 1;
2759 return;
2760 }
2761
2762 memcpy (buf, &diff, sizeof (int));
2763 write_inferior_memory (from, buf, sizeof (int));
2764}
2765
/* Load the 64-bit constant NUM into the top-of-stack pair: emit
   "mov $lo,%eax" and either "mov $hi,%ebx" or the shorter
   "xor %ebx,%ebx" when the high word is zero.  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2792
2793static void
2794i386_emit_call (CORE_ADDR fn)
2795{
2796 unsigned char buf[16];
2797 int i, offset;
2798 CORE_ADDR buildaddr;
2799
2800 buildaddr = current_insn_ptr;
2801 i = 0;
2802 buf[i++] = 0xe8; /* call <reladdr> */
2803 offset = ((int) fn) - (buildaddr + 5);
2804 memcpy (buf + 1, &offset, 4);
2805 append_insns (&buildaddr, 5, buf);
2806 current_insn_ptr = buildaddr;
2807}
2808
/* Push the value of raw register REG: reserve two argument slots,
   build a call to the raw-register reader with the regs base (from
   8(%ebp)) and REG as arguments, then clean up.  The reader's result
   is left in %eax; %ebx (high half) is zeroed.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2834
/* Discard the cached top of stack by reloading the %eax/%ebx pair
   from the memory stack.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

/* Flush the cached 64-bit top (%eax low, %ebx high) to the memory
   stack, high word first.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2850
/* Zero-extend the low ARG bits of the top of stack; the high word in
   %ebx is always cleared.  Only 8, 16 and 32 are supported; anything
   else sets emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2874
/* Exchange the top two 64-bit stack entries, using %ecx/%edx as
   scratch for the cached pair.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2886
/* Drop N 64-bit entries (8 bytes each) from the memory stack by
   emitting "lea <n*8>(%esp),%esp".  As with the amd64 version, the
   single displacement byte only handles adjustments up to 16, but we
   don't expect any more.  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2902
/* Emit a call to FN with the single int argument ARG1 passed on the
   stack.  FN's prototype is `LONGEST(*fn)(int)'; its 64-bit result
   (returned in %eax/%edx per the i386 ABI) is moved into the
   %eax/%ebx top-of-stack pair.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space. */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2930
/* Emit a call to FN passing ARG1 and the current 64-bit top of stack
   on the stack.  FN's prototype is `void(*fn)(int,LONGEST)'.  The
   top-of-stack value is preserved across the call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx. */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments. */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position. (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.) */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top. */
	      "pop %eax");
}
2966
/* The following six emitters implement 64-bit compare-and-branch for
   the agent expression "goto if" operations on i386.  Each compares
   the memory-stack entry against the %eax/%ebx pair, pops both
   values, and falls into a hand-assembled "jmp rel32" (0xe9) when the
   condition holds.  *OFFSET_P/*SIZE_P report the byte offset and size
   of the rel32 field so the caller can patch the branch target.  */

void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

/* Branch if the top two entries are unequal.  */

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

/* Branch if (second entry) < (top), signed: high halves decide
   unless equal, then low halves.  */

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

/* Branch if (second entry) <= (top), signed.  */

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

/* Branch if (second entry) > (top), signed.  */

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

/* Branch if (second entry) >= (top), signed.  */

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3122
/* Vector of bytecode-compilation emitters for 32-bit (i386)
   inferiors; selected by x86_emit_ops when registers are not 8 bytes.
   The slot order must match struct emit_ops.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3163
3164
3165static struct emit_ops *
3166x86_emit_ops (void)
3167{
3168#ifdef __x86_64__
3169 int use_64bit = register_size (0) == 8;
3170
3171 if (use_64bit)
3172 return &amd64_emit_ops;
3173 else
3174#endif
3175 return &i386_emit_ops;
3176}
3177
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.

   NOTE(review): slot meanings follow the field order of struct
   linux_target_ops (declared in linux-low.h, not visible here);
   verify against that declaration before reordering anything.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  NULL,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
};
This page took 0.444546 seconds and 4 git commands to generate.