/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
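/* jump_insn is a 5-byte `jmp rel32' (opcode 0xe9) with a zero
   displacement; small_jump_insn uses the 0x66 operand-size prefix to
   form a 4-byte `jmp rel16'.  The displacement bytes are patched in
   when the fast tracepoint jump pads are wired up below.  */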

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};
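/* The -1 slots above are registers (the FP, SSE and similar sets) that
   have no slot in the `struct user' general-purpose area; they are
   transferred by the FP/XSAVE regsets instead, so the gregset
   fill/store routines skip them.  */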

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

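    /* The four words returned by PTRACE_GET_THREAD_AREA follow the
       struct user_desc layout; desc[1] is the base_addr field.  */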
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

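    /* The GDT index is the selector value with the low three bits (the
       RPL and table-indicator bits) shifted off.  */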
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

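/* Each entry below pairs a ptrace get/set request with the fill/store
   routines that translate between the kernel buffer and the regcache.
   Setting an entry's size to 0 disables it at runtime (see
   x86_linux_update_xmltarget); the all-zero row terminates the
   table.  */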
struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

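/* The breakpoint instruction is the single-byte int3 trap, opcode 0xCC.  */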
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved by other means.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Breakpoint/Watchpoint support.  */

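/* TYPE below is the Z-packet point type: '0' is a software breakpoint,
   while '2', '3' and '4' are write, read and access watchpoints, which
   are backed by the debug registers.  */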
static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1, si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}

static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
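/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 (`mov %rsp,%rsi') into BUF and returns 3.  */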

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);
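  /* The loop above spins on `lock cmpxchg', trying to change the word
     at LOCKADDR from zero to the address of this pad's collecting_t
     object on the stack; it keeps retrying while another thread holds
     the lock.  */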

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (in_process_agent_loaded ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
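/* For example, EMIT_ASM (amd64_pop, "pop %rax") has the compiler
   assemble `pop %rax' between the start_amd64_pop and end_amd64_pop
   labels (jumping over it so it never executes in gdbserver itself),
   and copies those bytes to the current compiled-code address.  */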

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)
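/* On a 64-bit build, the .code32/.code64 directives switch the
   assembler into 32-bit encoding around the template, so the same
   mechanism can emit code for a 32-bit inferior.  */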

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

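/* Emit a conditional jump to a yet-unknown address.  The raw bytes
   `0x0f, 0x85, 0x0, 0x0, 0x0, 0x0' below encode `jne rel32' with a
   zero displacement; *OFFSET_P reports where the 4-byte displacement
   field sits in the emitted sequence so that amd64_write_goto_address
   can patch it once the target is known.  */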
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1957
1958static void
1959amd64_emit_reg (int reg)
1960{
1961 unsigned char buf[16];
1962 int i;
1963 CORE_ADDR buildaddr;
1964
1965 /* Assume raw_regs is still in %rdi. */
1966 buildaddr = current_insn_ptr;
1967 i = 0;
1968 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1969 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1970 i += 4;
1971 append_insns (&buildaddr, i, buf);
1972 current_insn_ptr = buildaddr;
1973 amd64_emit_call (get_raw_reg_func_addr ());
1974}
1975
1976static void
1977amd64_emit_pop (void)
1978{
1979 EMIT_ASM (amd64_pop,
1980 "pop %rax");
1981}
1982
1983static void
1984amd64_emit_stack_flush (void)
1985{
1986 EMIT_ASM (amd64_stack_flush,
1987 "push %rax");
1988}
1989
1990static void
1991amd64_emit_zero_ext (int arg)
1992{
1993 switch (arg)
1994 {
1995 case 8:
1996 EMIT_ASM (amd64_zero_ext_8,
1997 "and $0xff,%rax");
1998 break;
1999 case 16:
2000 EMIT_ASM (amd64_zero_ext_16,
2001 "and $0xffff,%rax");
2002 break;
2003 case 32:
2004 EMIT_ASM (amd64_zero_ext_32,
2005 "mov $0xffffffff,%rcx\n\t"
2006 "and %rcx,%rax");
2007 break;
2008 default:
2009 emit_error = 1;
2010 }
2011}
2012
2013static void
2014amd64_emit_swap (void)
2015{
2016 EMIT_ASM (amd64_swap,
2017 "mov %rax,%rcx\n\t"
2018 "pop %rax\n\t"
2019 "push %rcx");
2020}
2021
2022static void
2023amd64_emit_stack_adjust (int n)
2024{
2025 unsigned char buf[16];
2026 int i;
2027 CORE_ADDR buildaddr = current_insn_ptr;
2028
2029 i = 0;
2030 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2031 buf[i++] = 0x8d;
2032 buf[i++] = 0x64;
2033 buf[i++] = 0x24;
2034 /* This only handles adjustments up to 16, but we don't expect any more. */
2035 buf[i++] = n * 8;
2036 append_insns (&buildaddr, i, buf);
2037 current_insn_ptr = buildaddr;
2038}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
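
/* ARG1 travels in %edi, the first integer-argument register of the
   SysV AMD64 ABI, and FN's LONGEST result comes back in %rax, which
   doubles as the compiled expression's top-of-stack register.  */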

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
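
/* Why *OFFSET_P is 13: cmp %rax,(%rsp) is 4 bytes, jne <rel8> is 2,
   lea 0x8(%rsp),%rsp is 5 and pop %rax is 1, so the e9 opcode of the
   raw jmp lands at offset 12 and its 4-byte displacement, later
   patched by amd64_write_goto_address, at offset 13.  The same layout
   holds for the remaining amd64_emit_*_goto functions below.  */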

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
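
/* On i386 the 64-bit top-of-stack value lives in the %eax (low word)
   / %ebx (high word) pair; the epilogue above stores that pair
   through the value pointer at 12(%ebp) and returns 0 to report
   success.  */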

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
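
/* A note on the four stubs above: 64-bit multiply and shifts are left
   unimplemented on i386, so they set emit_error; the caller is then
   expected to give up on compiling this agent expression rather than
   emit a wrong sequence.  */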

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
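
/* The offset of 11 follows from the sequence above: mov %eax,%ecx
   (2 bytes), or %ebx,%ecx (2), pop %eax (1), pop %ebx (1) and
   cmpl $0,%ecx (3) add up to 9, and the two-byte 0f 85 jne opcode
   then puts the 4-byte displacement at offset 11.  */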

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
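
/* For example, i386_emit_const (0x100000002LL) appends
   b8 02 00 00 00 (mov $0x2,%eax) followed by bb 01 00 00 00
   (mov $0x1,%ebx); a constant with an all-zero high word gets the
   two-byte xor %ebx,%ebx instead.  */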

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check the low half first; it's more likely to be the decider.  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
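
/* Why *OFFSET_P is 18 here: cmpl %eax,(%esp) is 3 bytes, jne <rel8>
   is 2, cmpl %ebx,4(%esp) is 4, the second jne is 2, lea
   0x8(%esp),%esp is 4, and the two pops are 1 byte each, so the e9
   opcode lands at offset 17 and its displacement at 18.  */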

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check the low half first; it's more likely to be the decider.  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
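
/* The lt/le/gt/ge variants run to 20 because the 4-byte
   cmpl %ebx,4(%esp) comes first: 4 + 2 (jl) + 2 (jne) + 3 (cmpl
   %eax,(%esp)) + 2 (jnl) + 4 (lea) + 1 + 1 (pops) = 19, placing the
   jmp displacement at offset 20.  */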

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
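
/* A hedged sketch of how this hooks in: the generic tracepoint code
   is expected to fetch this table through the target's emit_ops hook
   and drive it once per agent-expression opcode, bracketed by
   emit_prologue/emit_epilogue, with forward jumps patched through
   write_goto_address once their destinations are known.  */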

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
};