gdbserver/linux-low: turn 'regs_info' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target descriptions of all processes; a new GDB has
     connected, and it may or may not support XML target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

protected:

  void low_arch_setup () override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8, 22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		  /* MPX registers BND0 ... BND3.  */
  -1, -1,			  /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1				  /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

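/* Illustration only (not part of the upstream file): the regmaps above
   turn a GDB register number into a byte offset inside the ptrace
   register buffer.  A minimal sketch of a lookup, assuming BUF was
   filled by PTRACE_GETREGS; the helper name is hypothetical.  */
#if 0
static long
example_read_greg (const char *buf, int regno)
{
  long val;

  /* i386_regmap[regno] is the byte offset of GDB register REGNO in
     `struct user' layout; REGSIZE is 4 or 8 depending on the build.  */
  memcpy (&val, buf + i386_regmap[regno], REGSIZE);
  return val;
}
#endif
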
#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

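/* Illustration only (not part of the upstream file): the shift by
   reg_thread_area above works because an x86 segment selector packs
   the descriptor-table index in its high bits.  Sketch, with an
   assumed selector value:  */
#if 0
unsigned int selector = 0x33;		/* example %gs value */
unsigned int gdt_index = selector >> 3; /* 6: what PTRACE_GET_THREAD_AREA wants */
unsigned int table_bit = (selector >> 2) & 1; /* 0 = GDT, 1 = LDT */
unsigned int rpl = selector & 3;	/* requested privilege level */
#endif
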
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

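/* Illustration only (not part of the upstream file): the
   sign-extension above matters because the kernel inspects the full
   64-bit %rax when deciding whether to restart an interrupted
   syscall.  Sketch:  */
#if 0
int32_t eax = -514;	/* e.g. -ERESTARTNOHAND, 0xfffffdfe in 32 bits */
int64_t slot = eax;	/* sign-extends to 0xfffffffffffffdfe */
/* Writing only the low 32 bits would leave 0x00000000fffffdfe, a
   large positive number, and the restart logic would not trigger.  */
#endif
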
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  the_target->read_memory (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

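/* Editorial note (not part of the upstream file): the x86_dr_* calls
   above operate on a software mirror of DR0..DR7 kept in
   x86_debug_reg_state; the x86_linux_dr_* routines wired into
   x86_dr_low are what eventually flush that mirror to each LWP via
   ptrace, so the bookkeeping here never has to touch the inferior
   directly.  */
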
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}

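/* Illustration only (not part of the upstream file): why the fixup is
   needed.  The structs below are simplified, hypothetical renderings
   of the two layouts, just to show the mismatch: pointer-sized
   members make fields land at different offsets and widths.  */
#if 0
struct example_siginfo32
{
  int si_signo, si_errno, si_code;
  uint32_t si_addr;	/* 32-bit pointer, offset 12 */
};

struct example_siginfo64
{
  int si_signo, si_errno, si_code;
  /* 4 bytes of padding here: the pointer below is 8-byte aligned.  */
  uint64_t si_addr;	/* 64-bit pointer, offset 16 */
};
#endif
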
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] are the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

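/* Illustration only (not part of the upstream file): reading the
   OS-enabled feature mask out of an XSAVE buffer with the offset
   defined above, mirroring what x86_linux_read_description does
   below.  The helper name is hypothetical.  */
#if 0
static uint64_t
example_xcr0_from_xsave (const uint64_t *xsave_buf)
{
  /* XCR0 lives in the software-usable area, 464 bytes in.  */
  return xsave_buf[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
}
#endif
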
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target descriptions of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  the_x86_target.update_xmltarget ();
}

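/* Illustration only (not part of the upstream file): an example of
   the input the loop above parses.  If GDB's qSupported packet
   carries the feature string "xmlRegisters=i386,arm,mips" (names
   assumed for illustration), the strtok_r walk visits "i386", "arm",
   "mips" in turn, and the match on "i386" switches use_xml on.  */
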
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall number the inferior trapped on.  This
   should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

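/* Illustration only (not part of the upstream file): push_opcode lets
   the jump-pad builders below write instruction bytes from a readable
   hex string.  Sketch:  */
#if 0
unsigned char buf[16];
int len = push_opcode (buf, "48 89 e6");	/* mov %rsp,%rsi */
/* len == 3 and buf holds { 0x48, 0x89, 0xe6 }.  */
#endif
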
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

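/* Illustration only (not part of the upstream file): the range checks
   above exist because an e9 rel32 jump is relative to the end of its
   own 5-byte encoding.  Sketch of the displacement math; the helper
   name is hypothetical.  */
#if 0
static int64_t
example_rel32_disp (CORE_ADDR insn_addr, CORE_ADDR target)
{
  /* Must fit in a signed 32 bits for the jump to be encodable.  */
  return (int64_t) (target - (insn_addr + sizeof (jump_insn)));
}
#endif
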
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

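/* Illustration only (not part of the upstream file): roughly what
   EMIT_ASM (sample, "push %rax") expands to, inside a
   do { ... } while (0).  The start_/end_ labels bracket the assembled
   bytes inside gdbserver's own text section so add_insns can copy
   them into the compiled bytecode buffer.  */
#if 0
extern unsigned char start_sample, end_sample;
add_insns (&start_sample, &end_sample - &start_sample);
__asm__ ("jmp end_sample\n"
	 "\t" "start_sample:"
	 "\t" "push %rax" "\n"
	 "\t" "end_sample:");
#endif
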
1573
1574#ifdef __x86_64__
1575
1576static void
1577amd64_emit_prologue (void)
1578{
1579 EMIT_ASM (amd64_prologue,
1580 "pushq %rbp\n\t"
1581 "movq %rsp,%rbp\n\t"
1582 "sub $0x20,%rsp\n\t"
1583 "movq %rdi,-8(%rbp)\n\t"
1584 "movq %rsi,-16(%rbp)");
1585}
1586
1587
1588static void
1589amd64_emit_epilogue (void)
1590{
1591 EMIT_ASM (amd64_epilogue,
1592 "movq -16(%rbp),%rdi\n\t"
1593 "movq %rax,(%rdi)\n\t"
1594 "xor %rax,%rax\n\t"
1595 "leave\n\t"
1596 "ret");
1597}
1598
1599static void
1600amd64_emit_add (void)
1601{
1602 EMIT_ASM (amd64_add,
1603 "add (%rsp),%rax\n\t"
1604 "lea 0x8(%rsp),%rsp");
1605}
1606
1607static void
1608amd64_emit_sub (void)
1609{
1610 EMIT_ASM (amd64_sub,
1611 "sub %rax,(%rsp)\n\t"
1612 "pop %rax");
1613}
1614
1615static void
1616amd64_emit_mul (void)
1617{
1618 emit_error = 1;
1619}
1620
1621static void
1622amd64_emit_lsh (void)
1623{
1624 emit_error = 1;
1625}
1626
1627static void
1628amd64_emit_rsh_signed (void)
1629{
1630 emit_error = 1;
1631}
1632
1633static void
1634amd64_emit_rsh_unsigned (void)
1635{
1636 emit_error = 1;
1637}
1638
1639static void
1640amd64_emit_ext (int arg)
1641{
1642 switch (arg)
1643 {
1644 case 8:
1645 EMIT_ASM (amd64_ext_8,
1646 "cbtw\n\t"
1647 "cwtl\n\t"
1648 "cltq");
1649 break;
1650 case 16:
1651 EMIT_ASM (amd64_ext_16,
1652 "cwtl\n\t"
1653 "cltq");
1654 break;
1655 case 32:
1656 EMIT_ASM (amd64_ext_32,
1657 "cltq");
1658 break;
1659 default:
1660 emit_error = 1;
1661 }
1662}
1663
1664static void
1665amd64_emit_log_not (void)
1666{
1667 EMIT_ASM (amd64_log_not,
1668 "test %rax,%rax\n\t"
1669 "sete %cl\n\t"
1670 "movzbq %cl,%rax");
1671}
1672
1673static void
1674amd64_emit_bit_and (void)
1675{
1676 EMIT_ASM (amd64_and,
1677 "and (%rsp),%rax\n\t"
1678 "lea 0x8(%rsp),%rsp");
1679}
1680
1681static void
1682amd64_emit_bit_or (void)
1683{
1684 EMIT_ASM (amd64_or,
1685 "or (%rsp),%rax\n\t"
1686 "lea 0x8(%rsp),%rsp");
1687}
1688
1689static void
1690amd64_emit_bit_xor (void)
1691{
1692 EMIT_ASM (amd64_xor,
1693 "xor (%rsp),%rax\n\t"
1694 "lea 0x8(%rsp),%rsp");
1695}
1696
1697static void
1698amd64_emit_bit_not (void)
1699{
1700 EMIT_ASM (amd64_bit_not,
1701 "xorq $0xffffffffffffffff,%rax");
1702}
1703
1704static void
1705amd64_emit_equal (void)
1706{
1707 EMIT_ASM (amd64_equal,
1708 "cmp %rax,(%rsp)\n\t"
1709 "je .Lamd64_equal_true\n\t"
1710 "xor %rax,%rax\n\t"
1711 "jmp .Lamd64_equal_end\n\t"
1712 ".Lamd64_equal_true:\n\t"
1713 "mov $0x1,%rax\n\t"
1714 ".Lamd64_equal_end:\n\t"
1715 "lea 0x8(%rsp),%rsp");
1716}
1717
1718static void
1719amd64_emit_less_signed (void)
1720{
1721 EMIT_ASM (amd64_less_signed,
1722 "cmp %rax,(%rsp)\n\t"
1723 "jl .Lamd64_less_signed_true\n\t"
1724 "xor %rax,%rax\n\t"
1725 "jmp .Lamd64_less_signed_end\n\t"
1726 ".Lamd64_less_signed_true:\n\t"
1727 "mov $1,%rax\n\t"
1728 ".Lamd64_less_signed_end:\n\t"
1729 "lea 0x8(%rsp),%rsp");
1730}
1731
1732static void
1733amd64_emit_less_unsigned (void)
1734{
1735 EMIT_ASM (amd64_less_unsigned,
1736 "cmp %rax,(%rsp)\n\t"
1737 "jb .Lamd64_less_unsigned_true\n\t"
1738 "xor %rax,%rax\n\t"
1739 "jmp .Lamd64_less_unsigned_end\n\t"
1740 ".Lamd64_less_unsigned_true:\n\t"
1741 "mov $1,%rax\n\t"
1742 ".Lamd64_less_unsigned_end:\n\t"
1743 "lea 0x8(%rsp),%rsp");
1744}
1745
1746static void
1747amd64_emit_ref (int size)
1748{
1749 switch (size)
1750 {
1751 case 1:
1752 EMIT_ASM (amd64_ref1,
1753 "movb (%rax),%al");
1754 break;
1755 case 2:
1756 EMIT_ASM (amd64_ref2,
1757 "movw (%rax),%ax");
1758 break;
1759 case 4:
1760 EMIT_ASM (amd64_ref4,
1761 "movl (%rax),%eax");
1762 break;
1763 case 8:
1764 EMIT_ASM (amd64_ref8,
1765 "movq (%rax),%rax");
1766 break;
1767 }
1768}
1769
1770static void
1771amd64_emit_if_goto (int *offset_p, int *size_p)
1772{
1773 EMIT_ASM (amd64_if_goto,
1774 "mov %rax,%rcx\n\t"
1775 "pop %rax\n\t"
1776 "cmp $0,%rcx\n\t"
1777 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1778 if (offset_p)
1779 *offset_p = 10;
1780 if (size_p)
1781 *size_p = 4;
1782}
1783
1784static void
1785amd64_emit_goto (int *offset_p, int *size_p)
1786{
1787 EMIT_ASM (amd64_goto,
1788 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1789 if (offset_p)
1790 *offset_p = 1;
1791 if (size_p)
1792 *size_p = 4;
1793}
1794
1795static void
1796amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1797{
1798 int diff = (to - (from + size));
1799 unsigned char buf[sizeof (int)];
1800
1801 if (size != 4)
1802 {
1803 emit_error = 1;
1804 return;
1805 }
1806
1807 memcpy (buf, &diff, sizeof (int));
4196ab2a 1808 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
1809}
1810
1811static void
4e29fb54 1812amd64_emit_const (LONGEST num)
6a271cae
PA
1813{
1814 unsigned char buf[16];
1815 int i;
1816 CORE_ADDR buildaddr = current_insn_ptr;
1817
1818 i = 0;
1819 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
b00ad6ff 1820 memcpy (&buf[i], &num, sizeof (num));
6a271cae
PA
1821 i += 8;
1822 append_insns (&buildaddr, i, buf);
1823 current_insn_ptr = buildaddr;
1824}
1825
1826static void
1827amd64_emit_call (CORE_ADDR fn)
1828{
1829 unsigned char buf[16];
1830 int i;
1831 CORE_ADDR buildaddr;
4e29fb54 1832 LONGEST offset64;
6a271cae
PA
1833
1834 /* The destination function being in the shared library, may be
1835 >31-bits away off the compiled code pad. */
1836
1837 buildaddr = current_insn_ptr;
1838
1839 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1840
1841 i = 0;
1842
1843 if (offset64 > INT_MAX || offset64 < INT_MIN)
1844 {
1845 /* Offset is too large for a call. Use callq, but that requires
1846 a register, so avoid it if possible. Use r10, since it is
1847 call-clobbered, we don't have to push/pop it. */
1848 buf[i++] = 0x48; /* mov $fn,%r10 */
1849 buf[i++] = 0xba;
1850 memcpy (buf + i, &fn, 8);
1851 i += 8;
1852 buf[i++] = 0xff; /* callq *%r10 */
1853 buf[i++] = 0xd2;
1854 }
1855 else
1856 {
1857 int offset32 = offset64; /* we know we can't overflow here. */
ed036b40
PA
1858
1859 buf[i++] = 0xe8; /* call <reladdr> */
6a271cae
PA
1860 memcpy (buf + i, &offset32, 4);
1861 i += 4;
1862 }
1863
1864 append_insns (&buildaddr, i, buf);
1865 current_insn_ptr = buildaddr;
1866}
1867
1868static void
1869amd64_emit_reg (int reg)
1870{
1871 unsigned char buf[16];
1872 int i;
1873 CORE_ADDR buildaddr;
1874
1875 /* Assume raw_regs is still in %rdi. */
1876 buildaddr = current_insn_ptr;
1877 i = 0;
1878 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1879 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1880 i += 4;
1881 append_insns (&buildaddr, i, buf);
1882 current_insn_ptr = buildaddr;
1883 amd64_emit_call (get_raw_reg_func_addr ());
1884}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* N * 8 must fit in the signed 8-bit displacement used here, but
     we don't expect adjustments that large.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
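
/* In each comparison emitter above, the patchable 32-bit displacement
   sits 13 bytes into the sequence: cmp (4 bytes) + short jcc (2) +
   lea (5) + pop (1) + the 0xe9 opcode itself (1).  */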

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
2159
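/* The i386 emitters below mirror the amd64 ones, but a 64-bit stack
   entry is held in the %eax (low half) / %ebx (high half) register
   pair, so most operations are performed in two halves.  */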
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2382static void
2383i386_emit_if_goto (int *offset_p, int *size_p)
2384{
2385 EMIT_ASM32 (i386_if_goto,
2386 "mov %eax,%ecx\n\t"
2387 "or %ebx,%ecx\n\t"
2388 "pop %eax\n\t"
2389 "pop %ebx\n\t"
2390 "cmpl $0,%ecx\n\t"
2391 /* Don't trust the assembler to choose the right jump */
2392 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2393
2394 if (offset_p)
2395 *offset_p = 11; /* be sure that this matches the sequence above */
2396 if (size_p)
2397 *size_p = 4;
2398}
2399
2400static void
2401i386_emit_goto (int *offset_p, int *size_p)
2402{
2403 EMIT_ASM32 (i386_goto,
2404 /* Don't trust the assembler to choose the right jump */
2405 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2406 if (offset_p)
2407 *offset_p = 1;
2408 if (size_p)
2409 *size_p = 4;
2410}
2411
2412static void
2413i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2414{
2415 int diff = (to - (from + size));
2416 unsigned char buf[sizeof (int)];
2417
2418 /* We're only doing 4-byte sizes at the moment. */
2419 if (size != 4)
2420 {
2421 emit_error = 1;
2422 return;
2423 }
2424
2425 memcpy (buf, &diff, sizeof (int));
4196ab2a 2426 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2427}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
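
/* As in the amd64 versions, *OFFSET_P records how many bytes precede
   the hand-encoded jmp's 32-bit displacement (18 here when the low
   half is compared first, 20 when the high half is), so that
   i386_write_goto_address can patch it.  */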

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
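
/* x86 has a single breakpoint kind: x86_breakpoint, defined earlier
   in this file, is the one-byte int3 (0xcc) instruction, so KIND is
   ignored above.  */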

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
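
/* Note that on __x86_64__ builds the function above returns early;
   the i386 path below the #endif is only reached in 32-bit builds.  */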

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* Need to fix up i386 siginfo if host is amd64.  */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}