/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
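
/* Illustrative note (not part of the original file): the leading '@'
   in the strings above follows the gdbserver convention, as far as
   these sources go, of marking the remainder of the string as the
   literal XML document itself rather than the name of an annex file;
   GDB then receives it verbatim in reply to a
   qXfer:features:read:target.xml request.  */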

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
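
/* Illustrative sketch (not part of the original file): the regmap
   turns a GDB register number into a byte offset within the ptrace
   `struct user' register block.  E.g., supplying GDB register 0 from
   a PTRACE_GETREGS buffer would look roughly like:

     char *buf = ...;   /- elf_gregset_t bytes -/
     supply_register (regcache, 0, buf + i386_regmap[0]);

   i.e. on amd64 the value lives RAX * 8 bytes into the buffer.  */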

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8, 22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,                       /* MPX registers BND0 ... BND3.  */
  -1, -1,                               /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                                    /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
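
/* Illustrative note (not part of the original file): libthread_db
   reaches this callback through the proc_service interface, e.g. when
   td_thr_tls_get_addr() needs a thread's segment base.  desc[1] above
   is the `base_addr' member of the kernel's struct user_desc:

     struct user_desc { unsigned int entry_number;
                        unsigned int base_addr;
                        unsigned int limit; ... };

   which is why only the second word of the descriptor is used.  */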

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
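
/* Illustrative sketch (not part of the original file): an x86 segment
   selector keeps the descriptor-table index in bits 3..15; bits 0-1
   are the RPL and bit 2 the table indicator.  That is what the
   `gs >> 3' above computes, e.g.:

     gs = 0x33  ->  0b0110011  ->  index 6, TI 0, RPL 3

   so PTRACE_GET_THREAD_AREA is asked for GDT entry 6.  */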


\f
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        collect_register_by_name (regcache, "fs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

        collect_register_by_name (regcache, "gs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
                   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
          supply_register_by_name (regcache, "fs_base", &base);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
          supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  the_target->read_memory (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
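
/* Illustrative note (not part of the original file): a typical
   hardware-watchpoint round trip through the functions above, under
   the usual RSP flow: GDB sends e.g. "Z2,601040,4" (write watchpoint,
   4 bytes), which reaches x86_insert_point and programs a DR0..DR3
   slot via x86_dr_insert_watchpoint; when the inferior later stops
   with SIGTRAP, x86_stopped_by_watchpoint and x86_stopped_data_address
   consult the mirrored DR6 status to report which address fired.  */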
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_X32);
#endif

  return 0;
}
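
/* Illustrative note (not part of the original file): the layouts
   differ because a 32-bit siginfo_t uses 4-byte ints and pointers
   inside its big union (e.g. si_addr, si_ptr), so the same logical
   field lands at a different offset than in the 64-bit object; that
   is why the fixup converts field by field instead of memcpy'ing the
   whole structure.  */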
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] hold the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
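
/* Illustrative sketch (not part of the original file): reading XCR0
   out of a raw XSAVE buffer using the offset above:

     uint64_t xcr0 = *(uint64_t *) ((char *) xsave_buf
                                    + I386_LINUX_XSAVE_XCR0_OFFSET);

   x86_linux_read_description below does the equivalent with a
   uint64_t-indexed array (offset 464 / 8 == word 58).  */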

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return i386_linux_read_description (X86_XSTATE_X87);
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        {
          tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
                                                !is_elf64);
        }

      if (tdesc == NULL)
        tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
        tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);

          char *saveptr;
          for (char *p = strtok_r (copy, ",", &saveptr);
               p != NULL;
               p = strtok_r (NULL, ",", &saveptr))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }
  the_x86_target.update_xmltarget ();
}
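
/* Illustrative note (not part of the original file): GDB advertises
   this as part of its qSupported packet, e.g.

     qSupported:multiprocess+;xmlRegisters=i386;qRelocInsn+

   (feature list abridged); the loop above picks "i386" out of the
   comma-separated xmlRegisters value.  */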

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall number trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

1029
219f2f23
PA
1030static int
1031x86_supports_tracepoints (void)
1032{
1033 return 1;
1034}
1035
fa593d66
PA
1036static void
1037append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1038{
4196ab2a 1039 target_write_memory (*to, buf, len);
fa593d66
PA
1040 *to += len;
1041}
1042
1043static int
a121b7c1 1044push_opcode (unsigned char *buf, const char *op)
fa593d66
PA
1045{
1046 unsigned char *buf_org = buf;
1047
1048 while (1)
1049 {
1050 char *endptr;
1051 unsigned long ul = strtoul (op, &endptr, 16);
1052
1053 if (endptr == op)
1054 break;
1055
1056 *buf++ = ul;
1057 op = endptr;
1058 }
1059
1060 return buf - buf_org;
1061}
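
/* Illustrative usage (not part of the original file): push_opcode
   parses a string of hex byte values, appends the bytes, and returns
   how many were written, e.g.:

     i += push_opcode (&buf[i], "48 89 e6");   /- mov %rsp,%rsi -/

   stores the three bytes 0x48 0x89 0xe6 at buf[i].  */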

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}
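
/* Illustrative note (not part of the original file): the two sizes
   correspond to the insn templates declared near the top of this
   file: jump_insn is "e9 xx xx xx xx" (jmp rel32, 5 bytes) and
   small_jump_insn is "66 e9 xx xx" (operand-size-prefixed jmp rel16,
   4 bytes), which is why a tracepoint needs at least that many bytes
   of instruction to overwrite.  */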

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
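
/* Illustrative expansion (not part of the original file): e.g.
   EMIT_ASM (amd64_pop, "pop %rax") plants the assembled "pop %rax"
   between the local labels start_amd64_pop/end_amd64_pop, jumps over
   it so the emitting function never executes it, and add_insns then
   copies those bytes from gdbserver's own text into the inferior's
   compiled-bytecode buffer at current_insn_ptr.  */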

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
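
/* Illustrative arithmetic (not part of the original file): the rel32
   displacement computed above is relative to the end of the 5-byte
   call instruction, e.g. with the call at 0x1000 and fn at 0x2000:

     offset64 = 0x2000 - (0x1000 + 1 + 4) = 0xffb

   so the bytes emitted are e8 fb 0f 00 00.  */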
1871
1872static void
1873amd64_emit_reg (int reg)
1874{
1875 unsigned char buf[16];
1876 int i;
1877 CORE_ADDR buildaddr;
1878
1879 /* Assume raw_regs is still in %rdi. */
1880 buildaddr = current_insn_ptr;
1881 i = 0;
1882 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1883 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1884 i += 4;
1885 append_insns (&buildaddr, i, buf);
1886 current_insn_ptr = buildaddr;
1887 amd64_emit_call (get_raw_reg_func_addr ());
1888}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles small adjustments (N * 8 must fit in a signed
     8-bit displacement), but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
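
/* Illustrative example (not part of the original file): for N == 2 the
   bytes appended above are 48 8d 64 24 10, i.e.
   `lea 0x10(%rsp),%rsp', which drops two 8-byte stack slots without
   clobbering the flags the way an `add $0x10,%rsp' would.  */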

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2188static void
2189i386_emit_add (void)
2190{
2191 EMIT_ASM32 (i386_add,
2192 "add (%esp),%eax\n\t"
2193 "adc 0x4(%esp),%ebx\n\t"
2194 "lea 0x8(%esp),%esp");
2195}
2196
2197static void
2198i386_emit_sub (void)
2199{
2200 EMIT_ASM32 (i386_sub,
2201 "subl %eax,(%esp)\n\t"
2202 "sbbl %ebx,4(%esp)\n\t"
2203 "pop %eax\n\t"
2204 "pop %ebx\n\t");
2205}
2206
2207static void
2208i386_emit_mul (void)
2209{
2210 emit_error = 1;
2211}
2212
2213static void
2214i386_emit_lsh (void)
2215{
2216 emit_error = 1;
2217}
2218
2219static void
2220i386_emit_rsh_signed (void)
2221{
2222 emit_error = 1;
2223}
2224
2225static void
2226i386_emit_rsh_unsigned (void)
2227{
2228 emit_error = 1;
2229}
2230
2231static void
2232i386_emit_ext (int arg)
2233{
2234 switch (arg)
2235 {
2236 case 8:
2237 EMIT_ASM32 (i386_ext_8,
2238 "cbtw\n\t"
2239 "cwtl\n\t"
2240 "movl %eax,%ebx\n\t"
2241 "sarl $31,%ebx");
2242 break;
2243 case 16:
2244 EMIT_ASM32 (i386_ext_16,
2245 "cwtl\n\t"
2246 "movl %eax,%ebx\n\t"
2247 "sarl $31,%ebx");
2248 break;
2249 case 32:
2250 EMIT_ASM32 (i386_ext_32,
2251 "movl %eax,%ebx\n\t"
2252 "sarl $31,%ebx");
2253 break;
2254 default:
2255 emit_error = 1;
2256 }
2257}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
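
/* Byte accounting for the sequence above, added for illustration:
   mov %eax,%ecx (2 bytes) + or %ebx,%ecx (2) + pop %eax (1)
   + pop %ebx (1) + cmpl $0,%ecx (3) + jne opcode 0f 85 (2) = 11,
   which is the value stored in *OFFSET_P.  */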

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
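
/* Illustrative sketch (not part of the original file): the i386
   emitters model a 64-bit top-of-stack value as a register pair, low
   half in %eax and high half in %ebx, matching the lo/hi split
   above.  */
#if 0
  LONGEST num = 0x0000000100000002LL;
  int lo = num & 0xffffffff;		/* 0x00000002, goes to %eax.  */
  int hi = (num >> 32) & 0xffffffff;	/* 0x00000001, goes to %ebx.  */
#endif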

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* As on amd64, N * 8 must fit in a signed 8-bit displacement.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
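
/* Illustrative stack layout at the moment of the call above, added
   for illustration: (%esp) holds ARG1, 4(%esp) the low half of the
   saved top-of-stack value (from %eax), and 8(%esp) its high half
   (from %ebx) -- the cdecl layout of `void (*fn) (int, LONGEST)' on
   a little-endian i386.  */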

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
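
/* Illustrative sketch (not part of the original file): how a caller
   might drive the table returned above.  Field names follow the
   initializer order of `struct emit_ops'; the sequence itself is
   hypothetical.  */
#if 0
  struct emit_ops *ops = x86_emit_ops ();

  ops->emit_prologue ();
  ops->emit_const (42);		/* Top-of-stack register = 42.  */
  ops->emit_stack_flush ();	/* Spill it to the stack.  */
  ops->emit_const (13);
  ops->emit_add ();		/* Top = 42 + 13.  */
  ops->emit_epilogue ();	/* Store result through the value ptr.  */
#endif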

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}