gdbserver/linux-low: turn 'arch_setup' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
b811d2c2 3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
/* Target descriptions handed out when the connected GDB does not
   support XML target descriptions (selected in
   x86_linux_read_description); initialized elsewhere — TODO confirm
   against initialize_low_arch.  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


/* Instruction templates used when building jump pads: a 5-byte jmp
   with 32-bit displacement and a 4-byte jmp with 16-bit displacement;
   the zeroed displacement bytes are patched in by the callers.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
72
73#include <sys/reg.h>
74#include <sys/procfs.h>
1570b33e
L
75#include <sys/uio.h>
76
d0722149
DE
77#ifndef PTRACE_GET_THREAD_AREA
78#define PTRACE_GET_THREAD_AREA 25
79#endif
80
81/* This definition comes from prctl.h, but some kernels may not have it. */
82#ifndef PTRACE_ARCH_PRCTL
83#define PTRACE_ARCH_PRCTL 30
84#endif
85
86/* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88#ifndef ARCH_GET_FS
89#define ARCH_SET_GS 0x1001
90#define ARCH_SET_FS 0x1002
91#define ARCH_GET_FS 0x1003
92#define ARCH_GET_GS 0x1004
93#endif
94
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

protected:

  /* Install the target description matching the current inferior
     (see x86_linux_read_description).  */
  void low_arch_setup () override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
115
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Mirror of the inferior's hardware debug registers; updated by the
     insert/remove point routines below and copied on fork.  */
  struct x86_debug_reg_state debug_reg_state;
};
122
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

/* Map from GDB's amd64 register numbers to `struct user' offsets.
   Entries of -1 mark registers that have no user-area slot and are
   presumably transferred via the XSAVE regset instead — see
   x86_regsets.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
  -1, -1,			/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1				/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
3aee8918
PA
195
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  /* Register 0 is 8 bytes wide only in the 64-bit descriptions.  */
  return register_size (regcache->tdesc, 0) == 8;
}

#endif
210
d0722149
DE
211\f
/* Called by libthread_db.  Look up the thread-area base address of
   thread LWPID and store it in *BASE.  IDX selects which segment
   (FS/GS on 64-bit, a GDT slot on 32-bit).  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferior: read the FS/GS base via PTRACE_ARCH_PRCTL.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
252
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    /* On 32-bit the %gs selector indexes the GDT thread-area slot;
       shift away the RPL/TI bits to get the descriptor index.  */
    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
299
300
d0722149
DE
301\f
/* Return non-zero if register REGNO cannot be written via the
   user-area regmap; only the first I386_NUM_REGS registers are mapped
   for 32-bit inferiors, while 64-bit tdescs store everything.  */
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
312
/* Return non-zero if register REGNO cannot be read via the user-area
   regmap; mirror of x86_cannot_store_register.  */
static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
323
/* Collect the general-purpose registers from REGCACHE into BUF, laid
   out as the native `struct user' gregset.  */
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      /* 64-bit inferior: use the amd64 offset map.  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* No fs_base/gs_base slots in the gregset here; push them to
	   the inferior directly via PTRACE_ARCH_PRCTL.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
378
/* Supply the general-purpose registers in BUF (native `struct user'
   gregset layout) to REGCACHE; inverse of x86_fill_gregset.  */
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* fs_base/gs_base are not in the gregset; fetch them from the
	   inferior via PTRACE_ARCH_PRCTL instead.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
413
/* Collect the floating-point registers from REGCACHE into BUF, in
   fxsave (64-bit) or fsave (32-bit) format.  */
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
423
/* Supply the floating-point registers in BUF (fxsave or fsave
   format) to REGCACHE; inverse of x86_fill_fpregset.  */
static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
433
#ifndef __x86_64__

/* 32-bit only: collect/supply the extended FP/SSE registers in
   fxsave format (PTRACE_GETFPXREGS).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
449
1570b33e
L
/* Collect/supply the XSAVE extended state between REGCACHE and BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
461
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* Size starts at 0; x86_linux_read_description sets it to the real
     XSAVE size once PTRACE_GETREGSET support has been probed.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
490
/* Return the program counter from REGCACHE, reading %rip or %eip
   depending on the inferior's word width.  */
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}
511
/* Write PC into REGCACHE as %rip or %eip depending on the inferior's
   word width; inverse of x86_get_pc.  */
static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
530\f
/* The int3 instruction (0xCC) used as the software breakpoint.  */
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

/* Return non-zero if an int3 opcode is present at PC in the
   inferior.  */

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  the_target->read_memory (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
545\f
/* Low-level function vector: the accessors the shared x86
   debug-register code uses to read/write DR registers on this host,
   plus the register width (host pointer size) — see x86-low.h for
   the field layout.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
aa5ca48f 556\f
90d74c30 557/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
558
559static int
802e8e6d
PA
560x86_supports_z_point_type (char z_type)
561{
562 switch (z_type)
563 {
564 case Z_PACKET_SW_BP:
565 case Z_PACKET_HW_BP:
566 case Z_PACKET_WRITE_WP:
567 case Z_PACKET_ACCESS_WP:
568 return 1;
569 default:
570 return 0;
571 }
572}
573
/* Insert a hardware breakpoint or watchpoint of TYPE covering ADDR /
   SIZE.  Returns 1 for unsupported types; otherwise the result of
   x86_dr_insert_watchpoint on the process's debug register mirror.  */
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
599
/* Remove a hardware breakpoint or watchpoint of TYPE covering ADDR /
   SIZE; inverse of x86_insert_point.  Returns 1 for unsupported
   types.  */
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
624
/* Return non-zero if the current process stopped because a hardware
   watchpoint triggered, per its debug register mirror.  */
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}
631
/* Return the data address that triggered the last watchpoint hit, or
   0 if it cannot be determined.  */
static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
642\f
/* Called when a new process is created.  Allocate and initialize the
   per-process debug register mirror; ownership passes to the caller
   (freed by x86_linux_delete_process).  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
654
04ec7890
SM
/* Called when a process is being deleted.  Releases the storage
   allocated by x86_linux_new_process.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
662
3a8a0396
DB
/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
690
70a0bb6b
GB
/* See nat/x86-dregs.h.  Return the debug register mirror of the
   process with id PID.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
aa5ca48f 700\f
d0722149
DE
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
733\f
1570b33e
L
/* Non-zero if the connected GDB understands XML target descriptions;
   set from the qSupported "xmlRegisters=" feature in
   x86_linux_process_qsupported.  */
static int use_xml;

/* Format of XSAVE extended state is:
 	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
1570b33e 767
3aee8918
PA
/* Get Linux/x86 target description from running target.  Probes the
   inferior's word width, XSAVE support and XCR0 feature mask, and
   returns (and caches the side effects of) the matching tdesc.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* First call: probe whether PTRACE_GETFPXREGS actually works on
     this kernel/CPU (see have_ptrace_getfpxregs above).  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  /* First call: probe PTRACE_GETREGSET and, if present, read XCR0
     out of the XSAVE area's sw_usable_bytes.  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      /* Fall back to a plain SSE description if the XCR0-based lookup
	 yielded nothing.  */
      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
891
3aee8918
PA
/* Update all the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    /* Re-select this process's tdesc now that use_xml is known.  */
    low_arch_setup ();
  });

  current_thread = saved_thread;
}
916
917/* Process qSupported query, "xmlRegisters=". Update the buffer size for
918 PTRACE_GETREGSET. */
919
920static void
06e03fff 921x86_linux_process_qsupported (char **features, int count)
1570b33e 922{
06e03fff
PA
923 int i;
924
1570b33e
L
925 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
926 with "i386" in qSupported query, it supports x86 XML target
927 descriptions. */
928 use_xml = 0;
06e03fff 929 for (i = 0; i < count; i++)
1570b33e 930 {
06e03fff 931 const char *feature = features[i];
1570b33e 932
06e03fff 933 if (startswith (feature, "xmlRegisters="))
1570b33e 934 {
06e03fff 935 char *copy = xstrdup (feature + 13);
06e03fff 936
ca3a04f6
CB
937 char *saveptr;
938 for (char *p = strtok_r (copy, ",", &saveptr);
939 p != NULL;
940 p = strtok_r (NULL, ",", &saveptr))
1570b33e 941 {
06e03fff
PA
942 if (strcmp (p, "i386") == 0)
943 {
944 use_xml = 1;
945 break;
946 }
1570b33e 947 }
1570b33e 948
06e03fff
PA
949 free (copy);
950 }
1570b33e 951 }
797bcff5 952 the_x86_target.update_xmltarget ();
1570b33e
L
953}
954
3aee8918 955/* Common for x86/x86-64. */
d0722149 956
3aee8918
PA
957static struct regsets_info x86_regsets_info =
958 {
959 x86_regsets, /* regsets */
960 0, /* num_regsets */
961 NULL, /* disabled_regsets */
962 };
214d508e
L
963
964#ifdef __x86_64__
3aee8918
PA
965static struct regs_info amd64_linux_regs_info =
966 {
967 NULL, /* regset_bitmap */
968 NULL, /* usrregs_info */
969 &x86_regsets_info
970 };
d0722149 971#endif
3aee8918
PA
972static struct usrregs_info i386_linux_usrregs_info =
973 {
974 I386_NUM_REGS,
975 i386_regmap,
976 };
d0722149 977
3aee8918
PA
978static struct regs_info i386_linux_regs_info =
979 {
980 NULL, /* regset_bitmap */
981 &i386_linux_usrregs_info,
982 &x86_regsets_info
983 };
d0722149 984
df4a0200 985static const struct regs_info *
3aee8918
PA
986x86_linux_regs_info (void)
987{
988#ifdef __x86_64__
989 if (is_64bit_tdesc ())
990 return &amd64_linux_regs_info;
991 else
992#endif
993 return &i386_linux_regs_info;
994}
d0722149 995
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior (see x86_linux_read_description).  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1004
82075af2
JS
/* Fill *SYSNO with the syscall nr trapped, taken from orig_rax or
   orig_eax per the inferior's word width.  This should only be called
   if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
1023
219f2f23
PA
/* Tracepoints are always supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1029
fa593d66
PA
/* Write the LEN bytes at BUF into the inferior at *TO, then advance
   *TO past the bytes written.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1036
/* Decode OP, a string of whitespace-separated hexadecimal byte values
   such as "48 83 ec 18", into raw bytes stored at BUF.  Returns the
   number of bytes written.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* Nothing consumed: no more byte values left in OP.  */
      if (end == op)
	break;

      *dst++ = byte;
      op = end;
    }

  return dst - buf;
}
1056
1057#ifdef __x86_64__
1058
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Return 0 on success, or 1 with an "E." error
   string copied into ERR when a required jump displacement does not
   fit in 32 bits.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  /* The tracepoint address is pushed last so it sits on top of the
     saved-register block.  */
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Loop on cmpxchg until we take the lock at LOCKADDR.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1245
1246#endif /* __x86_64__ */
1247
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Return 0 on success, or 1 with an "E." error
   string copied into ERR (only possible here when no trampoline
   space is available for a 4-byte instruction).  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the saves above.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1435
/* Install a fast tracepoint jump pad, dispatching to the amd64 or
   i386 builder according to the bitness of the current target
   description.  Return 0 on success, nonzero with a message in ERR
   otherwise.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1473
1474/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1475 architectures. */
1476
1477static int
1478x86_get_min_fast_tracepoint_insn_len (void)
1479{
1480 static int warned_about_fast_tracepoints = 0;
1481
1482#ifdef __x86_64__
1483 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1484 used for fast tracepoints. */
3aee8918 1485 if (is_64bit_tdesc ())
405f8e94
SS
1486 return 5;
1487#endif
1488
58b4daa5 1489 if (agent_loaded_p ())
405f8e94
SS
1490 {
1491 char errbuf[IPA_BUFSIZ];
1492
1493 errbuf[0] = '\0';
1494
1495 /* On x86, if trampolines are available, then 4-byte jump instructions
1496 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1497 with a 4-byte offset are used instead. */
1498 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1499 return 4;
1500 else
1501 {
1502 /* GDB has no channel to explain to user why a shorter fast
1503 tracepoint is not possible, but at least make GDBserver
1504 mention that something has gone awry. */
1505 if (!warned_about_fast_tracepoints)
1506 {
422186a9 1507 warning ("4-byte fast tracepoints not available; %s", errbuf);
405f8e94
SS
1508 warned_about_fast_tracepoints = 1;
1509 }
1510 return 5;
1511 }
1512 }
1513 else
1514 {
1515 /* Indicate that the minimum length is currently unknown since the IPA
1516 has not loaded yet. */
1517 return 0;
1518 }
fa593d66
PA
1519}
1520
6a271cae
PA
/* Append LEN bytes starting at START to the compiled-expression
   buffer, advancing the global CURRENT_INSN_PTR past them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1533
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  The asm block is bracketed
   by start_NAME/end_NAME labels; add_insns copies the bytes between
   them into the compiled-expression buffer, and the leading "jmp"
   keeps the host from ever executing them in place.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
6a271cae
PA
1550
#ifdef __x86_64__

/* Variant of EMIT_ASM for emitting i386 code from a 64-bit build:
   the inline asm is assembled in .code32 mode and the assembler is
   switched back to .code64 afterwards.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* On a 32-bit build, i386 code is the native flavor.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1571
1572#ifdef __x86_64__
1573
/* Emit the bytecode-function prologue: set up a frame, reserve 0x20
   bytes of locals, and spill the two incoming arguments (%rdi, %rsi)
   to -8(%rbp) and -16(%rbp).  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}
1584
1585
/* Emit the epilogue: store %rax through the saved second-argument
   pointer (the result slot), return 0 in %rax, and tear down the
   frame.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}
1596
/* Emit: add the next stack slot into %rax and pop it.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1604
/* Emit: subtract %rax from the next stack slot and pop the result
   into %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}
1612
/* Multiplication is not implemented by this JIT; flag EMIT_ERROR.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}
1618
/* Left shift is not implemented by this JIT; flag EMIT_ERROR.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}
1624
/* Arithmetic right shift is not implemented; flag EMIT_ERROR.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}
1630
/* Logical right shift is not implemented; flag EMIT_ERROR.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
1636
/* Emit code to sign-extend the low ARG bits of %rax to 64 bits.
   Only 8, 16 and 32 are supported; anything else flags EMIT_ERROR.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
1661
/* Emit logical negation: %rax becomes 1 if it was zero, else 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}
1670
/* Emit: bitwise-AND the next stack slot into %rax and pop it.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1678
/* Emit: bitwise-OR the next stack slot into %rax and pop it.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1686
/* Emit: bitwise-XOR the next stack slot into %rax and pop it.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1694
/* Emit: bitwise complement of %rax (XOR with all-ones).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
1701
/* Emit: compare the next stack slot with %rax; leave 1 in %rax when
   equal, else 0, and pop the compared slot.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1715
/* Emit: %rax = ((signed) next-slot < (signed) %rax) ? 1 : 0; pops
   the compared slot.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1729
/* Emit: %rax = ((unsigned) next-slot < (unsigned) %rax) ? 1 : 0;
   pops the compared slot.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1743
/* Emit a memory fetch through the pointer in %rax, replacing %rax
   with SIZE bytes loaded from that address.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
1767
/* Emit a conditional branch: pop the stack into %rax and jump if the
   old top was nonzero.  The jne is emitted as raw bytes with a zero
   displacement; *OFFSET_P/*SIZE_P tell the caller where the 4-byte
   displacement sits so it can patch it later.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
1781
/* Emit an unconditional jmp with a zero 4-byte displacement, to be
   patched later via *OFFSET_P/*SIZE_P.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
1792
/* Patch the SIZE-byte branch displacement at FROM so the jump lands
   at TO.  Only 4-byte displacements are supported; anything else
   flags EMIT_ERROR.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
1808
/* Emit "movabs $NUM,%rax" to load a 64-bit constant into %rax.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1823
/* Emit a call to FN.  Uses a 5-byte relative call when FN is within
   a 32-bit displacement; otherwise loads FN into %r10 (call-clobbered,
   so no save/restore needed) and uses an indirect call.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1865
/* Emit code to fetch raw register number REG: load REG into %esi
   (second argument) and call the IPA's get_raw_reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
1883
/* Emit: pop the stack into %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}
1890
/* Emit: push %rax, flushing the top-of-stack value to memory.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
1897
/* Emit code to zero-extend the low ARG bits of %rax.  Only 8, 16 and
   32 are supported; anything else flags EMIT_ERROR.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
1920
/* Emit: swap %rax with the next stack slot.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
1929
/* Emit code to discard N 8-byte stack slots via lea.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1947
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code to load ARG1
   into %edi and call FN; the result is left in %rax by FN itself.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
1966
4e29fb54 1967/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
1968
1969static void
1970amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1971{
1972 unsigned char buf[16];
1973 int i;
1974 CORE_ADDR buildaddr;
1975
1976 buildaddr = current_insn_ptr;
1977 i = 0;
1978 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 1979 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
1980 i += 4;
1981 append_insns (&buildaddr, i, buf);
1982 current_insn_ptr = buildaddr;
1983 EMIT_ASM (amd64_void_call_2_a,
1984 /* Save away a copy of the stack top. */
1985 "push %rax\n\t"
1986 /* Also pass top as the second argument. */
1987 "mov %rax,%rsi");
1988 amd64_emit_call (fn);
1989 EMIT_ASM (amd64_void_call_2_b,
1990 /* Restore the stack top, %rax may have been trashed. */
1991 "pop %rax");
1992}
1993
/* Emit a compare-and-branch: jump (via a patchable 5-byte jmp) when
   the next stack slot equals %rax; both operands are popped on either
   path.  *OFFSET_P/*SIZE_P locate the displacement to patch.  */

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2013
/* As amd64_emit_eq_goto, but branches when the operands differ.  */

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2033
/* As amd64_emit_eq_goto, but branches on signed less-than.  */

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2053
/* As amd64_emit_eq_goto, but branches on signed less-or-equal.  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2073
/* As amd64_emit_eq_goto, but branches on signed greater-than.  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2093
/* As amd64_emit_eq_goto, but branches on signed greater-or-equal.  */

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2114
6a271cae
PA
/* The emit_ops vector used to compile agent expressions to native
   code for 64-bit inferiors.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2155
2156#endif /* __x86_64__ */
2157
/* i386 flavor of the prologue.  64-bit bytecode values are kept
   split across the %eax (low) / %ebx (high) pair, so %ebx is saved
   here and restored in the epilogue.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2168
/* i386 epilogue: store the %eax/%ebx result pair through the value
   pointer at 12(%ebp), return 0, restore %ebx and the frame.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2181
/* Emit: 64-bit add of the next stack slot pair into %eax/%ebx
   (add/adc), popping the slot.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2190
/* Emit: 64-bit subtract of %eax/%ebx from the next stack slot pair
   (sub/sbb), popping the result into %eax/%ebx.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2200
/* Multiplication is not implemented by this JIT; flag EMIT_ERROR.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2206
/* Left shift is not implemented by this JIT; flag EMIT_ERROR.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2212
/* Arithmetic right shift is not implemented; flag EMIT_ERROR.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2218
/* Logical right shift is not implemented; flag EMIT_ERROR.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2224
/* Emit code to sign-extend the low ARG bits of the %eax/%ebx pair:
   extend within %eax, then replicate its sign bit into %ebx.  Only
   8, 16 and 32 are supported; anything else flags EMIT_ERROR.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2252
/* Emit code for logical NOT of the 64-bit top of the value stack:
   %ebx:%eax becomes 1 if it was zero, 0 otherwise.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2263
/* Emit code for bitwise AND of the top of the value stack
   (%ebx:%eax) with the 64-bit entry on the CPU stack, popping the
   consumed stack slot.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2272
/* Emit code for bitwise OR of the top of the value stack (%ebx:%eax)
   with the 64-bit entry on the CPU stack, popping the consumed
   stack slot.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2281
/* Emit code for bitwise XOR of the top of the value stack
   (%ebx:%eax) with the 64-bit entry on the CPU stack, popping the
   consumed stack slot.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2290
/* Emit code to complement all 64 bits of the top of the value stack
   in place.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2298
/* Emit code to compare the top two entries of the value stack for
   equality.  Both operands are consumed; %ebx:%eax ends up 1 if they
   were equal and 0 otherwise.  The high halves are compared first.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2316
/* Emit code for a signed 64-bit "less than" of the top two value
   stack entries: %ebx:%eax becomes 1 if the next-to-top entry is
   less than the top, else 0.  High halves are compared signed; the
   low halves decide only when the high halves are equal.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2335
/* Emit code for an unsigned 64-bit "less than" of the top two value
   stack entries: %ebx:%eax becomes 1 if the next-to-top entry is
   below the top, else 0.  Same structure as the signed variant but
   using the unsigned "jb" condition.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2354
/* Emit code to dereference the address in %eax, loading SIZE bytes.
   An 8-byte load fills both %eax (low word) and %ebx (high word).
   NOTE(review): for SIZE 1 and 2 only the low bytes of %eax are
   overwritten — presumably the compiler follows up with a
   zero/sign-extension op; confirm against callers.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2379
/* Emit code that pops the top of the value stack and branches if it
   was non-zero.  The jump is emitted as raw bytes (jne rel32 with a
   zero displacement) so its layout is fixed; *OFFSET_P/*SIZE_P tell
   the caller where the 4-byte displacement sits so it can be patched
   later via i386_write_goto_address.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2397
/* Emit an unconditional jump with a zero 4-byte displacement, to be
   patched later.  *OFFSET_P/*SIZE_P describe where the displacement
   lives within the emitted bytes.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2409
2410static void
2411i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2412{
2413 int diff = (to - (from + size));
2414 unsigned char buf[sizeof (int)];
2415
2416 /* We're only doing 4-byte sizes at the moment. */
2417 if (size != 4)
2418 {
2419 emit_error = 1;
2420 return;
2421 }
2422
2423 memcpy (buf, &diff, sizeof (int));
4196ab2a 2424 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2425}
2426
/* Emit code to load the 64-bit constant NUM into %ebx:%eax (the top
   of the value stack).  Emits "mov $lo,%eax" always; for the high
   word it emits "mov $hi,%ebx" only when HI is non-zero, otherwise
   the shorter "xor %ebx,%ebx".  The bytes are appended to the jump
   pad at current_insn_ptr via append_insns.  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2453
2454static void
2455i386_emit_call (CORE_ADDR fn)
2456{
2457 unsigned char buf[16];
2458 int i, offset;
2459 CORE_ADDR buildaddr;
2460
2461 buildaddr = current_insn_ptr;
2462 i = 0;
2463 buf[i++] = 0xe8; /* call <reladdr> */
2464 offset = ((int) fn) - (buildaddr + 5);
2465 memcpy (buf + 1, &offset, 4);
2466 append_insns (&buildaddr, 5, buf);
2467 current_insn_ptr = buildaddr;
2468}
2469
/* Emit code to push the value of raw register REG onto the value
   stack.  Builds a call to the in-process agent's get_raw_reg
   helper, passing the raw regs base (saved at 8(%ebp) by the
   prologue) and REG as stack arguments.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2495
/* Emit code to discard the register-held top of the value stack,
   reloading %ebx:%eax from the next entry on the CPU stack.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2503
/* Emit code to spill the register-held top of the value stack
   (%ebx:%eax) to the CPU stack, making room for a new top.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2511
/* Emit code to zero-extend the low ARG bits of the top of the value
   stack, clearing everything above.  Only widths of 8, 16 and 32 are
   supported; any other width is a compile error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2535
/* Emit code to exchange the top two entries of the value stack
   (the register pair %ebx:%eax and the 8-byte slot on the CPU
   stack), using %ecx/%edx as scratch.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2547
2548static void
2549i386_emit_stack_adjust (int n)
2550{
2551 unsigned char buf[16];
2552 int i;
2553 CORE_ADDR buildaddr = current_insn_ptr;
2554
2555 i = 0;
2556 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2557 buf[i++] = 0x64;
2558 buf[i++] = 0x24;
2559 buf[i++] = n * 8;
2560 append_insns (&buildaddr, i, buf);
2561 current_insn_ptr = buildaddr;
2562}
2563
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code to call FN with
   the single immediate argument ARG1 and make its 64-bit return
   value (returned in %edx:%eax per the i386 ABI) the new top of the
   value stack by copying %edx into %ebx.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space. */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2591
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit code to call FN
   with the immediate ARG1 as first argument and the current top of
   the value stack (%ebx:%eax) as the 64-bit second argument.  The
   value stack is preserved across the call (%eax is saved and
   restored; %ebx is not clobbered by the callee per the ABI).  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx. */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments. */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position. (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.) */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top. */
	      "pop %eax");
}
2627
6b9801d4 2628
/* Emit a fused compare-and-branch: pop the top two value stack
   entries and jump if they are equal.  The jump is emitted as raw
   bytes with a zero displacement; *OFFSET_P (18, which must match
   the byte layout of the sequence) and *SIZE_P locate the
   displacement for later patching.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2653
/* Emit a fused compare-and-branch: pop the top two value stack
   entries and jump if they differ.  *OFFSET_P (18) and *SIZE_P
   locate the zeroed 4-byte jump displacement for later patching.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2679
/* Emit a fused compare-and-branch: pop the top two value stack
   entries and jump if next < top (signed 64-bit, high halves
   compared first).  *OFFSET_P (20) and *SIZE_P locate the zeroed
   4-byte jump displacement for later patching.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2705
/* Emit a fused compare-and-branch: pop the top two value stack
   entries and jump if next <= top (signed 64-bit).  *OFFSET_P (20)
   and *SIZE_P locate the zeroed 4-byte jump displacement for later
   patching.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2731
/* Emit a fused compare-and-branch: pop the top two value stack
   entries and jump if next > top (signed 64-bit).  *OFFSET_P (20)
   and *SIZE_P locate the zeroed 4-byte jump displacement for later
   patching.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2757
/* Emit a fused compare-and-branch: pop the top two value stack
   entries and jump if next >= top (signed 64-bit).  *OFFSET_P (20)
   and *SIZE_P locate the zeroed 4-byte jump displacement for later
   patching.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2783
6a271cae
PA
/* Bytecode-compilation callback vector for 32-bit (i386) inferiors.
   The entries are positional initializers for struct emit_ops, so
   their order must match the struct declaration.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2824
2825
2826static struct emit_ops *
2827x86_emit_ops (void)
2828{
2829#ifdef __x86_64__
3aee8918 2830 if (is_64bit_tdesc ())
6a271cae
PA
2831 return &amd64_emit_ops;
2832 else
2833#endif
2834 return &i386_emit_ops;
2835}
2836
dd373349
AT
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".

   KIND is ignored: all x86 software breakpoints use the same byte
   sequence.  Returns a pointer to the breakpoint bytes and stores
   their length in *SIZE.  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2845
c2d6af84
PA
/* Implementation of linux_target_ops method "supports_range_stepping".
   Range stepping is always available on x86 GNU/Linux.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
2851
7d00775e
AT
/* Implementation of linux_target_ops method "supports_hardware_single_step".
   Hardware single-stepping is always available on x86.  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2860
ae91f625
MK
/* Return the in-process agent's target description index
   corresponding to the current thread's register cache tdesc.  On a
   64-bit build everything is delegated to amd64_get_ipa_tdesc_idx
   (the code after the #endif is compiled but unreachable); the
   32-bit path special-cases the XML-less SSE description before
   delegating to i386_get_ipa_tdesc_idx.  */

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2876
d0722149
DE
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.

   Positional initializer for struct linux_target_ops; entry order
   must match the struct declaration in linux-low.h.  */

struct linux_target_ops the_low_target =
{
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs — presumably; confirm field order in linux-low.h */
  1, /* decr_pc_after_break: the int3 trap reports PC past the breakpoint */
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
3aee8918 2923
ef0478f6
TBA
/* The linux target ops object.  Points at the statically allocated
   x86 target so generic gdbserver code can reach it.  */

linux_process_target *the_linux_target = &the_x86_target;
2927
3aee8918
PA
/* One-time architecture initialization: build the fallback ("no
   XML") target descriptions used when the peer cannot read XML
   feature descriptions, and register the x86 regset information.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions. */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.98765 seconds and 4 git commands to generate.