gdbserver/linux-low: turn watchpoint ops into methods
gdbserver/linux-x86-low.cc
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		  /* MPX registers BND0 ... BND3.  */
  -1, -1,			  /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1				  /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
\f
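
/* Registers at or beyond I386_NUM_REGS cannot be fetched or stored
   unless the inferior uses the 64-bit register layout.  */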
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
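
/* Collect the general-purpose registers from REGCACHE into the
   ptrace register block BUF.  */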
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
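
/* Supply the general-purpose registers from the ptrace register
   block BUF to REGCACHE.  */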
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
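
/* Collect the floating-point registers from REGCACHE into BUF, in
   fxsave format on x86-64 and fsave format on i386.  */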
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
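
/* Supply the floating-point registers from BUF (fxsave format on
   x86-64, fsave format on i386) to REGCACHE.  */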
static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__
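
/* Transfer the i387 extended (fxsave) registers between REGCACHE and
   BUF.  Only built for 32-bit GDBserver, where this state is fetched
   with PTRACE_GETFPXREGS.  */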
static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
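
/* Transfer the XSAVE extended state between REGCACHE and BUF.  */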
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}
\f
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
\f
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  The same memory layout is used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] hold the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get the Linux/x86 target description from the running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  the_x86_target.update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
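
/* Tracepoints are supported on this target.  */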
static int
x86_supports_tracepoints (void)
{
  return 1;
}
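
/* Write the LEN bytes in BUF to the inferior at *TO, and advance *TO
   past the bytes written.  */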
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
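
/* Translate the string of space-separated hex bytes in OP
   (e.g. "48 89 e6") into binary in BUF.  Return the number of bytes
   written.  */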
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
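
/* Install a fast tracepoint jump pad for the current inferior,
   dispatching to the amd64 or i386 builder above according to the
   process's tdesc.  */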
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
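
/* Append LEN bytes of code from START at current_insn_ptr, advancing
   current_insn_ptr past them.  */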
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__
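
/* Code emitters for compiled agent expressions.  As the emitters
   below show, the compiled expression keeps its top-of-stack value in
   %rax and the rest of the value stack on the hardware stack; the
   prologue saves the incoming %rdi and %rsi in the frame, and the
   epilogue stores the final %rax through the pointer that arrived in
   %rsi.  */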
1617static void
1618amd64_emit_prologue (void)
1619{
1620 EMIT_ASM (amd64_prologue,
1621 "pushq %rbp\n\t"
1622 "movq %rsp,%rbp\n\t"
1623 "sub $0x20,%rsp\n\t"
1624 "movq %rdi,-8(%rbp)\n\t"
1625 "movq %rsi,-16(%rbp)");
1626}
1627
1628
1629static void
1630amd64_emit_epilogue (void)
1631{
1632 EMIT_ASM (amd64_epilogue,
1633 "movq -16(%rbp),%rdi\n\t"
1634 "movq %rax,(%rdi)\n\t"
1635 "xor %rax,%rax\n\t"
1636 "leave\n\t"
1637 "ret");
1638}
1639
1640static void
1641amd64_emit_add (void)
1642{
1643 EMIT_ASM (amd64_add,
1644 "add (%rsp),%rax\n\t"
1645 "lea 0x8(%rsp),%rsp");
1646}
1647
1648static void
1649amd64_emit_sub (void)
1650{
1651 EMIT_ASM (amd64_sub,
1652 "sub %rax,(%rsp)\n\t"
1653 "pop %rax");
1654}
1655
1656static void
1657amd64_emit_mul (void)
1658{
1659 emit_error = 1;
1660}
1661
1662static void
1663amd64_emit_lsh (void)
1664{
1665 emit_error = 1;
1666}
1667
1668static void
1669amd64_emit_rsh_signed (void)
1670{
1671 emit_error = 1;
1672}
1673
1674static void
1675amd64_emit_rsh_unsigned (void)
1676{
1677 emit_error = 1;
1678}
1679
1680static void
1681amd64_emit_ext (int arg)
1682{
1683 switch (arg)
1684 {
1685 case 8:
1686 EMIT_ASM (amd64_ext_8,
1687 "cbtw\n\t"
1688 "cwtl\n\t"
1689 "cltq");
1690 break;
1691 case 16:
1692 EMIT_ASM (amd64_ext_16,
1693 "cwtl\n\t"
1694 "cltq");
1695 break;
1696 case 32:
1697 EMIT_ASM (amd64_ext_32,
1698 "cltq");
1699 break;
1700 default:
1701 emit_error = 1;
1702 }
1703}
1704
1705static void
1706amd64_emit_log_not (void)
1707{
1708 EMIT_ASM (amd64_log_not,
1709 "test %rax,%rax\n\t"
1710 "sete %cl\n\t"
1711 "movzbq %cl,%rax");
1712}
1713
1714static void
1715amd64_emit_bit_and (void)
1716{
1717 EMIT_ASM (amd64_and,
1718 "and (%rsp),%rax\n\t"
1719 "lea 0x8(%rsp),%rsp");
1720}
1721
1722static void
1723amd64_emit_bit_or (void)
1724{
1725 EMIT_ASM (amd64_or,
1726 "or (%rsp),%rax\n\t"
1727 "lea 0x8(%rsp),%rsp");
1728}
1729
1730static void
1731amd64_emit_bit_xor (void)
1732{
1733 EMIT_ASM (amd64_xor,
1734 "xor (%rsp),%rax\n\t"
1735 "lea 0x8(%rsp),%rsp");
1736}
1737
1738static void
1739amd64_emit_bit_not (void)
1740{
1741 EMIT_ASM (amd64_bit_not,
1742 "xorq $0xffffffffffffffff,%rax");
1743}
1744
1745static void
1746amd64_emit_equal (void)
1747{
1748 EMIT_ASM (amd64_equal,
1749 "cmp %rax,(%rsp)\n\t"
1750 "je .Lamd64_equal_true\n\t"
1751 "xor %rax,%rax\n\t"
1752 "jmp .Lamd64_equal_end\n\t"
1753 ".Lamd64_equal_true:\n\t"
1754 "mov $0x1,%rax\n\t"
1755 ".Lamd64_equal_end:\n\t"
1756 "lea 0x8(%rsp),%rsp");
1757}
1758
1759static void
1760amd64_emit_less_signed (void)
1761{
1762 EMIT_ASM (amd64_less_signed,
1763 "cmp %rax,(%rsp)\n\t"
1764 "jl .Lamd64_less_signed_true\n\t"
1765 "xor %rax,%rax\n\t"
1766 "jmp .Lamd64_less_signed_end\n\t"
1767 ".Lamd64_less_signed_true:\n\t"
1768 "mov $1,%rax\n\t"
1769 ".Lamd64_less_signed_end:\n\t"
1770 "lea 0x8(%rsp),%rsp");
1771}
1772
1773static void
1774amd64_emit_less_unsigned (void)
1775{
1776 EMIT_ASM (amd64_less_unsigned,
1777 "cmp %rax,(%rsp)\n\t"
1778 "jb .Lamd64_less_unsigned_true\n\t"
1779 "xor %rax,%rax\n\t"
1780 "jmp .Lamd64_less_unsigned_end\n\t"
1781 ".Lamd64_less_unsigned_true:\n\t"
1782 "mov $1,%rax\n\t"
1783 ".Lamd64_less_unsigned_end:\n\t"
1784 "lea 0x8(%rsp),%rsp");
1785}
1786
1787static void
1788amd64_emit_ref (int size)
1789{
1790 switch (size)
1791 {
1792 case 1:
1793 EMIT_ASM (amd64_ref1,
1794 "movb (%rax),%al");
1795 break;
1796 case 2:
1797 EMIT_ASM (amd64_ref2,
1798 "movw (%rax),%ax");
1799 break;
1800 case 4:
1801 EMIT_ASM (amd64_ref4,
1802 "movl (%rax),%eax");
1803 break;
1804 case 8:
1805 EMIT_ASM (amd64_ref8,
1806 "movq (%rax),%rax");
1807 break;
1808 }
1809}
1810
1811static void
1812amd64_emit_if_goto (int *offset_p, int *size_p)
1813{
1814 EMIT_ASM (amd64_if_goto,
1815 "mov %rax,%rcx\n\t"
1816 "pop %rax\n\t"
1817 "cmp $0,%rcx\n\t"
1818 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1819 if (offset_p)
1820 *offset_p = 10;
1821 if (size_p)
1822 *size_p = 4;
1823}
1824
1825static void
1826amd64_emit_goto (int *offset_p, int *size_p)
1827{
1828 EMIT_ASM (amd64_goto,
1829 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1830 if (offset_p)
1831 *offset_p = 1;
1832 if (size_p)
1833 *size_p = 4;
1834}
1835
1836static void
1837amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1838{
1839 int diff = (to - (from + size));
1840 unsigned char buf[sizeof (int)];
1841
1842 if (size != 4)
1843 {
1844 emit_error = 1;
1845 return;
1846 }
1847
1848 memcpy (buf, &diff, sizeof (int));
4196ab2a 1849 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
1850}
1851
1852static void
4e29fb54 1853amd64_emit_const (LONGEST num)
6a271cae
PA
1854{
1855 unsigned char buf[16];
1856 int i;
1857 CORE_ADDR buildaddr = current_insn_ptr;
1858
1859 i = 0;
1860 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
b00ad6ff 1861 memcpy (&buf[i], &num, sizeof (num));
6a271cae
PA
1862 i += 8;
1863 append_insns (&buildaddr, i, buf);
1864 current_insn_ptr = buildaddr;
1865}
1866
1867static void
1868amd64_emit_call (CORE_ADDR fn)
1869{
1870 unsigned char buf[16];
1871 int i;
1872 CORE_ADDR buildaddr;
4e29fb54 1873 LONGEST offset64;
6a271cae
PA
1874
1875 /* The destination function being in the shared library, may be
1876 >31-bits away off the compiled code pad. */
1877
1878 buildaddr = current_insn_ptr;
1879
1880 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1881
1882 i = 0;
1883
1884 if (offset64 > INT_MAX || offset64 < INT_MIN)
1885 {
1886 /* Offset is too large for a call. Use callq, but that requires
1887 a register, so avoid it if possible. Use r10, since it is
1888 call-clobbered, we don't have to push/pop it. */
1889 buf[i++] = 0x48; /* mov $fn,%r10 */
1890 buf[i++] = 0xba;
1891 memcpy (buf + i, &fn, 8);
1892 i += 8;
1893 buf[i++] = 0xff; /* callq *%r10 */
1894 buf[i++] = 0xd2;
1895 }
1896 else
1897 {
1898 int offset32 = offset64; /* we know we can't overflow here. */
ed036b40
PA
1899
1900 buf[i++] = 0xe8; /* call <reladdr> */
1901 memcpy (buf + i, &offset32, 4);
1902 i += 4;
1903 }
1904
1905 append_insns (&buildaddr, i, buf);
1906 current_insn_ptr = buildaddr;
1907}
1908
1909static void
1910amd64_emit_reg (int reg)
1911{
1912 unsigned char buf[16];
1913 int i;
1914 CORE_ADDR buildaddr;
1915
1916 /* Assume raw_regs is still in %rdi. */
1917 buildaddr = current_insn_ptr;
1918 i = 0;
1919 buf[i++] = 0xbe; /* mov $<n>,%esi */
1920 memcpy (&buf[i], &reg, sizeof (reg));
1921 i += 4;
1922 append_insns (&buildaddr, i, buf);
1923 current_insn_ptr = buildaddr;
1924 amd64_emit_call (get_raw_reg_func_addr ());
1925}
1926
1927static void
1928amd64_emit_pop (void)
1929{
1930 EMIT_ASM (amd64_pop,
1931 "pop %rax");
1932}
1933
1934static void
1935amd64_emit_stack_flush (void)
1936{
1937 EMIT_ASM (amd64_stack_flush,
1938 "push %rax");
1939}
1940
1941static void
1942amd64_emit_zero_ext (int arg)
1943{
1944 switch (arg)
1945 {
1946 case 8:
1947 EMIT_ASM (amd64_zero_ext_8,
1948 "and $0xff,%rax");
1949 break;
1950 case 16:
1951 EMIT_ASM (amd64_zero_ext_16,
1952 "and $0xffff,%rax");
1953 break;
1954 case 32:
1955 EMIT_ASM (amd64_zero_ext_32,
1956 "mov $0xffffffff,%rcx\n\t"
1957 "and %rcx,%rax");
1958 break;
1959 default:
1960 emit_error = 1;
1961 }
1962}
1963
1964static void
1965amd64_emit_swap (void)
1966{
1967 EMIT_ASM (amd64_swap,
1968 "mov %rax,%rcx\n\t"
1969 "pop %rax\n\t"
1970 "push %rcx");
1971}
1972
1973static void
1974amd64_emit_stack_adjust (int n)
1975{
1976 unsigned char buf[16];
1977 int i;
1978 CORE_ADDR buildaddr = current_insn_ptr;
1979
1980 i = 0;
1981 buf[i++] = 0x48; /* lea <n>(%rsp),%rsp */
1982 buf[i++] = 0x8d;
1983 buf[i++] = 0x64;
1984 buf[i++] = 0x24;
1985 /* The single-byte displacement only handles small adjustments, but we don't expect any more. */
1986 buf[i++] = n * 8;
1987 append_insns (&buildaddr, i, buf);
1988 current_insn_ptr = buildaddr;
1989}
1990
1991/* FN's prototype is `LONGEST(*fn)(int)'. */
1992
1993static void
1994amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1995{
1996 unsigned char buf[16];
1997 int i;
1998 CORE_ADDR buildaddr;
1999
2000 buildaddr = current_insn_ptr;
2001 i = 0;
2002 buf[i++] = 0xbf; /* movl $<n>,%edi */
2003 memcpy (&buf[i], &arg1, sizeof (arg1));
2004 i += 4;
2005 append_insns (&buildaddr, i, buf);
2006 current_insn_ptr = buildaddr;
2007 amd64_emit_call (fn);
2008}
2009
2010 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2011
2012static void
2013amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2014{
2015 unsigned char buf[16];
2016 int i;
2017 CORE_ADDR buildaddr;
2018
2019 buildaddr = current_insn_ptr;
2020 i = 0;
2021 buf[i++] = 0xbf; /* movl $<n>,%edi */
2022 memcpy (&buf[i], &arg1, sizeof (arg1));
2023 i += 4;
2024 append_insns (&buildaddr, i, buf);
2025 current_insn_ptr = buildaddr;
2026 EMIT_ASM (amd64_void_call_2_a,
2027 /* Save away a copy of the stack top. */
2028 "push %rax\n\t"
2029 /* Also pass top as the second argument. */
2030 "mov %rax,%rsi");
2031 amd64_emit_call (fn);
2032 EMIT_ASM (amd64_void_call_2_b,
2033 /* Restore the stack top, %rax may have been trashed. */
2034 "pop %rax");
2035}
2036
2037 static void
2038amd64_emit_eq_goto (int *offset_p, int *size_p)
2039{
2040 EMIT_ASM (amd64_eq,
2041 "cmp %rax,(%rsp)\n\t"
2042 "jne .Lamd64_eq_fallthru\n\t"
2043 "lea 0x8(%rsp),%rsp\n\t"
2044 "pop %rax\n\t"
2045 /* jmp, but don't trust the assembler to choose the right jump */
2046 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2047 ".Lamd64_eq_fallthru:\n\t"
2048 "lea 0x8(%rsp),%rsp\n\t"
2049 "pop %rax");
2050
2051 if (offset_p)
2052 *offset_p = 13;
2053 if (size_p)
2054 *size_p = 4;
2055}
2056
2057 static void
2058amd64_emit_ne_goto (int *offset_p, int *size_p)
2059{
2060 EMIT_ASM (amd64_ne,
2061 "cmp %rax,(%rsp)\n\t"
2062 "je .Lamd64_ne_fallthru\n\t"
2063 "lea 0x8(%rsp),%rsp\n\t"
2064 "pop %rax\n\t"
2065 /* jmp, but don't trust the assembler to choose the right jump */
2066 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2067 ".Lamd64_ne_fallthru:\n\t"
2068 "lea 0x8(%rsp),%rsp\n\t"
2069 "pop %rax");
2070
2071 if (offset_p)
2072 *offset_p = 13;
2073 if (size_p)
2074 *size_p = 4;
2075}
2076
2077 static void
2078amd64_emit_lt_goto (int *offset_p, int *size_p)
2079{
2080 EMIT_ASM (amd64_lt,
2081 "cmp %rax,(%rsp)\n\t"
2082 "jnl .Lamd64_lt_fallthru\n\t"
2083 "lea 0x8(%rsp),%rsp\n\t"
2084 "pop %rax\n\t"
2085 /* jmp, but don't trust the assembler to choose the right jump */
2086 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2087 ".Lamd64_lt_fallthru:\n\t"
2088 "lea 0x8(%rsp),%rsp\n\t"
2089 "pop %rax");
2090
2091 if (offset_p)
2092 *offset_p = 13;
2093 if (size_p)
2094 *size_p = 4;
2095}
2096
2097 static void
2098amd64_emit_le_goto (int *offset_p, int *size_p)
2099{
2100 EMIT_ASM (amd64_le,
2101 "cmp %rax,(%rsp)\n\t"
2102 "jnle .Lamd64_le_fallthru\n\t"
2103 "lea 0x8(%rsp),%rsp\n\t"
2104 "pop %rax\n\t"
2105 /* jmp, but don't trust the assembler to choose the right jump */
2106 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2107 ".Lamd64_le_fallthru:\n\t"
2108 "lea 0x8(%rsp),%rsp\n\t"
2109 "pop %rax");
2110
2111 if (offset_p)
2112 *offset_p = 13;
2113 if (size_p)
2114 *size_p = 4;
2115}
2116
2117 static void
2118amd64_emit_gt_goto (int *offset_p, int *size_p)
2119{
2120 EMIT_ASM (amd64_gt,
2121 "cmp %rax,(%rsp)\n\t"
2122 "jng .Lamd64_gt_fallthru\n\t"
2123 "lea 0x8(%rsp),%rsp\n\t"
2124 "pop %rax\n\t"
2125 /* jmp, but don't trust the assembler to choose the right jump */
2126 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2127 ".Lamd64_gt_fallthru:\n\t"
2128 "lea 0x8(%rsp),%rsp\n\t"
2129 "pop %rax");
2130
2131 if (offset_p)
2132 *offset_p = 13;
2133 if (size_p)
2134 *size_p = 4;
2135}
2136
2137 static void
2138amd64_emit_ge_goto (int *offset_p, int *size_p)
2139{
2140 EMIT_ASM (amd64_ge,
2141 "cmp %rax,(%rsp)\n\t"
2142 "jnge .Lamd64_ge_fallthru\n\t"
2143 ".Lamd64_ge_jump:\n\t"
2144 "lea 0x8(%rsp),%rsp\n\t"
2145 "pop %rax\n\t"
2146 /* jmp, but don't trust the assembler to choose the right jump */
2147 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2148 ".Lamd64_ge_fallthru:\n\t"
2149 "lea 0x8(%rsp),%rsp\n\t"
2150 "pop %rax");
2151
2152 if (offset_p)
2153 *offset_p = 13;
2154 if (size_p)
2155 *size_p = 4;
2156}
2157
2158struct emit_ops amd64_emit_ops =
2159 {
2160 amd64_emit_prologue,
2161 amd64_emit_epilogue,
2162 amd64_emit_add,
2163 amd64_emit_sub,
2164 amd64_emit_mul,
2165 amd64_emit_lsh,
2166 amd64_emit_rsh_signed,
2167 amd64_emit_rsh_unsigned,
2168 amd64_emit_ext,
2169 amd64_emit_log_not,
2170 amd64_emit_bit_and,
2171 amd64_emit_bit_or,
2172 amd64_emit_bit_xor,
2173 amd64_emit_bit_not,
2174 amd64_emit_equal,
2175 amd64_emit_less_signed,
2176 amd64_emit_less_unsigned,
2177 amd64_emit_ref,
2178 amd64_emit_if_goto,
2179 amd64_emit_goto,
2180 amd64_write_goto_address,
2181 amd64_emit_const,
2182 amd64_emit_call,
2183 amd64_emit_reg,
2184 amd64_emit_pop,
2185 amd64_emit_stack_flush,
2186 amd64_emit_zero_ext,
2187 amd64_emit_swap,
2188 amd64_emit_stack_adjust,
2189 amd64_emit_int_call_1,
2190 amd64_emit_void_call_2,
2191 amd64_emit_eq_goto,
2192 amd64_emit_ne_goto,
2193 amd64_emit_lt_goto,
2194 amd64_emit_le_goto,
2195 amd64_emit_gt_goto,
2196 amd64_emit_ge_goto
2197 };
2198
2199#endif /* __x86_64__ */
2200
2201static void
2202i386_emit_prologue (void)
2203{
2204 EMIT_ASM32 (i386_prologue,
2205 "push %ebp\n\t"
2206 "mov %esp,%ebp\n\t"
2207 "push %ebx");
2208 /* At this point, the raw regs base address is at 8(%ebp), and the
2209 value pointer is at 12(%ebp). */
2210}
2211
2212static void
2213i386_emit_epilogue (void)
2214{
2215 EMIT_ASM32 (i386_epilogue,
2216 "mov 12(%ebp),%ecx\n\t"
2217 "mov %eax,(%ecx)\n\t"
2218 "mov %ebx,0x4(%ecx)\n\t"
2219 "xor %eax,%eax\n\t"
2220 "pop %ebx\n\t"
2221 "pop %ebp\n\t"
2222 "ret");
2223}
2224
2225static void
2226i386_emit_add (void)
2227{
2228 EMIT_ASM32 (i386_add,
2229 "add (%esp),%eax\n\t"
2230 "adc 0x4(%esp),%ebx\n\t"
2231 "lea 0x8(%esp),%esp");
2232}
2233
2234static void
2235i386_emit_sub (void)
2236{
2237 EMIT_ASM32 (i386_sub,
2238 "subl %eax,(%esp)\n\t"
2239 "sbbl %ebx,4(%esp)\n\t"
2240 "pop %eax\n\t"
2241 "pop %ebx\n\t");
2242}
2243
2244static void
2245i386_emit_mul (void)
2246{
2247 emit_error = 1;
2248}
2249
2250static void
2251i386_emit_lsh (void)
2252{
2253 emit_error = 1;
2254}
2255
2256static void
2257i386_emit_rsh_signed (void)
2258{
2259 emit_error = 1;
2260}
2261
2262static void
2263i386_emit_rsh_unsigned (void)
2264{
2265 emit_error = 1;
2266}
2267
2268static void
2269i386_emit_ext (int arg)
2270{
2271 switch (arg)
2272 {
2273 case 8:
2274 EMIT_ASM32 (i386_ext_8,
2275 "cbtw\n\t"
2276 "cwtl\n\t"
2277 "movl %eax,%ebx\n\t"
2278 "sarl $31,%ebx");
2279 break;
2280 case 16:
2281 EMIT_ASM32 (i386_ext_16,
2282 "cwtl\n\t"
2283 "movl %eax,%ebx\n\t"
2284 "sarl $31,%ebx");
2285 break;
2286 case 32:
2287 EMIT_ASM32 (i386_ext_32,
2288 "movl %eax,%ebx\n\t"
2289 "sarl $31,%ebx");
2290 break;
2291 default:
2292 emit_error = 1;
2293 }
2294}
2295
2296static void
2297i386_emit_log_not (void)
2298{
2299 EMIT_ASM32 (i386_log_not,
2300 "or %ebx,%eax\n\t"
2301 "test %eax,%eax\n\t"
2302 "sete %cl\n\t"
2303 "xor %ebx,%ebx\n\t"
2304 "movzbl %cl,%eax");
2305}
2306
2307static void
2308i386_emit_bit_and (void)
2309{
2310 EMIT_ASM32 (i386_and,
2311 "and (%esp),%eax\n\t"
2312 "and 0x4(%esp),%ebx\n\t"
2313 "lea 0x8(%esp),%esp");
2314}
2315
2316static void
2317i386_emit_bit_or (void)
2318{
2319 EMIT_ASM32 (i386_or,
2320 "or (%esp),%eax\n\t"
2321 "or 0x4(%esp),%ebx\n\t"
2322 "lea 0x8(%esp),%esp");
2323}
2324
2325static void
2326i386_emit_bit_xor (void)
2327{
2328 EMIT_ASM32 (i386_xor,
2329 "xor (%esp),%eax\n\t"
2330 "xor 0x4(%esp),%ebx\n\t"
2331 "lea 0x8(%esp),%esp");
2332}
2333
2334static void
2335i386_emit_bit_not (void)
2336{
2337 EMIT_ASM32 (i386_bit_not,
2338 "xor $0xffffffff,%eax\n\t"
2339 "xor $0xffffffff,%ebx\n\t");
2340}
2341
2342static void
2343i386_emit_equal (void)
2344{
2345 EMIT_ASM32 (i386_equal,
2346 "cmpl %ebx,4(%esp)\n\t"
2347 "jne .Li386_equal_false\n\t"
2348 "cmpl %eax,(%esp)\n\t"
2349 "je .Li386_equal_true\n\t"
2350 ".Li386_equal_false:\n\t"
2351 "xor %eax,%eax\n\t"
2352 "jmp .Li386_equal_end\n\t"
2353 ".Li386_equal_true:\n\t"
2354 "mov $1,%eax\n\t"
2355 ".Li386_equal_end:\n\t"
2356 "xor %ebx,%ebx\n\t"
2357 "lea 0x8(%esp),%esp");
2358}
2359
2360static void
2361i386_emit_less_signed (void)
2362{
2363 EMIT_ASM32 (i386_less_signed,
2364 "cmpl %ebx,4(%esp)\n\t"
2365 "jl .Li386_less_signed_true\n\t"
2366 "jne .Li386_less_signed_false\n\t"
2367 "cmpl %eax,(%esp)\n\t"
2368 "jl .Li386_less_signed_true\n\t"
2369 ".Li386_less_signed_false:\n\t"
2370 "xor %eax,%eax\n\t"
2371 "jmp .Li386_less_signed_end\n\t"
2372 ".Li386_less_signed_true:\n\t"
2373 "mov $1,%eax\n\t"
2374 ".Li386_less_signed_end:\n\t"
2375 "xor %ebx,%ebx\n\t"
2376 "lea 0x8(%esp),%esp");
2377}
2378
2379static void
2380i386_emit_less_unsigned (void)
2381{
2382 EMIT_ASM32 (i386_less_unsigned,
2383 "cmpl %ebx,4(%esp)\n\t"
2384 "jb .Li386_less_unsigned_true\n\t"
2385 "jne .Li386_less_unsigned_false\n\t"
2386 "cmpl %eax,(%esp)\n\t"
2387 "jb .Li386_less_unsigned_true\n\t"
2388 ".Li386_less_unsigned_false:\n\t"
2389 "xor %eax,%eax\n\t"
2390 "jmp .Li386_less_unsigned_end\n\t"
2391 ".Li386_less_unsigned_true:\n\t"
2392 "mov $1,%eax\n\t"
2393 ".Li386_less_unsigned_end:\n\t"
2394 "xor %ebx,%ebx\n\t"
2395 "lea 0x8(%esp),%esp");
2396}
2397
2398static void
2399i386_emit_ref (int size)
2400{
2401 switch (size)
2402 {
2403 case 1:
2404 EMIT_ASM32 (i386_ref1,
2405 "movb (%eax),%al");
2406 break;
2407 case 2:
2408 EMIT_ASM32 (i386_ref2,
2409 "movw (%eax),%ax");
2410 break;
2411 case 4:
2412 EMIT_ASM32 (i386_ref4,
2413 "movl (%eax),%eax");
2414 break;
2415 case 8:
2416 EMIT_ASM32 (i386_ref8,
2417 "movl 4(%eax),%ebx\n\t"
2418 "movl (%eax),%eax");
2419 break;
2420 }
2421}
2422
2423static void
2424i386_emit_if_goto (int *offset_p, int *size_p)
2425{
2426 EMIT_ASM32 (i386_if_goto,
2427 "mov %eax,%ecx\n\t"
2428 "or %ebx,%ecx\n\t"
2429 "pop %eax\n\t"
2430 "pop %ebx\n\t"
2431 "cmpl $0,%ecx\n\t"
2432 /* Don't trust the assembler to choose the right jump */
2433 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2434
2435 if (offset_p)
2436 *offset_p = 11; /* be sure that this matches the sequence above */
2437 if (size_p)
2438 *size_p = 4;
2439}
2440
2441static void
2442i386_emit_goto (int *offset_p, int *size_p)
2443{
2444 EMIT_ASM32 (i386_goto,
2445 /* Don't trust the assembler to choose the right jump */
2446 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2447 if (offset_p)
2448 *offset_p = 1;
2449 if (size_p)
2450 *size_p = 4;
2451}
2452
2453static void
2454i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2455{
2456 int diff = (to - (from + size));
2457 unsigned char buf[sizeof (int)];
2458
2459 /* We're only doing 4-byte sizes at the moment. */
2460 if (size != 4)
2461 {
2462 emit_error = 1;
2463 return;
2464 }
2465
2466 memcpy (buf, &diff, sizeof (int));
4196ab2a 2467 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2468}
2469
2470static void
4e29fb54 2471i386_emit_const (LONGEST num)
6a271cae
PA
2472{
2473 unsigned char buf[16];
2474 int i, hi, lo;
2475 CORE_ADDR buildaddr = current_insn_ptr;
2476
2477 i = 0;
2478 buf[i++] = 0xb8; /* mov $<n>,%eax */
2479 lo = num & 0xffffffff;
2480 memcpy (&buf[i], &lo, sizeof (lo));
2481 i += 4;
2482 hi = ((num >> 32) & 0xffffffff);
2483 if (hi)
2484 {
2485 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2486 memcpy (&buf[i], &hi, sizeof (hi));
2487 i += 4;
2488 }
2489 else
2490 {
2491 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2492 }
2493 append_insns (&buildaddr, i, buf);
2494 current_insn_ptr = buildaddr;
2495}
2496
2497static void
2498i386_emit_call (CORE_ADDR fn)
2499{
2500 unsigned char buf[16];
2501 int i, offset;
2502 CORE_ADDR buildaddr;
2503
2504 buildaddr = current_insn_ptr;
2505 i = 0;
2506 buf[i++] = 0xe8; /* call <reladdr> */
2507 offset = ((int) fn) - (buildaddr + 5);
2508 memcpy (buf + 1, &offset, 4);
2509 append_insns (&buildaddr, 5, buf);
2510 current_insn_ptr = buildaddr;
2511}
2512
2513static void
2514i386_emit_reg (int reg)
2515{
2516 unsigned char buf[16];
2517 int i;
2518 CORE_ADDR buildaddr;
2519
2520 EMIT_ASM32 (i386_reg_a,
2521 "sub $0x8,%esp");
2522 buildaddr = current_insn_ptr;
2523 i = 0;
2524 buf[i++] = 0xb8; /* mov $<n>,%eax */
2525 memcpy (&buf[i], &reg, sizeof (reg));
2526 i += 4;
2527 append_insns (&buildaddr, i, buf);
2528 current_insn_ptr = buildaddr;
2529 EMIT_ASM32 (i386_reg_b,
2530 "mov %eax,4(%esp)\n\t"
2531 "mov 8(%ebp),%eax\n\t"
2532 "mov %eax,(%esp)");
2533 i386_emit_call (get_raw_reg_func_addr ());
2534 EMIT_ASM32 (i386_reg_c,
2535 "xor %ebx,%ebx\n\t"
2536 "lea 0x8(%esp),%esp");
2537}
2538
2539static void
2540i386_emit_pop (void)
2541{
2542 EMIT_ASM32 (i386_pop,
2543 "pop %eax\n\t"
2544 "pop %ebx");
2545}
2546
2547static void
2548i386_emit_stack_flush (void)
2549{
2550 EMIT_ASM32 (i386_stack_flush,
2551 "push %ebx\n\t"
2552 "push %eax");
2553}
2554
2555static void
2556i386_emit_zero_ext (int arg)
2557{
2558 switch (arg)
2559 {
2560 case 8:
2561 EMIT_ASM32 (i386_zero_ext_8,
2562 "and $0xff,%eax\n\t"
2563 "xor %ebx,%ebx");
2564 break;
2565 case 16:
2566 EMIT_ASM32 (i386_zero_ext_16,
2567 "and $0xffff,%eax\n\t"
2568 "xor %ebx,%ebx");
2569 break;
2570 case 32:
2571 EMIT_ASM32 (i386_zero_ext_32,
2572 "xor %ebx,%ebx");
2573 break;
2574 default:
2575 emit_error = 1;
2576 }
2577}
2578
2579static void
2580i386_emit_swap (void)
2581{
2582 EMIT_ASM32 (i386_swap,
2583 "mov %eax,%ecx\n\t"
2584 "mov %ebx,%edx\n\t"
2585 "pop %eax\n\t"
2586 "pop %ebx\n\t"
2587 "push %edx\n\t"
2588 "push %ecx");
2589}
2590
2591static void
2592i386_emit_stack_adjust (int n)
2593{
2594 unsigned char buf[16];
2595 int i;
2596 CORE_ADDR buildaddr = current_insn_ptr;
2597
2598 i = 0;
2599 buf[i++] = 0x8d; /* lea <n>(%esp),%esp */
2600 buf[i++] = 0x64;
2601 buf[i++] = 0x24;
2602 buf[i++] = n * 8;
2603 append_insns (&buildaddr, i, buf);
2604 current_insn_ptr = buildaddr;
2605}
2606
2607/* FN's prototype is `LONGEST(*fn)(int)'. */
2608
2609static void
2610i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2611{
2612 unsigned char buf[16];
2613 int i;
2614 CORE_ADDR buildaddr;
2615
2616 EMIT_ASM32 (i386_int_call_1_a,
2617 /* Reserve a bit of stack space. */
2618 "sub $0x8,%esp");
2619 /* Put the one argument on the stack. */
2620 buildaddr = current_insn_ptr;
2621 i = 0;
2622 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2623 buf[i++] = 0x04;
2624 buf[i++] = 0x24;
2625 memcpy (&buf[i], &arg1, sizeof (arg1));
2626 i += 4;
2627 append_insns (&buildaddr, i, buf);
2628 current_insn_ptr = buildaddr;
2629 i386_emit_call (fn);
2630 EMIT_ASM32 (i386_int_call_1_c,
2631 "mov %edx,%ebx\n\t"
2632 "lea 0x8(%esp),%esp");
2633}
2634
4e29fb54 2635/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
2636
2637static void
2638i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2639{
2640 unsigned char buf[16];
2641 int i;
2642 CORE_ADDR buildaddr;
2643
2644 EMIT_ASM32 (i386_void_call_2_a,
2645 /* Preserve %eax only; we don't have to worry about %ebx. */
2646 "push %eax\n\t"
2647 /* Reserve a bit of stack space for arguments. */
2648 "sub $0x10,%esp\n\t"
2649 /* Copy "top" to the second argument position. (Note that
2650 we can't assume the function won't scribble on its
2651 arguments, so don't try to restore from this.) */
2652 "mov %eax,4(%esp)\n\t"
2653 "mov %ebx,8(%esp)");
2654 /* Put the first argument on the stack. */
2655 buildaddr = current_insn_ptr;
2656 i = 0;
2657 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2658 buf[i++] = 0x04;
2659 buf[i++] = 0x24;
2660 memcpy (&buf[i], &arg1, sizeof (arg1));
2661 i += 4;
2662 append_insns (&buildaddr, i, buf);
2663 current_insn_ptr = buildaddr;
2664 i386_emit_call (fn);
2665 EMIT_ASM32 (i386_void_call_2_b,
2666 "lea 0x10(%esp),%esp\n\t"
2667 /* Restore original stack top. */
2668 "pop %eax");
2669}
2670
2671
2672 static void
2673i386_emit_eq_goto (int *offset_p, int *size_p)
2674{
2675 EMIT_ASM32 (eq,
2676 /* Check low half first, more likely to be decider */
2677 "cmpl %eax,(%esp)\n\t"
2678 "jne .Leq_fallthru\n\t"
2679 "cmpl %ebx,4(%esp)\n\t"
2680 "jne .Leq_fallthru\n\t"
2681 "lea 0x8(%esp),%esp\n\t"
2682 "pop %eax\n\t"
2683 "pop %ebx\n\t"
2684 /* jmp, but don't trust the assembler to choose the right jump */
2685 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2686 ".Leq_fallthru:\n\t"
2687 "lea 0x8(%esp),%esp\n\t"
2688 "pop %eax\n\t"
2689 "pop %ebx");
2690
2691 if (offset_p)
2692 *offset_p = 18;
2693 if (size_p)
2694 *size_p = 4;
2695}
2696
2697 static void
2698i386_emit_ne_goto (int *offset_p, int *size_p)
2699{
2700 EMIT_ASM32 (ne,
2701 /* Check low half first, more likely to be decider */
2702 "cmpl %eax,(%esp)\n\t"
2703 "jne .Lne_jump\n\t"
2704 "cmpl %ebx,4(%esp)\n\t"
2705 "je .Lne_fallthru\n\t"
2706 ".Lne_jump:\n\t"
2707 "lea 0x8(%esp),%esp\n\t"
2708 "pop %eax\n\t"
2709 "pop %ebx\n\t"
2710 /* jmp, but don't trust the assembler to choose the right jump */
2711 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2712 ".Lne_fallthru:\n\t"
2713 "lea 0x8(%esp),%esp\n\t"
2714 "pop %eax\n\t"
2715 "pop %ebx");
2716
2717 if (offset_p)
2718 *offset_p = 18;
2719 if (size_p)
2720 *size_p = 4;
2721}
2722
2723 static void
2724i386_emit_lt_goto (int *offset_p, int *size_p)
2725{
2726 EMIT_ASM32 (lt,
2727 "cmpl %ebx,4(%esp)\n\t"
2728 "jl .Llt_jump\n\t"
2729 "jne .Llt_fallthru\n\t"
2730 "cmpl %eax,(%esp)\n\t"
2731 "jnl .Llt_fallthru\n\t"
2732 ".Llt_jump:\n\t"
2733 "lea 0x8(%esp),%esp\n\t"
2734 "pop %eax\n\t"
2735 "pop %ebx\n\t"
2736 /* jmp, but don't trust the assembler to choose the right jump */
2737 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2738 ".Llt_fallthru:\n\t"
2739 "lea 0x8(%esp),%esp\n\t"
2740 "pop %eax\n\t"
2741 "pop %ebx");
2742
2743 if (offset_p)
2744 *offset_p = 20;
2745 if (size_p)
2746 *size_p = 4;
2747}
2748
2749 static void
2750i386_emit_le_goto (int *offset_p, int *size_p)
2751{
2752 EMIT_ASM32 (le,
2753 "cmpl %ebx,4(%esp)\n\t"
2754 "jle .Lle_jump\n\t"
2755 "jne .Lle_fallthru\n\t"
2756 "cmpl %eax,(%esp)\n\t"
2757 "jnle .Lle_fallthru\n\t"
2758 ".Lle_jump:\n\t"
2759 "lea 0x8(%esp),%esp\n\t"
2760 "pop %eax\n\t"
2761 "pop %ebx\n\t"
2762 /* jmp, but don't trust the assembler to choose the right jump */
2763 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2764 ".Lle_fallthru:\n\t"
2765 "lea 0x8(%esp),%esp\n\t"
2766 "pop %eax\n\t"
2767 "pop %ebx");
2768
2769 if (offset_p)
2770 *offset_p = 20;
2771 if (size_p)
2772 *size_p = 4;
2773}
2774
2775 static void
2776i386_emit_gt_goto (int *offset_p, int *size_p)
2777{
2778 EMIT_ASM32 (gt,
2779 "cmpl %ebx,4(%esp)\n\t"
2780 "jg .Lgt_jump\n\t"
2781 "jne .Lgt_fallthru\n\t"
2782 "cmpl %eax,(%esp)\n\t"
2783 "jng .Lgt_fallthru\n\t"
2784 ".Lgt_jump:\n\t"
2785 "lea 0x8(%esp),%esp\n\t"
2786 "pop %eax\n\t"
2787 "pop %ebx\n\t"
2788 /* jmp, but don't trust the assembler to choose the right jump */
2789 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2790 ".Lgt_fallthru:\n\t"
2791 "lea 0x8(%esp),%esp\n\t"
2792 "pop %eax\n\t"
2793 "pop %ebx");
2794
2795 if (offset_p)
2796 *offset_p = 20;
2797 if (size_p)
2798 *size_p = 4;
2799}
2800
2801 static void
2802i386_emit_ge_goto (int *offset_p, int *size_p)
2803{
2804 EMIT_ASM32 (ge,
2805 "cmpl %ebx,4(%esp)\n\t"
2806 "jge .Lge_jump\n\t"
2807 "jne .Lge_fallthru\n\t"
2808 "cmpl %eax,(%esp)\n\t"
2809 "jnge .Lge_fallthru\n\t"
2810 ".Lge_jump:\n\t"
2811 "lea 0x8(%esp),%esp\n\t"
2812 "pop %eax\n\t"
2813 "pop %ebx\n\t"
2814 /* jmp, but don't trust the assembler to choose the right jump */
2815 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2816 ".Lge_fallthru:\n\t"
2817 "lea 0x8(%esp),%esp\n\t"
2818 "pop %eax\n\t"
2819 "pop %ebx");
2820
2821 if (offset_p)
2822 *offset_p = 20;
2823 if (size_p)
2824 *size_p = 4;
2825}
2826
2827struct emit_ops i386_emit_ops =
2828 {
2829 i386_emit_prologue,
2830 i386_emit_epilogue,
2831 i386_emit_add,
2832 i386_emit_sub,
2833 i386_emit_mul,
2834 i386_emit_lsh,
2835 i386_emit_rsh_signed,
2836 i386_emit_rsh_unsigned,
2837 i386_emit_ext,
2838 i386_emit_log_not,
2839 i386_emit_bit_and,
2840 i386_emit_bit_or,
2841 i386_emit_bit_xor,
2842 i386_emit_bit_not,
2843 i386_emit_equal,
2844 i386_emit_less_signed,
2845 i386_emit_less_unsigned,
2846 i386_emit_ref,
2847 i386_emit_if_goto,
2848 i386_emit_goto,
2849 i386_write_goto_address,
2850 i386_emit_const,
2851 i386_emit_call,
2852 i386_emit_reg,
2853 i386_emit_pop,
2854 i386_emit_stack_flush,
2855 i386_emit_zero_ext,
2856 i386_emit_swap,
2857 i386_emit_stack_adjust,
2858 i386_emit_int_call_1,
2859 i386_emit_void_call_2,
2860 i386_emit_eq_goto,
2861 i386_emit_ne_goto,
2862 i386_emit_lt_goto,
2863 i386_emit_le_goto,
2864 i386_emit_gt_goto,
2865 i386_emit_ge_goto
2866 };
2867
2868
2869static struct emit_ops *
2870x86_emit_ops (void)
2871{
2872#ifdef __x86_64__
2873 if (is_64bit_tdesc ())
2874 return &amd64_emit_ops;
2875 else
2876#endif
2877 return &i386_emit_ops;
2878}
2879
2880 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2881
2882const gdb_byte *
2883x86_target::sw_breakpoint_from_kind (int kind, int *size)
2884{
2885 *size = x86_breakpoint_len;
2886 return x86_breakpoint;
2887}
2888
2889static int
2890x86_supports_range_stepping (void)
2891{
2892 return 1;
2893}
2894
2895/* Implementation of linux_target_ops method "supports_hardware_single_step".
2896 */
2897
2898static int
2899x86_supports_hardware_single_step (void)
2900{
2901 return 1;
2902}
2903
2904static int
2905x86_get_ipa_tdesc_idx (void)
2906{
2907 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2908 const struct target_desc *tdesc = regcache->tdesc;
2909
2910#ifdef __x86_64__
2911 return amd64_get_ipa_tdesc_idx (tdesc);
2912#endif
2913
2914 if (tdesc == tdesc_i386_linux_no_xml)
2915 return X86_TDESC_SSE;
2916
2917 return i386_get_ipa_tdesc_idx (tdesc);
2918}
2919
2920/* This is initialized assuming an amd64 target.
2921 x86_arch_setup will correct it for i386 or amd64 targets. */
2922
2923struct linux_target_ops the_low_target =
2924{
2925 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2926 native i386 case (no registers smaller than an xfer unit), and are not
2927 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2928 NULL,
2929 NULL,
2930 /* need to fix up i386 siginfo if host is amd64 */
2931 x86_siginfo_fixup,
2932 x86_linux_new_process,
2933 x86_linux_delete_process,
2934 x86_linux_new_thread,
2935 x86_linux_delete_thread,
2936 x86_linux_new_fork,
2937 x86_linux_prepare_to_resume,
2938 x86_linux_process_qsupported,
2939 x86_supports_tracepoints,
2940 x86_get_thread_area,
2941 x86_install_fast_tracepoint_jump_pad,
2942 x86_emit_ops,
2943 x86_get_min_fast_tracepoint_insn_len,
2944 x86_supports_range_stepping,
2945 x86_supports_hardware_single_step,
2946 x86_get_syscall_trapinfo,
2947 x86_get_ipa_tdesc_idx,
2948 };
2949
2950/* The linux target ops object. */
2951
2952linux_process_target *the_linux_target = &the_x86_target;
2953
2954void
2955initialize_low_arch (void)
2956{
2957 /* Initialize the Linux target descriptions. */
2958#ifdef __x86_64__
2959 tdesc_amd64_linux_no_xml = allocate_target_description ();
2960 copy_target_description (tdesc_amd64_linux_no_xml,
2961 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2962 false));
2963 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2964#endif
2965
2966 tdesc_i386_linux_no_xml = allocate_target_description ();
2967 copy_target_description (tdesc_i386_linux_no_xml,
2968 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2969 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2970
2971 initialize_regsets_info (&x86_regsets_info);
2972}