gdbserver/linux-low: turn 'get_thread_area' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
b811d2c2 3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
3aee8918
PA
50#ifdef __x86_64__
51static struct target_desc *tdesc_amd64_linux_no_xml;
52#endif
53static struct target_desc *tdesc_i386_linux_no_xml;
54
1570b33e 55
fa593d66 56static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 57static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 58
1570b33e
L
59/* Backward compatibility for gdb without XML support. */
60
61static const char *xmltarget_i386_linux_no_xml = "@<target>\
62<architecture>i386</architecture>\
63<osabi>GNU/Linux</osabi>\
64</target>";
f6d1620c
L
65
66#ifdef __x86_64__
1570b33e
L
67static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68<architecture>i386:x86-64</architecture>\
69<osabi>GNU/Linux</osabi>\
70</target>";
f6d1620c 71#endif
d0722149
DE
72
73#include <sys/reg.h>
74#include <sys/procfs.h>
1570b33e
L
75#include <sys/uio.h>
76
d0722149
DE
77#ifndef PTRACE_GET_THREAD_AREA
78#define PTRACE_GET_THREAD_AREA 25
79#endif
80
81/* This definition comes from prctl.h, but some kernels may not have it. */
82#ifndef PTRACE_ARCH_PRCTL
83#define PTRACE_ARCH_PRCTL 30
84#endif
85
86/* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88#ifndef ARCH_GET_FS
89#define ARCH_SET_GS 0x1001
90#define ARCH_SET_FS 0x1002
91#define ARCH_GET_FS 0x1003
92#define ARCH_GET_GS 0x1004
93#endif
94
ef0478f6
TBA
95/* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99class x86_target : public linux_process_target
100{
101public:
102
aa8d21c9
TBA
103 const regs_info *get_regs_info () override;
104
3ca4edb6
TBA
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
007c9b97
TBA
107 bool supports_z_point_type (char z_type) override;
108
a5b5da92
TBA
109 void process_qsupported (char **features, int count) override;
110
47f70aa7
TBA
111 bool supports_tracepoints () override;
112
797bcff5
TBA
113protected:
114
115 void low_arch_setup () override;
daca57a7
TBA
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
bf9ae9d8
TBA
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
d4807ea2
TBA
126
127 int low_decr_pc_after_break () override;
d7146cda
TBA
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
9db9aa23
TBA
130
131 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
132 int size, raw_breakpoint *bp) override;
133
134 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
135 int size, raw_breakpoint *bp) override;
ac1bbaca
TBA
136
137 bool low_stopped_by_watchpoint () override;
138
139 CORE_ADDR low_stopped_data_address () override;
b35db733
TBA
140
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
cb63de7c
TBA
144
145 /* Need to fix up i386 siginfo if host is amd64. */
146 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
147 int direction) override;
fd000fb3
TBA
148
149 arch_process_info *low_new_process () override;
150
151 void low_delete_process (arch_process_info *info) override;
152
153 void low_new_thread (lwp_info *) override;
154
155 void low_delete_thread (arch_lwp_info *) override;
156
157 void low_new_fork (process_info *parent, process_info *child) override;
d7599cc0
TBA
158
159 void low_prepare_to_resume (lwp_info *lwp) override;
a5b5da92 160
13e567af
TBA
161 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
162
a5b5da92
TBA
163private:
164
165 /* Update all the target description of all processes; a new GDB
166 connected, and it may or not support xml target descriptions. */
167 void update_xmltarget ();
ef0478f6
TBA
168};
169
170/* The singleton target ops object. */
171
172static x86_target the_x86_target;
173
aa5ca48f
DE
174/* Per-process arch-specific data we want to keep. */
175
176struct arch_process_info
177{
df7e5265 178 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
179};
180
d0722149
DE
181#ifdef __x86_64__
182
183/* Mapping between the general-purpose registers in `struct user'
184 format and GDB's register array layout.
185 Note that the transfer layout uses 64-bit regs. */
186static /*const*/ int i386_regmap[] =
187{
188 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
189 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
190 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
191 DS * 8, ES * 8, FS * 8, GS * 8
192};
193
194#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
195
196/* So code below doesn't have to care, i386 or amd64. */
197#define ORIG_EAX ORIG_RAX
bc9540e8 198#define REGSIZE 8
d0722149
DE
199
200static const int x86_64_regmap[] =
201{
202 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
203 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
204 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
205 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
206 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
207 DS * 8, ES * 8, FS * 8, GS * 8,
208 -1, -1, -1, -1, -1, -1, -1, -1,
209 -1, -1, -1, -1, -1, -1, -1, -1,
210 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
211 -1,
212 -1, -1, -1, -1, -1, -1, -1, -1,
213 ORIG_RAX * 8,
2735833d
WT
214#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
215 21 * 8, 22 * 8,
216#else
217 -1, -1,
218#endif
a196ebeb 219 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
220 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
221 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
222 -1, -1, -1, -1, -1, -1, -1, -1,
223 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
224 -1, -1, -1, -1, -1, -1, -1, -1,
225 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
226 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
227 -1, -1, -1, -1, -1, -1, -1, -1,
228 -1, -1, -1, -1, -1, -1, -1, -1,
51547df6
MS
229 -1, -1, -1, -1, -1, -1, -1, -1,
230 -1 /* pkru */
d0722149
DE
231};
232
233#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 234#define X86_64_USER_REGS (GS + 1)
d0722149
DE
235
236#else /* ! __x86_64__ */
237
238/* Mapping between the general-purpose registers in `struct user'
239 format and GDB's register array layout. */
240static /*const*/ int i386_regmap[] =
241{
242 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
243 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
244 EIP * 4, EFL * 4, CS * 4, SS * 4,
245 DS * 4, ES * 4, FS * 4, GS * 4
246};
247
248#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
249
bc9540e8
PA
250#define REGSIZE 4
251
d0722149 252#endif
3aee8918
PA
253
254#ifdef __x86_64__
255
256/* Returns true if the current inferior belongs to a x86-64 process,
257 per the tdesc. */
258
259static int
260is_64bit_tdesc (void)
261{
0bfdf32f 262 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
263
264 return register_size (regcache->tdesc, 0) == 8;
265}
266
267#endif
268
d0722149
DE
269\f
270/* Called by libthread_db. */
271
272ps_err_e
754653a7 273ps_get_thread_area (struct ps_prochandle *ph,
d0722149
DE
274 lwpid_t lwpid, int idx, void **base)
275{
276#ifdef __x86_64__
3aee8918 277 int use_64bit = is_64bit_tdesc ();
d0722149
DE
278
279 if (use_64bit)
280 {
281 switch (idx)
282 {
283 case FS:
284 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
285 return PS_OK;
286 break;
287 case GS:
288 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
289 return PS_OK;
290 break;
291 default:
292 return PS_BADADDR;
293 }
294 return PS_ERR;
295 }
296#endif
297
298 {
299 unsigned int desc[4];
300
301 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
302 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
303 return PS_ERR;
304
d1ec4ce7
DE
305 /* Ensure we properly extend the value to 64-bits for x86_64. */
306 *base = (void *) (uintptr_t) desc[1];
d0722149
DE
307 return PS_OK;
308 }
309}
fa593d66
PA
310
311/* Get the thread area address. This is used to recognize which
312 thread is which when tracing with the in-process agent library. We
313 don't read anything from the address, and treat it as opaque; it's
314 the address itself that we assume is unique per-thread. */
315
13e567af
TBA
316int
317x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
fa593d66
PA
318{
319#ifdef __x86_64__
3aee8918 320 int use_64bit = is_64bit_tdesc ();
fa593d66
PA
321
322 if (use_64bit)
323 {
324 void *base;
325 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
326 {
327 *addr = (CORE_ADDR) (uintptr_t) base;
328 return 0;
329 }
330
331 return -1;
332 }
333#endif
334
335 {
f2907e49 336 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
d86d4aaf
DE
337 struct thread_info *thr = get_lwp_thread (lwp);
338 struct regcache *regcache = get_thread_regcache (thr, 1);
fa593d66
PA
339 unsigned int desc[4];
340 ULONGEST gs = 0;
341 const int reg_thread_area = 3; /* bits to scale down register value. */
342 int idx;
343
344 collect_register_by_name (regcache, "gs", &gs);
345
346 idx = gs >> reg_thread_area;
347
348 if (ptrace (PTRACE_GET_THREAD_AREA,
d86d4aaf 349 lwpid_of (thr),
493e2a69 350 (void *) (long) idx, (unsigned long) &desc) < 0)
fa593d66
PA
351 return -1;
352
353 *addr = desc[1];
354 return 0;
355 }
356}
357
358
d0722149 359\f
daca57a7
TBA
360bool
361x86_target::low_cannot_store_register (int regno)
d0722149 362{
3aee8918
PA
363#ifdef __x86_64__
364 if (is_64bit_tdesc ())
daca57a7 365 return false;
3aee8918
PA
366#endif
367
d0722149
DE
368 return regno >= I386_NUM_REGS;
369}
370
daca57a7
TBA
371bool
372x86_target::low_cannot_fetch_register (int regno)
d0722149 373{
3aee8918
PA
374#ifdef __x86_64__
375 if (is_64bit_tdesc ())
daca57a7 376 return false;
3aee8918
PA
377#endif
378
d0722149
DE
379 return regno >= I386_NUM_REGS;
380}
381
382static void
442ea881 383x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
384{
385 int i;
386
387#ifdef __x86_64__
3aee8918 388 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
389 {
390 for (i = 0; i < X86_64_NUM_REGS; i++)
391 if (x86_64_regmap[i] != -1)
442ea881 392 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
2735833d
WT
393
394#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
395 {
396 unsigned long base;
397 int lwpid = lwpid_of (current_thread);
398
399 collect_register_by_name (regcache, "fs_base", &base);
400 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
401
402 collect_register_by_name (regcache, "gs_base", &base);
403 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
404 }
405#endif
406
d0722149
DE
407 return;
408 }
9e0aa64f
JK
409
410 /* 32-bit inferior registers need to be zero-extended.
411 Callers would read uninitialized memory otherwise. */
412 memset (buf, 0x00, X86_64_USER_REGS * 8);
d0722149
DE
413#endif
414
415 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 416 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 417
442ea881 418 collect_register_by_name (regcache, "orig_eax",
bc9540e8 419 ((char *) buf) + ORIG_EAX * REGSIZE);
3f52fdbc 420
e90a813d 421#ifdef __x86_64__
3f52fdbc
KB
422 /* Sign extend EAX value to avoid potential syscall restart
423 problems.
424
425 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
426 for a detailed explanation. */
427 if (register_size (regcache->tdesc, 0) == 4)
428 {
429 void *ptr = ((gdb_byte *) buf
430 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
431
432 *(int64_t *) ptr = *(int32_t *) ptr;
433 }
e90a813d 434#endif
d0722149
DE
435}
436
437static void
442ea881 438x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
439{
440 int i;
441
442#ifdef __x86_64__
3aee8918 443 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
444 {
445 for (i = 0; i < X86_64_NUM_REGS; i++)
446 if (x86_64_regmap[i] != -1)
442ea881 447 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
2735833d
WT
448
449#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
450 {
451 unsigned long base;
452 int lwpid = lwpid_of (current_thread);
453
454 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
455 supply_register_by_name (regcache, "fs_base", &base);
456
457 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
458 supply_register_by_name (regcache, "gs_base", &base);
459 }
460#endif
d0722149
DE
461 return;
462 }
463#endif
464
465 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 466 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 467
442ea881 468 supply_register_by_name (regcache, "orig_eax",
bc9540e8 469 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
470}
471
472static void
442ea881 473x86_fill_fpregset (struct regcache *regcache, void *buf)
d0722149
DE
474{
475#ifdef __x86_64__
442ea881 476 i387_cache_to_fxsave (regcache, buf);
d0722149 477#else
442ea881 478 i387_cache_to_fsave (regcache, buf);
d0722149
DE
479#endif
480}
481
482static void
442ea881 483x86_store_fpregset (struct regcache *regcache, const void *buf)
d0722149
DE
484{
485#ifdef __x86_64__
442ea881 486 i387_fxsave_to_cache (regcache, buf);
d0722149 487#else
442ea881 488 i387_fsave_to_cache (regcache, buf);
d0722149
DE
489#endif
490}
491
#ifndef __x86_64__

/* FPX (fxsave) regset accessors, used only on 32-bit hosts.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
507
1570b33e
L
508static void
509x86_fill_xstateregset (struct regcache *regcache, void *buf)
510{
511 i387_cache_to_xsave (regcache, buf);
512}
513
514static void
515x86_store_xstateregset (struct regcache *regcache, const void *buf)
516{
517 i387_xsave_to_cache (regcache, buf);
518}
519
d0722149
DE
520/* ??? The non-biarch i386 case stores all the i387 regs twice.
521 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
522 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
523 doesn't work. IWBN to avoid the duplication in the case where it
524 does work. Maybe the arch_setup routine could check whether it works
3aee8918 525 and update the supported regsets accordingly. */
d0722149 526
3aee8918 527static struct regset_info x86_regsets[] =
d0722149
DE
528{
529#ifdef HAVE_PTRACE_GETREGS
1570b33e 530 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
531 GENERAL_REGS,
532 x86_fill_gregset, x86_store_gregset },
1570b33e
L
533 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
534 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
535# ifndef __x86_64__
536# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 537 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
538 EXTENDED_REGS,
539 x86_fill_fpxregset, x86_store_fpxregset },
540# endif
541# endif
1570b33e 542 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
543 FP_REGS,
544 x86_fill_fpregset, x86_store_fpregset },
545#endif /* HAVE_PTRACE_GETREGS */
50bc912a 546 NULL_REGSET
d0722149
DE
547};
548
bf9ae9d8
TBA
549bool
550x86_target::low_supports_breakpoints ()
551{
552 return true;
553}
554
555CORE_ADDR
556x86_target::low_get_pc (regcache *regcache)
d0722149 557{
3aee8918 558 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
559
560 if (use_64bit)
561 {
6598661d
PA
562 uint64_t pc;
563
442ea881 564 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
565 return (CORE_ADDR) pc;
566 }
567 else
568 {
6598661d
PA
569 uint32_t pc;
570
442ea881 571 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
572 return (CORE_ADDR) pc;
573 }
574}
575
bf9ae9d8
TBA
576void
577x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
d0722149 578{
3aee8918 579 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
580
581 if (use_64bit)
582 {
6598661d
PA
583 uint64_t newpc = pc;
584
442ea881 585 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
586 }
587 else
588 {
6598661d
PA
589 uint32_t newpc = pc;
590
442ea881 591 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
592 }
593}
d4807ea2
TBA
594
595int
596x86_target::low_decr_pc_after_break ()
597{
598 return 1;
599}
600
d0722149 601\f
dd373349 602static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
603#define x86_breakpoint_len 1
604
d7146cda
TBA
605bool
606x86_target::low_breakpoint_at (CORE_ADDR pc)
d0722149
DE
607{
608 unsigned char c;
609
d7146cda 610 read_memory (pc, &c, 1);
d0722149 611 if (c == 0xCC)
d7146cda 612 return true;
d0722149 613
d7146cda 614 return false;
d0722149
DE
615}
616\f
42995dbd 617/* Low-level function vector. */
df7e5265 618struct x86_dr_low_type x86_dr_low =
42995dbd 619 {
d33472ad
GB
620 x86_linux_dr_set_control,
621 x86_linux_dr_set_addr,
622 x86_linux_dr_get_addr,
623 x86_linux_dr_get_status,
624 x86_linux_dr_get_control,
42995dbd
GB
625 sizeof (void *),
626 };
aa5ca48f 627\f
90d74c30 628/* Breakpoint/Watchpoint support. */
aa5ca48f 629
007c9b97
TBA
630bool
631x86_target::supports_z_point_type (char z_type)
802e8e6d
PA
632{
633 switch (z_type)
634 {
635 case Z_PACKET_SW_BP:
636 case Z_PACKET_HW_BP:
637 case Z_PACKET_WRITE_WP:
638 case Z_PACKET_ACCESS_WP:
007c9b97 639 return true;
802e8e6d 640 default:
007c9b97 641 return false;
802e8e6d
PA
642 }
643}
644
9db9aa23
TBA
645int
646x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
647 int size, raw_breakpoint *bp)
aa5ca48f
DE
648{
649 struct process_info *proc = current_process ();
802e8e6d 650
aa5ca48f
DE
651 switch (type)
652 {
802e8e6d
PA
653 case raw_bkpt_type_hw:
654 case raw_bkpt_type_write_wp:
655 case raw_bkpt_type_access_wp:
a4165e94 656 {
802e8e6d
PA
657 enum target_hw_bp_type hw_type
658 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 659 struct x86_debug_reg_state *state
fe978cb0 660 = &proc->priv->arch_private->debug_reg_state;
a4165e94 661
df7e5265 662 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
a4165e94 663 }
961bd387 664
aa5ca48f
DE
665 default:
666 /* Unsupported. */
667 return 1;
668 }
669}
670
9db9aa23
TBA
671int
672x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
673 int size, raw_breakpoint *bp)
aa5ca48f
DE
674{
675 struct process_info *proc = current_process ();
802e8e6d 676
aa5ca48f
DE
677 switch (type)
678 {
802e8e6d
PA
679 case raw_bkpt_type_hw:
680 case raw_bkpt_type_write_wp:
681 case raw_bkpt_type_access_wp:
a4165e94 682 {
802e8e6d
PA
683 enum target_hw_bp_type hw_type
684 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 685 struct x86_debug_reg_state *state
fe978cb0 686 = &proc->priv->arch_private->debug_reg_state;
a4165e94 687
df7e5265 688 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
a4165e94 689 }
aa5ca48f
DE
690 default:
691 /* Unsupported. */
692 return 1;
693 }
694}
695
ac1bbaca
TBA
696bool
697x86_target::low_stopped_by_watchpoint ()
aa5ca48f
DE
698{
699 struct process_info *proc = current_process ();
fe978cb0 700 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
701}
702
ac1bbaca
TBA
703CORE_ADDR
704x86_target::low_stopped_data_address ()
aa5ca48f
DE
705{
706 struct process_info *proc = current_process ();
707 CORE_ADDR addr;
fe978cb0 708 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 709 &addr))
aa5ca48f
DE
710 return addr;
711 return 0;
712}
713\f
714/* Called when a new process is created. */
715
fd000fb3
TBA
716arch_process_info *
717x86_target::low_new_process ()
aa5ca48f 718{
ed859da7 719 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 720
df7e5265 721 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
722
723 return info;
724}
725
04ec7890
SM
726/* Called when a process is being deleted. */
727
fd000fb3
TBA
728void
729x86_target::low_delete_process (arch_process_info *info)
04ec7890
SM
730{
731 xfree (info);
732}
733
fd000fb3
TBA
734void
735x86_target::low_new_thread (lwp_info *lwp)
736{
737 /* This comes from nat/. */
738 x86_linux_new_thread (lwp);
739}
3a8a0396 740
fd000fb3
TBA
741void
742x86_target::low_delete_thread (arch_lwp_info *alwp)
743{
744 /* This comes from nat/. */
745 x86_linux_delete_thread (alwp);
746}
747
748/* Target routine for new_fork. */
749
750void
751x86_target::low_new_fork (process_info *parent, process_info *child)
3a8a0396
DB
752{
753 /* These are allocated by linux_add_process. */
754 gdb_assert (parent->priv != NULL
755 && parent->priv->arch_private != NULL);
756 gdb_assert (child->priv != NULL
757 && child->priv->arch_private != NULL);
758
759 /* Linux kernel before 2.6.33 commit
760 72f674d203cd230426437cdcf7dd6f681dad8b0d
761 will inherit hardware debug registers from parent
762 on fork/vfork/clone. Newer Linux kernels create such tasks with
763 zeroed debug registers.
764
765 GDB core assumes the child inherits the watchpoints/hw
766 breakpoints of the parent, and will remove them all from the
767 forked off process. Copy the debug registers mirrors into the
768 new process so that all breakpoints and watchpoints can be
769 removed together. The debug registers mirror will become zeroed
770 in the end before detaching the forked off process, thus making
771 this compatible with older Linux kernels too. */
772
773 *child->priv->arch_private = *parent->priv->arch_private;
774}
775
d7599cc0
TBA
776void
777x86_target::low_prepare_to_resume (lwp_info *lwp)
778{
779 /* This comes from nat/. */
780 x86_linux_prepare_to_resume (lwp);
781}
782
70a0bb6b
GB
783/* See nat/x86-dregs.h. */
784
785struct x86_debug_reg_state *
786x86_debug_reg_state (pid_t pid)
787{
788 struct process_info *proc = find_process_pid (pid);
789
790 return &proc->priv->arch_private->debug_reg_state;
791}
aa5ca48f 792\f
d0722149
DE
793/* When GDBSERVER is built as a 64-bit application on linux, the
794 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
795 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
796 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
797 conversion in-place ourselves. */
798
9cf12d57 799/* Convert a ptrace/host siginfo object, into/from the siginfo in the
d0722149
DE
800 layout of the inferiors' architecture. Returns true if any
801 conversion was done; false otherwise. If DIRECTION is 1, then copy
9cf12d57 802 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
d0722149
DE
803 INF. */
804
cb63de7c
TBA
805bool
806x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
d0722149
DE
807{
808#ifdef __x86_64__
760256f9 809 unsigned int machine;
0bfdf32f 810 int tid = lwpid_of (current_thread);
760256f9
PA
811 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
812
d0722149 813 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
3aee8918 814 if (!is_64bit_tdesc ())
9cf12d57 815 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
c23bbc1c 816 FIXUP_32);
c92b5177 817 /* No fixup for native x32 GDB. */
760256f9 818 else if (!is_elf64 && sizeof (void *) == 8)
9cf12d57 819 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
c23bbc1c 820 FIXUP_X32);
d0722149
DE
821#endif
822
cb63de7c 823 return false;
d0722149
DE
824}
825\f
1570b33e
L
/* Non-zero when the connected GDB supports XML target descriptions;
   set from the qSupported "xmlRegisters=" feature.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
1570b33e 859
3aee8918
PA
860/* Get Linux/x86 target description from running target. */
861
862static const struct target_desc *
863x86_linux_read_description (void)
1570b33e 864{
3aee8918
PA
865 unsigned int machine;
866 int is_elf64;
a196ebeb 867 int xcr0_features;
3aee8918
PA
868 int tid;
869 static uint64_t xcr0;
3a13a53b 870 struct regset_info *regset;
1570b33e 871
0bfdf32f 872 tid = lwpid_of (current_thread);
1570b33e 873
3aee8918 874 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
45ba0d02 875
3aee8918 876 if (sizeof (void *) == 4)
3a13a53b 877 {
3aee8918
PA
878 if (is_elf64 > 0)
879 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
880#ifndef __x86_64__
881 else if (machine == EM_X86_64)
882 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
883#endif
884 }
3a13a53b 885
3aee8918
PA
886#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
887 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
888 {
889 elf_fpxregset_t fpxregs;
3a13a53b 890
3aee8918 891 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
3a13a53b 892 {
3aee8918
PA
893 have_ptrace_getfpxregs = 0;
894 have_ptrace_getregset = 0;
f49ff000 895 return i386_linux_read_description (X86_XSTATE_X87);
3a13a53b 896 }
3aee8918
PA
897 else
898 have_ptrace_getfpxregs = 1;
3a13a53b 899 }
1570b33e
L
900#endif
901
902 if (!use_xml)
903 {
df7e5265 904 x86_xcr0 = X86_XSTATE_SSE_MASK;
3aee8918 905
1570b33e
L
906 /* Don't use XML. */
907#ifdef __x86_64__
3aee8918
PA
908 if (machine == EM_X86_64)
909 return tdesc_amd64_linux_no_xml;
1570b33e 910 else
1570b33e 911#endif
3aee8918 912 return tdesc_i386_linux_no_xml;
1570b33e
L
913 }
914
1570b33e
L
915 if (have_ptrace_getregset == -1)
916 {
df7e5265 917 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1570b33e 918 struct iovec iov;
1570b33e
L
919
920 iov.iov_base = xstateregs;
921 iov.iov_len = sizeof (xstateregs);
922
923 /* Check if PTRACE_GETREGSET works. */
3aee8918
PA
924 if (ptrace (PTRACE_GETREGSET, tid,
925 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
926 have_ptrace_getregset = 0;
927 else
1570b33e 928 {
3aee8918
PA
929 have_ptrace_getregset = 1;
930
931 /* Get XCR0 from XSAVE extended state. */
932 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
933 / sizeof (uint64_t))];
934
935 /* Use PTRACE_GETREGSET if it is available. */
936 for (regset = x86_regsets;
937 regset->fill_function != NULL; regset++)
938 if (regset->get_request == PTRACE_GETREGSET)
df7e5265 939 regset->size = X86_XSTATE_SIZE (xcr0);
3aee8918
PA
940 else if (regset->type != GENERAL_REGS)
941 regset->size = 0;
1570b33e 942 }
1570b33e
L
943 }
944
3aee8918 945 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
a196ebeb 946 xcr0_features = (have_ptrace_getregset
2e1e43e1 947 && (xcr0 & X86_XSTATE_ALL_MASK));
3aee8918 948
a196ebeb 949 if (xcr0_features)
3aee8918 950 x86_xcr0 = xcr0;
1570b33e 951
3aee8918
PA
952 if (machine == EM_X86_64)
953 {
1570b33e 954#ifdef __x86_64__
b4570e4b 955 const target_desc *tdesc = NULL;
a196ebeb 956
b4570e4b 957 if (xcr0_features)
3aee8918 958 {
b4570e4b
YQ
959 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
960 !is_elf64);
1570b33e 961 }
b4570e4b
YQ
962
963 if (tdesc == NULL)
964 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
965 return tdesc;
3aee8918 966#endif
1570b33e 967 }
3aee8918
PA
968 else
969 {
f49ff000 970 const target_desc *tdesc = NULL;
a1fa17ee 971
f49ff000
YQ
972 if (xcr0_features)
973 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
2b863f51 974
f49ff000
YQ
975 if (tdesc == NULL)
976 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
a196ebeb 977
f49ff000 978 return tdesc;
3aee8918
PA
979 }
980
981 gdb_assert_not_reached ("failed to return tdesc");
982}
983
3aee8918
PA
984/* Update all the target description of all processes; a new GDB
985 connected, and it may or not support xml target descriptions. */
986
797bcff5
TBA
987void
988x86_target::update_xmltarget ()
3aee8918 989{
0bfdf32f 990 struct thread_info *saved_thread = current_thread;
3aee8918
PA
991
992 /* Before changing the register cache's internal layout, flush the
993 contents of the current valid caches back to the threads, and
994 release the current regcache objects. */
995 regcache_release ();
996
797bcff5 997 for_each_process ([this] (process_info *proc) {
9179355e
SM
998 int pid = proc->pid;
999
1000 /* Look up any thread of this process. */
1001 current_thread = find_any_thread_of_pid (pid);
1002
797bcff5 1003 low_arch_setup ();
9179355e 1004 });
3aee8918 1005
0bfdf32f 1006 current_thread = saved_thread;
1570b33e
L
1007}
1008
/* Process the qSupported query.  Look for the "xmlRegisters=" feature
   and set USE_XML according to whether GDB announced support for x86
   XML target descriptions, then refresh every process's target
   description to match (via update_xmltarget).  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  /* Skip past "xmlRegisters=" (13 characters) and scan the
	     comma-separated architecture list for "i386".  */
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}
1046
/* Common for x86/x86-64.  */

/* Regset list shared by both the 32-bit and 64-bit flavors; the
   number of regsets is computed lazily (0 here).  */
static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
/* 64-bit register access goes exclusively through regsets; there is
   no usrregs (PTRACE_PEEKUSER) fallback.  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

/* PTRACE_PEEKUSER-style register map for 32-bit inferiors.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

/* 32-bit inferiors can use both usrregs and the shared regsets.  */
static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1076
aa8d21c9
TBA
/* Return the register access description matching the current
   thread's target description: the amd64 variant for a 64-bit tdesc
   (only possible in a 64-bit build), the i386 variant otherwise.  */

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
d0722149 1087
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  The description is read fresh each call and stored in
   the current process.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1096
82075af2
JS
/* Fill *SYSNO with the syscall number the inferior trapped on.  This
   should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  /* Decide 32- vs 64-bit by the size of register 0 in the thread's
     target description.  */
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      /* orig_rax holds the syscall number on entry (rax itself is
	 clobbered with the return value).  */
      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
1115
47f70aa7
TBA
/* Tracepoints are always supported on x86/x86-64 GNU/Linux.  */

bool
x86_target::supports_tracepoints ()
{
  return true;
}
1121
fa593d66
PA
/* Write LEN bytes from BUF into the inferior at *TO, then advance
   *TO past the bytes written.  Used to build code incrementally.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1128
/* Decode OP, a string of whitespace-separated hexadecimal byte
   values (e.g. "48 89 e5"), into BUF.  Decoding stops at the first
   token that is not valid hex.  Return the number of bytes stored.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* No progress means no more hex tokens.  */
      if (end == op)
	break;

      *dst++ = byte;
      op = end;
    }

  return dst - buf;
}
1148
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) if a required jump displacement does not fit in 32 bits.
   NOTE(review): JUMP_INSN is presumably a 5-byte e9-jump template
   defined earlier in this file — confirm against the full source.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  /* Push the tracepoint address (the saved "PC") last.  */
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1339
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) when no trampoline space is available for a 4-byte original
   instruction.  NOTE(review): JUMP_INSN/SMALL_JUMP_INSN are
   presumably jump templates defined earlier in this file — confirm
   against the full source.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state (mirror of the pushes above, in reverse
     order).  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1527
/* Dispatch jump-pad installation to the amd64 or i386 builder above,
   based on the current thread's target description.  Arguments and
   return value are those of the per-architecture builders.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1565
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  Returns 0 when the length cannot be determined yet
   (in-process agent not loaded).  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  /* Warn at most once per GDBserver run.  */
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
1612
6a271cae
PA
/* Append LEN bytes from START at the current bytecode emission
   position (CURRENT_INSN_PTR), advancing it past the new bytes.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1625
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Emit the instructions INSNS (a string of assembly) into the
   inferior: the asm block is assembled into this function's own text
   between the labels start_NAME/end_NAME, jumped over at run time,
   and its bytes copied out via add_insns.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assemble INSNS as 32-bit code (.code32) so a
   64-bit GDBserver can emit bytecode for 32-bit inferiors.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1663
1664#ifdef __x86_64__
1665
/* Emit the bytecode function prologue: set up a frame, reserve 0x20
   bytes, and spill the two incoming argument registers.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the epilogue: store %rax through the pointer spilled at
   -16(%rbp) and return 0.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit: add the next stack slot into %rax, popping it.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: subtract %rax from the next stack slot, pop result into %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply is not implemented; flag an emit error.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; flag an emit error.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic right shift is not implemented; flag an emit error.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical right shift is not implemented; flag an emit error.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Emit sign-extension of the low ARG bits of %rax to 64 bits;
   unsupported widths flag an emit error.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
1753
/* Emit logical NOT: %rax = (%rax == 0) ? 1 : 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit: bitwise-AND the next stack slot into %rax, popping it.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: bitwise-OR the next stack slot into %rax, popping it.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: bitwise-XOR the next stack slot into %rax, popping it.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise NOT of %rax (xor with all-ones).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Emit: %rax = (popped stack slot == %rax) ? 1 : 0.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (popped slot < %rax), signed comparison.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (popped slot < %rax), unsigned comparison.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit a SIZE-byte load through the address in %rax into %rax.
   Note sizes other than 1/2/4/8 silently emit nothing.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
1859
/* Emit a conditional branch: pop the next value into %rax and branch
   if the old %rax was non-zero.  The jne displacement is a 4-byte
   placeholder; report its offset (10) and size (4) so the caller can
   patch it later via amd64_write_goto_address.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp with a 4-byte placeholder displacement
   at offset 1, to be patched later.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch a previously-emitted jump: write the 4-byte relative
   displacement (TO - (FROM + SIZE)) at FROM.  Only SIZE == 4 is
   supported; anything else flags an emit error.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

/* Emit a 10-byte movabs loading the 64-bit constant NUM into %rax.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1915
/* Emit a call to FN: a 5-byte relative call when FN is within a
   32-bit displacement of the emission point, otherwise a movabs of
   FN into %r10 followed by an indirect callq.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit code to fetch raw register REG: load the register number into
   %esi and call the get_raw_reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Emit: pop the top of the bytecode stack into %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit: push %rax (the cached top of stack) onto the real stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Emit zero-extension of the low ARG bits of %rax; unsupported
   widths flag an emit error.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit: swap %rax with the next stack slot.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit code dropping N 8-byte entries from the stack via a 1-byte
   lea displacement.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code loading ARG1
   into %edi and calling FN.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit code passing
   ARG1 and the stack-top value to FN, preserving the stack top.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2085
df4a0200 2086static void
6b9801d4
SS
2087amd64_emit_eq_goto (int *offset_p, int *size_p)
2088{
2089 EMIT_ASM (amd64_eq,
2090 "cmp %rax,(%rsp)\n\t"
2091 "jne .Lamd64_eq_fallthru\n\t"
2092 "lea 0x8(%rsp),%rsp\n\t"
2093 "pop %rax\n\t"
2094 /* jmp, but don't trust the assembler to choose the right jump */
2095 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2096 ".Lamd64_eq_fallthru:\n\t"
2097 "lea 0x8(%rsp),%rsp\n\t"
2098 "pop %rax");
2099
2100 if (offset_p)
2101 *offset_p = 13;
2102 if (size_p)
2103 *size_p = 4;
2104}
2105
df4a0200 2106static void
6b9801d4
SS
2107amd64_emit_ne_goto (int *offset_p, int *size_p)
2108{
2109 EMIT_ASM (amd64_ne,
2110 "cmp %rax,(%rsp)\n\t"
2111 "je .Lamd64_ne_fallthru\n\t"
2112 "lea 0x8(%rsp),%rsp\n\t"
2113 "pop %rax\n\t"
2114 /* jmp, but don't trust the assembler to choose the right jump */
2115 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2116 ".Lamd64_ne_fallthru:\n\t"
2117 "lea 0x8(%rsp),%rsp\n\t"
2118 "pop %rax");
2119
2120 if (offset_p)
2121 *offset_p = 13;
2122 if (size_p)
2123 *size_p = 4;
2124}
2125
df4a0200 2126static void
6b9801d4
SS
2127amd64_emit_lt_goto (int *offset_p, int *size_p)
2128{
2129 EMIT_ASM (amd64_lt,
2130 "cmp %rax,(%rsp)\n\t"
2131 "jnl .Lamd64_lt_fallthru\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2133 "pop %rax\n\t"
2134 /* jmp, but don't trust the assembler to choose the right jump */
2135 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2136 ".Lamd64_lt_fallthru:\n\t"
2137 "lea 0x8(%rsp),%rsp\n\t"
2138 "pop %rax");
2139
2140 if (offset_p)
2141 *offset_p = 13;
2142 if (size_p)
2143 *size_p = 4;
2144}
2145
df4a0200 2146static void
6b9801d4
SS
2147amd64_emit_le_goto (int *offset_p, int *size_p)
2148{
2149 EMIT_ASM (amd64_le,
2150 "cmp %rax,(%rsp)\n\t"
2151 "jnle .Lamd64_le_fallthru\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2153 "pop %rax\n\t"
2154 /* jmp, but don't trust the assembler to choose the right jump */
2155 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2156 ".Lamd64_le_fallthru:\n\t"
2157 "lea 0x8(%rsp),%rsp\n\t"
2158 "pop %rax");
2159
2160 if (offset_p)
2161 *offset_p = 13;
2162 if (size_p)
2163 *size_p = 4;
2164}
2165
/* Emit amd64 code for a signed "greater than" conditional goto:
   branch when (%rsp) > %rax ("jng" falls through otherwise).
   *OFFSET_P/*SIZE_P locate the patchable jmp rel32 displacement.  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2185
/* Emit amd64 code for a signed "greater or equal" conditional goto:
   branch when (%rsp) >= %rax ("jnge" falls through otherwise).
   *OFFSET_P/*SIZE_P locate the patchable jmp rel32 displacement.
   NOTE(review): the ".Lamd64_ge_jump" label is defined but never
   referenced -- harmless, kept for parity with the i386 variant.  */

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2206
/* Bytecode compiler vector for 64-bit inferiors.  The initializers are
   positional and must stay in struct emit_ops field order.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2247
2248#endif /* __x86_64__ */
2249
/* Emit the i386 function prologue for compiled agent expressions: set
   up a frame and preserve the callee-saved %ebx, which the compiled
   code uses as the high word of the 64-bit top-of-stack value.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp). */
}
2260
/* Emit the i386 function epilogue: store the 64-bit top-of-stack
   (%eax low / %ebx high) through the value pointer saved at 12(%ebp),
   return 0 in %eax, and restore %ebx/%ebp.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2273
/* Emit 64-bit addition: add the next-on-stack value (low word at
   (%esp), high at 4(%esp)) into %eax/%ebx with carry propagation,
   then drop the stack slot.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2282
/* Emit 64-bit subtraction: subtract the top of stack (%eax/%ebx) from
   the next-on-stack value in place (with borrow), then pop the result
   back into %eax/%ebx.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2292
/* 64-bit multiply is not implemented for the i386 bytecode compiler;
   flag a compilation error so the caller falls back to
   interpretation.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2298
/* 64-bit left shift is not implemented on i386; flag an error.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2304
/* 64-bit arithmetic right shift is not implemented on i386; flag an
   error.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2310
/* 64-bit logical right shift is not implemented on i386; flag an
   error.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2316
/* Emit code to sign-extend the low ARG bits of the top-of-stack value
   to the full 64 bits (%eax low / %ebx high).  Only 8, 16 and 32 are
   supported; any other width is a compile error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2344
/* Emit logical NOT of the 64-bit top of stack: the result is 1 when
   both words are zero, 0 otherwise (%ebx is cleared).  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2355
/* Emit 64-bit bitwise AND of the two top stack entries; the popped
   slot is dropped and the result stays in %eax/%ebx.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2364
/* Emit 64-bit bitwise OR of the two top stack entries.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2373
/* Emit 64-bit bitwise XOR of the two top stack entries.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2382
/* Emit 64-bit bitwise complement of the top of stack in place.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2390
/* Emit 64-bit equality test: compare both word pairs of the two top
   stack entries, pop one, and leave 1/0 in %eax (%ebx cleared).  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2408
2409static void
2410i386_emit_less_signed (void)
2411{
2412 EMIT_ASM32 (i386_less_signed,
2413 "cmpl %ebx,4(%esp)\n\t"
2414 "jl .Li386_less_signed_true\n\t"
2415 "jne .Li386_less_signed_false\n\t"
2416 "cmpl %eax,(%esp)\n\t"
2417 "jl .Li386_less_signed_true\n\t"
2418 ".Li386_less_signed_false:\n\t"
2419 "xor %eax,%eax\n\t"
2420 "jmp .Li386_less_signed_end\n\t"
2421 ".Li386_less_signed_true:\n\t"
2422 "mov $1,%eax\n\t"
2423 ".Li386_less_signed_end:\n\t"
2424 "xor %ebx,%ebx\n\t"
2425 "lea 0x8(%esp),%esp");
2426}
2427
/* Emit 64-bit unsigned "less than": leave 1 in %eax when the
   next-on-stack value is below the top of stack, else 0; %ebx is
   cleared and one slot is dropped.  High words are compared first;
   equal high words fall through to an unsigned low-word compare.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2446
/* Emit a SIZE-byte memory fetch through the address in %eax.  For
   sizes below 4 only the low bytes of %eax are written; presumably the
   compiler follows up with i386_emit_zero_ext/i386_emit_ext to clean
   the upper bits -- TODO confirm against callers.  Unsupported sizes
   emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2471
/* Emit a conditional goto taken when the (popped) 64-bit top of stack
   is non-zero.  The branch is a hand-encoded jne rel32 (0x0f 0x85);
   *OFFSET_P returns the offset of its 4-byte displacement within the
   emitted sequence and *SIZE_P its size.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2489
/* Emit an unconditional goto as a hand-encoded jmp rel32 (0xe9); the
   patchable 4-byte displacement starts 1 byte in.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2501
2502static void
2503i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2504{
2505 int diff = (to - (from + size));
2506 unsigned char buf[sizeof (int)];
2507
2508 /* We're only doing 4-byte sizes at the moment. */
2509 if (size != 4)
2510 {
2511 emit_error = 1;
2512 return;
2513 }
2514
2515 memcpy (buf, &diff, sizeof (int));
4196ab2a 2516 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2517}
2518
/* Emit code to load the 64-bit constant NUM into the top-of-stack
   register pair: %eax gets the low word, %ebx the high word.  A zero
   high word is materialized with a 2-byte "xor %ebx,%ebx" instead of
   a 5-byte immediate move.  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2545
2546static void
2547i386_emit_call (CORE_ADDR fn)
2548{
2549 unsigned char buf[16];
2550 int i, offset;
2551 CORE_ADDR buildaddr;
2552
2553 buildaddr = current_insn_ptr;
2554 i = 0;
2555 buf[i++] = 0xe8; /* call <reladdr> */
2556 offset = ((int) fn) - (buildaddr + 5);
2557 memcpy (buf + 1, &offset, 4);
2558 append_insns (&buildaddr, 5, buf);
2559 current_insn_ptr = buildaddr;
2560}
2561
/* Emit code to push raw register number REG onto the value stack: set
   up a two-argument call frame (raw regs base from 8(%ebp), REG as
   immediate), call the raw-register fetch helper, and clear %ebx so
   the 64-bit result has a zero high word.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2587
/* Emit code to pop the next 64-bit stack entry into %eax/%ebx.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2595
/* Emit code to spill the top-of-stack pair %eax/%ebx onto the machine
   stack (high word first, so the low word ends up at (%esp)).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2603
/* Emit code to zero-extend the low ARG bits of the top-of-stack value
   to the full 64 bits.  Only 8, 16 and 32 are supported; any other
   width is a compile error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2627
/* Emit code to swap the two top 64-bit stack entries: stash %eax/%ebx
   in %ecx/%edx, pop the next entry into %eax/%ebx, push the stash.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2639
/* Emit code to drop N 64-bit entries from the value stack with a
   single "lea <n*8>(%esp),%esp".  NOTE(review): the displacement is
   emitted in the disp8 form, so this only encodes correctly while
   n * 8 fits in a signed byte -- presumably N is always small here;
   confirm against callers.  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2655
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code to call FN with
   the immediate argument ARG1; the i386 ABI returns the 64-bit result
   in %eax/%edx, so the final "mov %edx,%ebx" moves the high word into
   the compiled code's top-of-stack register pair.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space. */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2683
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit code to call FN
   with the immediate ARG1 and the current 64-bit top-of-stack value
   as second argument, preserving %eax across the call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx. */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments. */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position. (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.) */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top. */
	      "pop %eax");
}
2719
6b9801d4 2720
/* Emit i386 code for a 64-bit "equal" conditional goto: compare both
   word pairs of the two top stack entries, discard both entries, and
   branch when they are equal.  The branch is a hand-encoded jmp rel32;
   its 4-byte displacement starts 18 bytes into the sequence.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2745
/* Emit i386 code for a 64-bit "not equal" conditional goto: branch
   when either word pair of the two top stack entries differs.  The
   patchable jmp rel32 displacement starts 18 bytes in.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2771
df4a0200 2772static void
6b9801d4
SS
2773i386_emit_lt_goto (int *offset_p, int *size_p)
2774{
2775 EMIT_ASM32 (lt,
2776 "cmpl %ebx,4(%esp)\n\t"
2777 "jl .Llt_jump\n\t"
2778 "jne .Llt_fallthru\n\t"
2779 "cmpl %eax,(%esp)\n\t"
2780 "jnl .Llt_fallthru\n\t"
2781 ".Llt_jump:\n\t"
2782 "lea 0x8(%esp),%esp\n\t"
2783 "pop %eax\n\t"
2784 "pop %ebx\n\t"
2785 /* jmp, but don't trust the assembler to choose the right jump */
2786 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2787 ".Llt_fallthru:\n\t"
2788 "lea 0x8(%esp),%esp\n\t"
2789 "pop %eax\n\t"
2790 "pop %ebx");
2791
2792 if (offset_p)
2793 *offset_p = 20;
2794 if (size_p)
2795 *size_p = 4;
2796}
2797
df4a0200 2798static void
6b9801d4
SS
2799i386_emit_le_goto (int *offset_p, int *size_p)
2800{
2801 EMIT_ASM32 (le,
2802 "cmpl %ebx,4(%esp)\n\t"
2803 "jle .Lle_jump\n\t"
2804 "jne .Lle_fallthru\n\t"
2805 "cmpl %eax,(%esp)\n\t"
2806 "jnle .Lle_fallthru\n\t"
2807 ".Lle_jump:\n\t"
2808 "lea 0x8(%esp),%esp\n\t"
2809 "pop %eax\n\t"
2810 "pop %ebx\n\t"
2811 /* jmp, but don't trust the assembler to choose the right jump */
2812 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2813 ".Lle_fallthru:\n\t"
2814 "lea 0x8(%esp),%esp\n\t"
2815 "pop %eax\n\t"
2816 "pop %ebx");
2817
2818 if (offset_p)
2819 *offset_p = 20;
2820 if (size_p)
2821 *size_p = 4;
2822}
2823
df4a0200 2824static void
6b9801d4
SS
2825i386_emit_gt_goto (int *offset_p, int *size_p)
2826{
2827 EMIT_ASM32 (gt,
2828 "cmpl %ebx,4(%esp)\n\t"
2829 "jg .Lgt_jump\n\t"
2830 "jne .Lgt_fallthru\n\t"
2831 "cmpl %eax,(%esp)\n\t"
2832 "jng .Lgt_fallthru\n\t"
2833 ".Lgt_jump:\n\t"
2834 "lea 0x8(%esp),%esp\n\t"
2835 "pop %eax\n\t"
2836 "pop %ebx\n\t"
2837 /* jmp, but don't trust the assembler to choose the right jump */
2838 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2839 ".Lgt_fallthru:\n\t"
2840 "lea 0x8(%esp),%esp\n\t"
2841 "pop %eax\n\t"
2842 "pop %ebx");
2843
2844 if (offset_p)
2845 *offset_p = 20;
2846 if (size_p)
2847 *size_p = 4;
2848}
2849
df4a0200 2850static void
6b9801d4
SS
2851i386_emit_ge_goto (int *offset_p, int *size_p)
2852{
2853 EMIT_ASM32 (ge,
2854 "cmpl %ebx,4(%esp)\n\t"
2855 "jge .Lge_jump\n\t"
2856 "jne .Lge_fallthru\n\t"
2857 "cmpl %eax,(%esp)\n\t"
2858 "jnge .Lge_fallthru\n\t"
2859 ".Lge_jump:\n\t"
2860 "lea 0x8(%esp),%esp\n\t"
2861 "pop %eax\n\t"
2862 "pop %ebx\n\t"
2863 /* jmp, but don't trust the assembler to choose the right jump */
2864 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2865 ".Lge_fallthru:\n\t"
2866 "lea 0x8(%esp),%esp\n\t"
2867 "pop %eax\n\t"
2868 "pop %ebx");
2869
2870 if (offset_p)
2871 *offset_p = 20;
2872 if (size_p)
2873 *size_p = 4;
2874}
2875
/* Bytecode compiler vector for 32-bit inferiors.  The initializers are
   positional and must stay in struct emit_ops field order.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2916
2917
/* Return the bytecode compiler vector matching the current inferior:
   the amd64 ops when the process has a 64-bit target description,
   the i386 ops otherwise (and always on 32-bit builds).  */

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
2928
/* Implementation of target ops method "sw_breakpoint_from_kind". */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  /* KIND is ignored: x86 has a single software breakpoint instruction
     (int3).  Report its length through *SIZE and return its bytes.  */
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2937
/* Implementation of linux_target_ops method "supports_range_stepping":
   range stepping is always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
2943
/* Implementation of linux_target_ops method "supports_hardware_single_step".
   Always available on x86 (PTRACE_SINGLESTEP).  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2952
/* Return the index identifying the current thread's target description
   to the in-process agent.  On 64-bit builds this defers entirely to
   the amd64 helper (the i386 code below is then unreachable); on
   32-bit builds the no-XML fallback tdesc maps to X86_TDESC_SSE.  */

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2968
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.
   The initializers are positional and must stay in struct
   linux_target_ops field order.  */

struct linux_target_ops the_low_target =
{
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
3aee8918 2982
/* The linux target ops object.  Points at the x86 concrete target
   defined elsewhere in this file.  */

linux_process_target *the_linux_target = &the_x86_target;
2986
/* One-time arch initialization: build the fallback ("no XML") target
   descriptions used when the peer cannot fetch an XML tdesc, attach
   their canned XML names, and register the x86 regset info.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions. */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 1.036526 seconds and 4 git commands to generate.