Correct @section placement for makeinfo 4.13
[deliverable/binutils-gdb.git] / gdb / amd64-tdep.c
CommitLineData
e53bef9f 1/* Target-dependent code for AMD64.
ce0eebec 2
61baf725 3 Copyright (C) 2001-2017 Free Software Foundation, Inc.
5ae96ec1
MK
4
5 Contributed by Jiri Smid, SuSE Labs.
53e95fcf
JS
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
a9762ec7 11 the Free Software Foundation; either version 3 of the License, or
53e95fcf
JS
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
a9762ec7 20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
53e95fcf
JS
21
22#include "defs.h"
35669430
DE
23#include "opcode/i386.h"
24#include "dis-asm.h"
c4f35dd8
MK
25#include "arch-utils.h"
26#include "block.h"
27#include "dummy-frame.h"
28#include "frame.h"
29#include "frame-base.h"
30#include "frame-unwind.h"
53e95fcf 31#include "inferior.h"
45741a9c 32#include "infrun.h"
53e95fcf 33#include "gdbcmd.h"
c4f35dd8
MK
34#include "gdbcore.h"
35#include "objfiles.h"
53e95fcf 36#include "regcache.h"
2c261fae 37#include "regset.h"
53e95fcf 38#include "symfile.h"
eda5a4d7 39#include "disasm.h"
9c1488cb 40#include "amd64-tdep.h"
c4f35dd8 41#include "i387-tdep.h"
97de3545 42#include "x86-xstate.h"
325fac50 43#include <algorithm>
53e95fcf 44
90884b2b 45#include "features/i386/amd64.c"
a055a187 46#include "features/i386/amd64-avx.c"
e43e105e 47#include "features/i386/amd64-mpx.c"
2b863f51 48#include "features/i386/amd64-avx-mpx.c"
a1fa17ee 49#include "features/i386/amd64-avx-avx512.c"
51547df6 50#include "features/i386/amd64-avx-mpx-avx512-pku.c"
01f9f808 51
ac1438b5
L
52#include "features/i386/x32.c"
53#include "features/i386/x32-avx.c"
a1fa17ee 54#include "features/i386/x32-avx-avx512.c"
90884b2b 55
6710bf39
SS
56#include "ax.h"
57#include "ax-gdb.h"
58
e53bef9f
MK
59/* Note that the AMD64 architecture was previously known as x86-64.
60 The latter is (forever) engraved into the canonical system name as
90f90721 61 returned by config.guess, and used as the name for the AMD64 port
e53bef9f
MK
62 of GNU/Linux. The BSD's have renamed their ports to amd64; they
63 don't like to shout. For GDB we prefer the amd64_-prefix over the
64 x86_64_-prefix since it's so much easier to type. */
65
/* Register information.  */

/* Names of the raw AMD64 registers, indexed by GDB register number;
   the regnum comments below mark the start of each bank.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Names of the %ymm pseudo-registers (AVX, lower bank).  */

static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

/* Names of the additional %ymm registers available with AVX512.  */

static const char *amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

/* Names of the registers holding the upper halves of %ymm0-%ymm15.  */

static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

/* Names of the registers holding the upper halves of %ymm16-%ymm31
   (AVX512).  */

static const char *amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

/* Names of the MPX bound registers and their configuration/status
   registers.  */

static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

/* Names of the AVX512 opmask registers.  */

static const char *amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

/* Names of the registers holding the upper halves of the %zmm
   registers (AVX512).  */

static const char *amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

/* Names of the full %zmm pseudo-registers (AVX512).  */

static const char *amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

/* Names of the additional %xmm registers available with AVX512.  */

static const char *amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};

/* Name of the PKU protection-keys register.  */

static const char *amd64_pkeys_names[] = {
  "pkru"
};
163
c4f35dd8
MK
/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  Indexed by DWARF register number; an entry of -1
   means there is no (fixed) GDB register number for that DWARF
   register.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

/* Number of entries in the DWARF register map above.  */

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
0e04a514 248
c4f35dd8
MK
/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  Returns -1 for DWARF registers with no GDB
   equivalent (including out-of-range REG values).  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  /* When the target has %ymm pseudo-registers (ymm0_regnum >= 0),
     rebase mapped XMM register numbers into the ymm range.
     NOTE(review): presumably because DWARF uses the same numbers for
     the full vector registers -- confirm against psABI sec. 3.6.  */
  if (ymm0_regnum >= 0
      && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}
d532c08f 268
35669430
DE
/* Map architectural register numbers to gdb register numbers.
   NOTE(review): the table order (rax, rcx, rdx, rbx, rsp, rbp, rsi,
   rdi, r8..r15) matches the hardware instruction-encoding order, so
   REG is presumably a register number taken from an instruction
   encoding -- confirm with callers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  REG must be in [0, 15]; asserts otherwise.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
304
1ba53b71
L
/* Register names for byte pseudo-registers.  The first
   AMD64_NUM_LOWER_BYTE_REGS entries alias the low byte of each GP
   register; the final four ("ah".."dh") alias bits 8-15 of
   %rax..%rdx.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.

   NOTE(review): the entry after "bp" is the empty string in the
   original source; presumably the 16-bit stack-pointer
   pseudo-register deliberately gets no name here -- confirm before
   changing.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
333
/* Return the name of register REGNUM.  Each pseudo-register bank has
   its own name table; anything not recognized here falls back to the
   common i386 implementation.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}
355
3543a589
TT
/* Build a value for pseudo-register REGNUM from the raw register that
   backs it in REGCACHE.  Byte and dword pseudo-registers are
   extracted here; everything else is delegated to the common i386
   code.  Bytes that cannot be read from the target are marked
   unavailable in the returned value rather than raising an error.  */

static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
				  struct regcache *regcache,
				  int regnum)
{
  gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum));
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum register_status status;
  struct value *result_value;
  gdb_byte *buf;

  result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  buf = value_contents_raw (result_value);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Special handling for AH, BH, CH, DH: these live in bits
	     8-15 of the first four GP registers, hence the +1 byte
	     offset into the raw buffer.  */
	  status = regcache_raw_read (regcache,
				      gpnum - AMD64_NUM_LOWER_BYTE_REGS,
				      raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf + 1, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
      else
	{
	  status = regcache_raw_read (regcache, gpnum, raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      /* Extract (always little endian).  */
      status = regcache_raw_read (regcache, gpnum, raw_buf);
      if (status == REG_VALID)
	memcpy (buf, raw_buf, 4);
      else
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
    }
  else
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
					  result_value);

  return result_value;
}
416
/* Write BUF into pseudo-register REGNUM by read-modify-writing the
   raw register that backs it in REGCACHE.  Byte and dword
   pseudo-registers are handled here; everything else is delegated to
   the common i386 code.  */

static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
			     struct regcache *regcache,
			     int regnum, const gdb_byte *buf)
{
  gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum));
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Read ... AH, BH, CH, DH.  */
	  regcache_raw_read (regcache,
			     gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	  /* ... Modify ... (always little endian).  AH etc. occupy
	     bits 8-15, hence the +1 byte offset.  */
	  memcpy (raw_buf + 1, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache,
			      gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	}
      else
	{
	  /* Read ...  */
	  regcache_raw_read (regcache, gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache, gpnum, raw_buf);
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      /* Read ...  */
      regcache_raw_read (regcache, gpnum, raw_buf);
      /* ... Modify ... (always little endian).  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache_raw_write (regcache, gpnum, raw_buf);
    }
  else
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}
464
62e5fd57
MK
/* Implement the 'ax_pseudo_register_collect' gdbarch method.

   Record in AX's register mask the raw register that backs
   pseudo-register REGNUM, so agent expressions collect the right raw
   registers.  Return 0 when handled here; otherwise delegate to the
   common i386 code.  */

static int
amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
				  struct agent_expr *ax, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* AH, BH, CH, DH are backed by the first four GP registers.  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
      else
	ax_reg_mask (ax, gpnum);
      return 0;
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      ax_reg_mask (ax, gpnum);
      return 0;
    }
  else
    return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
}
493
53e95fcf
JS
494\f
495
bf4d6c1c
JB
/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,	/* Passed in a general-purpose register.  */
  AMD64_SSE,		/* Passed in an SSE register.  */
  AMD64_SSEUP,		/* Upper half of the last used SSE register.  */
  AMD64_X87,		/* Returned on the x87 stack in %st0.  */
  AMD64_X87UP,		/* Remainder of an X87 value (exponent+padding).  */
  AMD64_COMPLEX_X87,	/* complex long double: %st0/%st1.  */
  AMD64_NO_CLASS,	/* Padding or no data.  */
  AMD64_MEMORY		/* Passed in memory.  */
};
509
efb1c01c
MK
510/* Return the union class of CLASS1 and CLASS2. See the psABI for
511 details. */
512
513static enum amd64_reg_class
514amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
515{
516 /* Rule (a): If both classes are equal, this is the resulting class. */
517 if (class1 == class2)
518 return class1;
519
520 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
521 is the other class. */
522 if (class1 == AMD64_NO_CLASS)
523 return class2;
524 if (class2 == AMD64_NO_CLASS)
525 return class1;
526
527 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
528 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
529 return AMD64_MEMORY;
530
531 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
532 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
533 return AMD64_INTEGER;
534
535 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
536 MEMORY is used as class. */
537 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
538 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
539 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
540 return AMD64_MEMORY;
541
542 /* Rule (f): Otherwise class SSE is used. */
543 return AMD64_SSE;
544}
545
fe978cb0 546static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
bf4d6c1c 547
79b1ab3d
MK
548/* Return non-zero if TYPE is a non-POD structure or union type. */
549
550static int
551amd64_non_pod_p (struct type *type)
552{
553 /* ??? A class with a base class certainly isn't POD, but does this
554 catch all non-POD structure types? */
555 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
556 return 1;
557
558 return 0;
559}
560
efb1c01c
MK
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in THECLASS (one
   entry per eightbyte).  Follows steps 1-4 of the psABI
   classification algorithm.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      /* If the array spills into the second eightbyte, it inherits
	 the element class there too.  */
      if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* Which eightbyte the field starts in.  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  /* A zero bitsize means "not a bitfield"; use the field
	     type's full size.  */
	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  /* Which eightbyte the field ends in.  */
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
	  if (pos == 0)
	    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}
664
/* Classify TYPE, and store the result in THECLASS (one psABI class
   per eightbyte).  Scalars are classified directly here; aggregates
   are handed to amd64_classify_aggregate.  Any type not matched below
   (e.g. void) keeps NO_CLASS in both eightbytes.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T where T is one of the types float or
     double get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && len == 8)
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}
731
/* Implement the "return_value" gdbarch method, following the System V
   psABI return-value rules.  TYPE is the function's return type;
   exactly one of READBUF (fetch the value) and WRITEBUF (store the
   value) may be non-NULL.  Returns the convention that tells GDB
   where the value lives.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
        returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
	  regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
	  regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Transfer the value one eightbyte at a time, choosing the register
     dictated by that eightbyte's class.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  Only the 2-byte
	     exponent remains to transfer.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, std::min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, std::min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
864\f
865
720aa428
MK
/* Push the NARGS arguments in ARGS into the argument-passing
   registers of REGCACHE and onto the stack below SP, following the
   System V psABI calling convention.  If STRUCT_RETURN is non-zero,
   %rdi is reserved for the hidden return-value pointer.  Returns the
   adjusted (16-byte aligned) stack pointer.  */

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = XALLOCAVEC (struct value *, nargs);
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class theclass[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, theclass);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (theclass[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (theclass[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  Tally its size
	     in eightbytes now; the actual write happens below once
	     the final stack pointer is known.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (theclass[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      /* Zero-pad partial eightbytes before writing.  */
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
997
/* Implement the "push_dummy_call" gdbarch method.

   Set up the inferior to call FUNCTION: push ARGS (via
   amd64_push_arguments), store the return address BP_ADDR on the
   stack, and update %rsp/%rbp.  Returns the stack address used to
   identify the dummy frame.  */

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument: the address for a struct-return value is
     passed as if it were the first integer argument, in %rdi.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  BUF still holds SP, so %rbp == %rsp
     here; the dummy frame has no real prologue.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
c4f35dd8 1031\f
35669430
DE
1032/* Displaced instruction handling. */
1033
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets below are byte offsets relative to RAW_INSN.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  Not owned by this struct; points into the
     buffer handed to amd64_get_insn_details.  */
  gdb_byte *raw_insn;
};
1051
/* State carried from amd64_displaced_step_copy_insn to
   amd64_displaced_step_fixup.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.
     TMP_USED is non-zero when a temp reg was commandeered, TMP_REGNO is
     its GDB register number, and TMP_SAVE is its original value to be
     restored after the step.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field; the struct is over-allocated by
     amd64_displaced_step_copy_insn.  */
  gdb_byte insn_buf[1];
};
1069
1070/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1071 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1072 at which point delete these in favor of libopcodes' versions). */
1073
/* Indexed by the first opcode byte; non-zero if that one-byte opcode
   takes a ModRM byte.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	       0 1 2 3 4 5 6 7 8 9 a b c d e f	      */
  /*	       -------------------------------	      */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
  /*	       -------------------------------	      */
  /*	       0 1 2 3 4 5 6 7 8 9 a b c d e f	      */
};
1096
/* Indexed by the second opcode byte (after the 0x0f escape); non-zero
   if that two-byte opcode takes a ModRM byte.  */

static const unsigned char twobyte_has_modrm[256] = {
  /*	       0 1 2 3 4 5 6 7 8 9 a b c d e f	      */
  /*	       -------------------------------	      */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
  /*	       -------------------------------	      */
  /*	       0 1 2 3 4 5 6 7 8 9 a b c d e f	      */
};
1119
1120static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1121
/* Return non-zero if PFX is a REX instruction prefix byte, as defined
   by REX_PREFIX_P from opcode/i386.h.  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
1127
1128/* Skip the legacy instruction prefixes in INSN.
1129 We assume INSN is properly sentineled so we don't have to worry
1130 about falling off the end of the buffer. */
1131
1132static gdb_byte *
1903f0e6 1133amd64_skip_prefixes (gdb_byte *insn)
35669430
DE
1134{
1135 while (1)
1136 {
1137 switch (*insn)
1138 {
1139 case DATA_PREFIX_OPCODE:
1140 case ADDR_PREFIX_OPCODE:
1141 case CS_PREFIX_OPCODE:
1142 case DS_PREFIX_OPCODE:
1143 case ES_PREFIX_OPCODE:
1144 case FS_PREFIX_OPCODE:
1145 case GS_PREFIX_OPCODE:
1146 case SS_PREFIX_OPCODE:
1147 case LOCK_PREFIX_OPCODE:
1148 case REPE_PREFIX_OPCODE:
1149 case REPNE_PREFIX_OPCODE:
1150 ++insn;
1151 continue;
1152 default:
1153 break;
1154 }
1155 break;
1156 }
1157
1158 return insn;
1159}
1160
35669430
DE
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarily avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register (low 3 bits of the opcode,
     e.g. push/pop reg forms).  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      /* mod != 3 with r/m == 4 means a SIB byte follows the ModRM.  */
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* Only the low 8 bits can be set, and at least one must be clear:
     at most 3 input regs were marked on top of the 3 fixed exclusions.  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
1234
/* Extract the details of INSN that we need: prefix/opcode/ModRM
   offsets and opcode length.  INSN must be sentineled (see
   amd64_skip_prefixes).  All offsets stored in DETAILS are relative to
   the start of INSN.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  /* Mark everything unknown/absent until proven otherwise.  */
  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode (a second escape byte follows
	 0x0f for these values).  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
1298
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address: what %rip would hold when the
     original insn executed, i.e. the address just past it.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
					  dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Save the temp register's original value; the fixup phase restores
     it (see amd64_displaced_step_fixup).  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: clear mod and r/m, then
     set mod = 10 (disp32) with r/m = the temp register.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
1355
1356static void
1357fixup_displaced_copy (struct gdbarch *gdbarch,
1358 struct displaced_step_closure *dsc,
1359 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1360{
1361 const struct amd64_insn *details = &dsc->insn_details;
1362
1363 if (details->modrm_offset != -1)
1364 {
1365 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1366
1367 if ((modrm & 0xc7) == 0x05)
1368 {
1369 /* The insn uses rip-relative addressing.
1370 Deal with it. */
1371 fixup_riprel (gdbarch, dsc, from, to, regs);
1372 }
1373 }
1374}
1375
/* Copy the instruction at FROM into the scratch area at TO, adjusting
   it as needed (rip-relative addressing, syscall quirk), and return a
   closure describing what was done so amd64_displaced_step_fixup can
   undo/complete it.  The caller owns (and frees) the returned closure.  */

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  /* Over-allocate: insn_buf is a variable-length trailing array.  */
  struct displaced_step_closure *dsc
    = ((struct displaced_step_closure *)
       xmalloc (sizeof (*dsc) + len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
1428
1429static int
1430amd64_absolute_jmp_p (const struct amd64_insn *details)
1431{
1432 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1433
1434 if (insn[0] == 0xff)
1435 {
1436 /* jump near, absolute indirect (/4) */
1437 if ((insn[1] & 0x38) == 0x20)
1438 return 1;
1439
1440 /* jump far, absolute indirect (/5) */
1441 if ((insn[1] & 0x38) == 0x28)
1442 return 1;
1443 }
1444
1445 return 0;
1446}
1447
c2170eef
MM
1448/* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1449
1450static int
1451amd64_jmp_p (const struct amd64_insn *details)
1452{
1453 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1454
1455 /* jump short, relative. */
1456 if (insn[0] == 0xeb)
1457 return 1;
1458
1459 /* jump near, relative. */
1460 if (insn[0] == 0xe9)
1461 return 1;
1462
1463 return amd64_absolute_jmp_p (details);
1464}
1465
35669430
DE
1466static int
1467amd64_absolute_call_p (const struct amd64_insn *details)
1468{
1469 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1470
1471 if (insn[0] == 0xff)
1472 {
1473 /* Call near, absolute indirect (/2) */
1474 if ((insn[1] & 0x38) == 0x10)
1475 return 1;
1476
1477 /* Call far, absolute indirect (/3) */
1478 if ((insn[1] & 0x38) == 0x18)
1479 return 1;
1480 }
1481
1482 return 0;
1483}
1484
1485static int
1486amd64_ret_p (const struct amd64_insn *details)
1487{
1488 /* NOTE: gcc can emit "repz ; ret". */
1489 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1490
1491 switch (insn[0])
1492 {
1493 case 0xc2: /* ret near, pop N bytes */
1494 case 0xc3: /* ret near */
1495 case 0xca: /* ret far, pop N bytes */
1496 case 0xcb: /* ret far */
1497 case 0xcf: /* iret */
1498 return 1;
1499
1500 default:
1501 return 0;
1502 }
1503}
1504
1505static int
1506amd64_call_p (const struct amd64_insn *details)
1507{
1508 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1509
1510 if (amd64_absolute_call_p (details))
1511 return 1;
1512
1513 /* call near, relative */
1514 if (insn[0] == 0xe8)
1515 return 1;
1516
1517 return 0;
1518}
1519
35669430
DE
1520/* Return non-zero if INSN is a system call, and set *LENGTHP to its
1521 length in bytes. Otherwise, return zero. */
1522
1523static int
1524amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1525{
1526 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1527
1528 if (insn[0] == 0x0f && insn[1] == 0x05)
1529 {
1530 *lengthp = 2;
1531 return 1;
1532 }
1533
1534 return 0;
1535}
1536
c2170eef
MM
1537/* Classify the instruction at ADDR using PRED.
1538 Throw an error if the memory can't be read. */
1539
1540static int
1541amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1542 int (*pred) (const struct amd64_insn *))
1543{
1544 struct amd64_insn details;
1545 gdb_byte *buf;
1546 int len, classification;
1547
1548 len = gdbarch_max_insn_length (gdbarch);
224c3ddb 1549 buf = (gdb_byte *) alloca (len);
c2170eef
MM
1550
1551 read_code (addr, buf, len);
1552 amd64_get_insn_details (buf, &details);
1553
1554 classification = pred (&details);
1555
1556 return classification;
1557}
1558
/* The gdbarch insn_is_call method.  Returns non-zero if the insn at
   ADDR is a call; throws if the memory can't be read.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}
1566
/* The gdbarch insn_is_ret method.  Returns non-zero if the insn at
   ADDR is a return; throws if the memory can't be read.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}
1574
/* The gdbarch insn_is_jump method.  Returns non-zero if the insn at
   ADDR is a jump; throws if the memory can't be read.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
1582
35669430
DE
/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  Restores the temp register (if one was
   used), relocates %rip back into the original instruction stream, and
   corrects the return address pushed by a call.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_closure *dsc,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf;
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: fixup (%s, %s), "
			"insn = 0x%02x 0x%02x ...\n",
			paddress (gdbarch, from), paddress (gdbarch, to),
			insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
			    dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     The copy phase ensured the following byte is a nop, so we
	     add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: syscall changed %%rip; "
				"not relocating\n");
	}
      else
	{
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: "
				"relocated %%rip from %s to %s\n",
				paddress (gdbarch, orig_rip),
				paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      /* Keep the adjusted return address within 64 bits.  */
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: relocated return addr at %s "
			    "to %s\n",
			    paddress (gdbarch, rsp),
			    paddress (gdbarch, retaddr));
    }
}
dde08ee1
PA
1708
1709/* If the instruction INSN uses RIP-relative addressing, return the
1710 offset into the raw INSN where the displacement to be adjusted is
1711 found. Returns 0 if the instruction doesn't use RIP-relative
1712 addressing. */
1713
1714static int
1715rip_relative_offset (struct amd64_insn *insn)
1716{
1717 if (insn->modrm_offset != -1)
1718 {
1719 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1720
1721 if ((modrm & 0xc7) == 0x05)
1722 {
1723 /* The displacement is found right after the ModRM byte. */
1724 return insn->modrm_offset + 1;
1725 }
1726 }
1727
1728 return 0;
1729}
1730
/* Write the LEN bytes at BUF to target memory at *TO, then advance *TO
   past the written bytes.

   NOTE(review): the return value of target_write_memory is ignored, so
   a failed write to the scratch area goes undetected here — confirm
   whether callers can tolerate that.  */

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1737
60965737 1738static void
dde08ee1
PA
1739amd64_relocate_instruction (struct gdbarch *gdbarch,
1740 CORE_ADDR *to, CORE_ADDR oldloc)
1741{
1742 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1743 int len = gdbarch_max_insn_length (gdbarch);
1744 /* Extra space for sentinels. */
1745 int fixup_sentinel_space = len;
224c3ddb 1746 gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
dde08ee1
PA
1747 struct amd64_insn insn_details;
1748 int offset = 0;
1749 LONGEST rel32, newrel;
1750 gdb_byte *insn;
1751 int insn_length;
1752
1753 read_memory (oldloc, buf, len);
1754
1755 /* Set up the sentinel space so we don't have to worry about running
1756 off the end of the buffer. An excessive number of leading prefixes
1757 could otherwise cause this. */
1758 memset (buf + len, 0, fixup_sentinel_space);
1759
1760 insn = buf;
1761 amd64_get_insn_details (insn, &insn_details);
1762
1763 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1764
1765 /* Skip legacy instruction prefixes. */
1766 insn = amd64_skip_prefixes (insn);
1767
1768 /* Adjust calls with 32-bit relative addresses as push/jump, with
1769 the address pushed being the location where the original call in
1770 the user program would return to. */
1771 if (insn[0] == 0xe8)
1772 {
f077e978
PA
1773 gdb_byte push_buf[32];
1774 CORE_ADDR ret_addr;
1775 int i = 0;
dde08ee1
PA
1776
1777 /* Where "ret" in the original code will return to. */
1778 ret_addr = oldloc + insn_length;
f077e978
PA
1779
1780 /* If pushing an address higher than or equal to 0x80000000,
1781 avoid 'pushq', as that sign extends its 32-bit operand, which
1782 would be incorrect. */
1783 if (ret_addr <= 0x7fffffff)
1784 {
1785 push_buf[0] = 0x68; /* pushq $... */
1786 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1787 i = 5;
1788 }
1789 else
1790 {
1791 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1792 push_buf[i++] = 0x83;
1793 push_buf[i++] = 0xec;
1794 push_buf[i++] = 0x08;
1795
1796 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1797 push_buf[i++] = 0x04;
1798 push_buf[i++] = 0x24;
1799 store_unsigned_integer (&push_buf[i], 4, byte_order,
1800 ret_addr & 0xffffffff);
1801 i += 4;
1802
1803 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1804 push_buf[i++] = 0x44;
1805 push_buf[i++] = 0x24;
1806 push_buf[i++] = 0x04;
1807 store_unsigned_integer (&push_buf[i], 4, byte_order,
1808 ret_addr >> 32);
1809 i += 4;
1810 }
1811 gdb_assert (i <= sizeof (push_buf));
dde08ee1 1812 /* Push the push. */
f077e978 1813 append_insns (to, i, push_buf);
dde08ee1
PA
1814
1815 /* Convert the relative call to a relative jump. */
1816 insn[0] = 0xe9;
1817
1818 /* Adjust the destination offset. */
1819 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1820 newrel = (oldloc - *to) + rel32;
f4a1794a
KY
1821 store_signed_integer (insn + 1, 4, byte_order, newrel);
1822
1823 if (debug_displaced)
1824 fprintf_unfiltered (gdb_stdlog,
1825 "Adjusted insn rel32=%s at %s to"
1826 " rel32=%s at %s\n",
1827 hex_string (rel32), paddress (gdbarch, oldloc),
1828 hex_string (newrel), paddress (gdbarch, *to));
dde08ee1
PA
1829
1830 /* Write the adjusted jump into its displaced location. */
1831 append_insns (to, 5, insn);
1832 return;
1833 }
1834
1835 offset = rip_relative_offset (&insn_details);
1836 if (!offset)
1837 {
1838 /* Adjust jumps with 32-bit relative addresses. Calls are
1839 already handled above. */
1840 if (insn[0] == 0xe9)
1841 offset = 1;
1842 /* Adjust conditional jumps. */
1843 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1844 offset = 2;
1845 }
1846
1847 if (offset)
1848 {
1849 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1850 newrel = (oldloc - *to) + rel32;
f4a1794a 1851 store_signed_integer (insn + offset, 4, byte_order, newrel);
dde08ee1
PA
1852 if (debug_displaced)
1853 fprintf_unfiltered (gdb_stdlog,
f4a1794a
KY
1854 "Adjusted insn rel32=%s at %s to"
1855 " rel32=%s at %s\n",
dde08ee1
PA
1856 hex_string (rel32), paddress (gdbarch, oldloc),
1857 hex_string (newrel), paddress (gdbarch, *to));
1858 }
1859
1860 /* Write the adjusted instruction into its displaced location. */
1861 append_insns (to, insn_length, buf);
1862}
1863
35669430 1864\f
c4f35dd8 1865/* The maximum number of saved registers. This should include %rip. */
90f90721 1866#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
c4f35dd8 1867
/* Per-frame state gathered by the amd64 prologue analysis and used by
   the frame unwinders.  */

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  /* Non-zero once BASE has been computed and is valid.  */
  int base_p;
  /* Offset of the stack pointer (initialized to -8 by
     amd64_init_frame_cache; see there for the convention).  */
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  Initially prologue-relative offsets, later
     converted to addresses; -1 means "not saved".  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  /* Register holding a copy of the pre-alignment stack pointer, or -1
     if none — presumably set by amd64_analyze_stack_align; verify
     against that function.  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
8dda9770 1884
/* Initialize a frame cache to its "nothing known yet" state.  */

static void
amd64_init_frame_cache (struct amd64_frame_cache *cache)
{
  int i;

  /* Base address.  */
  cache->base = 0;
  cache->base_p = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).
     The values start out as being offsets, and are later converted to
     addresses (at which point -1 is interpreted as an address, still meaning
     "invalid").  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;
}
c4f35dd8 1911
d2449ee8
DJ
1912/* Allocate and initialize a frame cache. */
1913
1914static struct amd64_frame_cache *
1915amd64_alloc_frame_cache (void)
1916{
1917 struct amd64_frame_cache *cache;
1918
1919 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1920 amd64_init_frame_cache (cache);
c4f35dd8 1921 return cache;
8dda9770 1922}
53e95fcf 1923
e0c62198
L
/* GCC 4.4 and later, can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

     1. Use a caller-saved saved register:

	leaq  8(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     2. Use a callee-saved saved register:

	pushq %reg
	leaq  16(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  /* Opcode 0x81 takes a 32-bit immediate (7-byte insn), 0x83 a sign-
     extended 8-bit immediate (4-byte insn).  */
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Only record the scratch register if execution has already passed
     the "andq"; before that point %rsp has not been clobbered yet.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  /* +2 skips the final "pushq -8(%reg)" (opcode + ModRM + disp8 were
     matched above starting at OFFSET).  */
  return std::min (pc + offset + 2, current_pc);
}
2078
ac142d96
L
/* Similar to amd64_analyze_stack_align for x32.  */

static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			       struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

     1. Use a caller-saved saved register:

	leaq  8(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

	or

	[addr32] leal  8(%rsp), %reg
	andl  $-XXX, %esp
	[addr32] pushq -8(%reg)

     2. Use a callee-saved saved register:

	pushq %reg
	leaq  16(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

	or

	pushq %reg
	[addr32] leal  16(%rsp), %reg
	andl  $-XXX, %esp
	[addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:

	0x83 0xe4 0xf0			andl $-16, %esp
	0x81 0xe4 0x00 0xff 0xff 0xff	andl $-256, %esp
   */

  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  /* NOTE(review): the 64-bit variant above reads via target_read_code;
     this one uses target_read_memory — confirm whether that difference
     is intentional.  */
  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d
      && buf[offset + 3] == 0x24
      && buf[offset + 4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
	  && (buf[offset + 1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[offset] & 1) != 0)
	    reg = 8;

	  offset += 1;
	}
      else if ((buf[offset] & 0xf8) != 0x50)
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
	offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
	 "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  The andl form has no REX byte, so back up
     one to keep the offsets below uniform.  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  /* Opcode 0x81 takes a 32-bit immediate, 0x83 an 8-bit one.  */
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Only record the scratch register if execution has already passed
     the "and"; before that point %rsp has not been clobbered yet.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  /* +2 skips the final "pushq -8(%reg)".  */
  return std::min (pc + offset + 2, current_pc);
}
2260
c4f35dd8
MK
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)

   or (for the X32 ABI):

      pushq %rbp        0x55
      movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)

   Any function that doesn't start with one of these sequences will be
   assumed to have no prologue and thus no valid frame pointer in
   %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  /* First skip any stack-realignment preamble GCC may have emitted.  */
  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_code_unsigned_integer (pc, 1, byte_order);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
	 starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
	return current_pc;

      read_code (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
	  || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
	{
	  /* OK, we actually have a frame.  */
	  cache->frameless_p = 0;
	  return pc + 4;
	}

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
	{
	  if (memcmp (buf, mov_esp_ebp_1, 2) == 0
	      || memcmp (buf, mov_esp_ebp_2, 2) == 0)
	    {
	      /* OK, we actually have a frame.  */
	      cache->frameless_p = 0;
	      return pc + 3;
	    }
	}

      return pc + 1;
    }

  return pc;
}
2344
df15bd07
JK
/* Work around false termination of prologue - GCC PR debug/48827.

   START_PC is the first instruction of a function, PC is its minimal already
   determined advanced address.  Function returns PC if it has nothing to do.

   84 c0                	test   %al,%al
   74 23                	je     after
   <-- here is 0 lines advance - the false prologue end marker.
   0f 29 85 70 ff ff ff	movaps %xmm0,-0x90(%rbp)
   0f 29 4d 80         	movaps %xmm1,-0x80(%rbp)
   0f 29 55 90         	movaps %xmm2,-0x70(%rbp)
   0f 29 5d a0         	movaps %xmm3,-0x60(%rbp)
   0f 29 65 b0         	movaps %xmm4,-0x50(%rbp)
   0f 29 6d c0         	movaps %xmm5,-0x40(%rbp)
   0f 29 75 d0         	movaps %xmm6,-0x30(%rbp)
   0f 29 7d e0         	movaps %xmm7,-0x20(%rbp)
   after:  */

static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  /* Worst case: test+je (4 bytes) plus eight movaps with 32-bit
     displacements (7 bytes each).  */
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  if (pc == start_pc)
    return pc;

  /* Only applies to GCC >= 4.6 compiled code whose line info places
     PC still on the function's first line.  */
  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
			       (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlayed memory, ignored here.  */
  if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;
  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
	  || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
	return pc;

      /* 0b01?????? */
      if ((buf[offset + 2] & 0xc0) == 0x40)
	{
	  /* 8-bit displacement.  */
	  offset += 4;
	}
      /* 0b10?????? */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
	{
	  /* 32-bit displacement.  */
	  offset += 7;
	}
      else
	return pc;
    }

  /* je AFTER — the branch must jump exactly over the eight stores.  */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}
df15bd07
JK
2425
2426/* Return PC of first real instruction. */
2427
2428static CORE_ADDR
2429amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2430{
2431 struct amd64_frame_cache cache;
2432 CORE_ADDR pc;
56bf0743
KB
2433 CORE_ADDR func_addr;
2434
2435 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2436 {
2437 CORE_ADDR post_prologue_pc
2438 = skip_prologue_using_sal (gdbarch, func_addr);
43f3e411 2439 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
56bf0743
KB
2440
2441 /* Clang always emits a line note before the prologue and another
2442 one after. We trust clang to emit usable line notes. */
2443 if (post_prologue_pc
43f3e411
DE
2444 && (cust != NULL
2445 && COMPUNIT_PRODUCER (cust) != NULL
61012eef 2446 && startswith (COMPUNIT_PRODUCER (cust), "clang ")))
325fac50 2447 return std::max (start_pc, post_prologue_pc);
56bf0743 2448 }
df15bd07
JK
2449
2450 amd64_init_frame_cache (&cache);
2451 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2452 &cache);
2453 if (cache.frameless_p)
2454 return start_pc;
2455
2456 return amd64_skip_xmm_prologue (pc, start_pc);
2457}
c4f35dd8 2458\f
53e95fcf 2459
c4f35dd8
MK
2460/* Normal frames. */
2461
8fbca658
PA
/* Fill in CACHE for the frame THIS_FRAME: run the prologue analysis,
   establish the frame base, and convert saved-register offsets into
   absolute addresses.  May throw if registers/memory are unavailable;
   CACHE->base_p is only set on success.  */

static void
amd64_frame_cache_1 (struct frame_info *this_frame,
		     struct amd64_frame_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  int i;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully setup yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* Stack pointer has been saved.  */
	  get_frame_register (this_frame, cache->saved_sp_reg, buf);
	  cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);

	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      /* A normal frame: %rbp holds the frame base.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  cache->base_p = 1;
}
2530
/* Return the frame cache for THIS_FRAME, computing it on first use.
   An unavailable-memory/register error during the computation is
   swallowed (leaving base_p clear); any other error propagates.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  TRY
    {
      amd64_frame_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
2555
8fbca658
PA
2556static enum unwind_stop_reason
2557amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2558 void **this_cache)
2559{
2560 struct amd64_frame_cache *cache =
2561 amd64_frame_cache (this_frame, this_cache);
2562
2563 if (!cache->base_p)
2564 return UNWIND_UNAVAILABLE;
2565
2566 /* This marks the outermost frame. */
2567 if (cache->base == 0)
2568 return UNWIND_OUTERMOST;
2569
2570 return UNWIND_NO_REASON;
2571}
2572
c4f35dd8 2573static void
10458914 2574amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
e53bef9f 2575 struct frame_id *this_id)
c4f35dd8 2576{
e53bef9f 2577 struct amd64_frame_cache *cache =
10458914 2578 amd64_frame_cache (this_frame, this_cache);
c4f35dd8 2579
8fbca658 2580 if (!cache->base_p)
5ce0145d
PA
2581 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2582 else if (cache->base == 0)
2583 {
2584 /* This marks the outermost frame. */
2585 return;
2586 }
2587 else
2588 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
c4f35dd8 2589}
e76e1718 2590
10458914
DJ
2591static struct value *
2592amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2593 int regnum)
53e95fcf 2594{
10458914 2595 struct gdbarch *gdbarch = get_frame_arch (this_frame);
e53bef9f 2596 struct amd64_frame_cache *cache =
10458914 2597 amd64_frame_cache (this_frame, this_cache);
e76e1718 2598
c4f35dd8 2599 gdb_assert (regnum >= 0);
b1ab997b 2600
2ae02b47 2601 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
10458914 2602 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
e76e1718 2603
e53bef9f 2604 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
10458914
DJ
2605 return frame_unwind_got_memory (this_frame, regnum,
2606 cache->saved_regs[regnum]);
e76e1718 2607
10458914 2608 return frame_unwind_got_register (this_frame, regnum, regnum);
c4f35dd8 2609}
e76e1718 2610
e53bef9f 2611static const struct frame_unwind amd64_frame_unwind =
c4f35dd8
MK
2612{
2613 NORMAL_FRAME,
8fbca658 2614 amd64_frame_unwind_stop_reason,
e53bef9f 2615 amd64_frame_this_id,
10458914
DJ
2616 amd64_frame_prev_register,
2617 NULL,
2618 default_frame_sniffer
c4f35dd8 2619};
c4f35dd8 2620\f
6710bf39
SS
/* Generate a bytecode expression to get the value of the saved PC.  */

static void
amd64_gen_return_address (struct gdbarch *gdbarch,
			  struct agent_expr *ax, struct axs_value *value,
			  CORE_ADDR scope)
{
  /* The following sequence assumes the traditional use of the base
     register: the return address lives at 8(%rbp), so emit
     "%rbp + 8" and mark the result as an lvalue in memory.  */
  ax_reg (ax, AMD64_RBP_REGNUM);
  ax_const_l (ax, 8);
  ax_simple (ax, aop_add);
  value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
  value->kind = axs_lvalue_memory;
}
2636\f
e76e1718 2637
c4f35dd8
MK
/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

/* Build the frame cache for a signal-trampoline frame: the saved
   registers come from the OS sigcontext, located via the tdep
   sigcontext_addr callback and sc_reg_offset table.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();

  TRY
    {
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

      /* Each saved register's address is the sigcontext base plus its
	 OS-specific offset; -1 entries are not present in the
	 sigcontext.  */
      addr = tdep->sigcontext_addr (this_frame);
      gdb_assert (tdep->sc_reg_offset);
      gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
      for (i = 0; i < tdep->sc_num_regs; i++)
	if (tdep->sc_reg_offset[i] != -1)
	  cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

      cache->base_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  *this_cache = cache;
  return cache;
}
2684
8fbca658
PA
2685static enum unwind_stop_reason
2686amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2687 void **this_cache)
2688{
2689 struct amd64_frame_cache *cache =
2690 amd64_sigtramp_frame_cache (this_frame, this_cache);
2691
2692 if (!cache->base_p)
2693 return UNWIND_UNAVAILABLE;
2694
2695 return UNWIND_NO_REASON;
2696}
2697
c4f35dd8 2698static void
10458914 2699amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
e53bef9f 2700 void **this_cache, struct frame_id *this_id)
c4f35dd8 2701{
e53bef9f 2702 struct amd64_frame_cache *cache =
10458914 2703 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 2704
8fbca658 2705 if (!cache->base_p)
5ce0145d
PA
2706 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2707 else if (cache->base == 0)
2708 {
2709 /* This marks the outermost frame. */
2710 return;
2711 }
2712 else
2713 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
c4f35dd8
MK
2714}
2715
10458914
DJ
/* Fetch register REGNUM of the frame interrupted by the signal.  The
   sigtramp cache records the sigcontext addresses, after which the
   normal prev_register logic applies.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
2725
10458914
DJ
2726static int
2727amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2728 struct frame_info *this_frame,
2729 void **this_cache)
c4f35dd8 2730{
10458914 2731 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
911bc6ee
MK
2732
2733 /* We shouldn't even bother if we don't have a sigcontext_addr
2734 handler. */
2735 if (tdep->sigcontext_addr == NULL)
10458914 2736 return 0;
911bc6ee
MK
2737
2738 if (tdep->sigtramp_p != NULL)
2739 {
10458914
DJ
2740 if (tdep->sigtramp_p (this_frame))
2741 return 1;
911bc6ee 2742 }
c4f35dd8 2743
911bc6ee 2744 if (tdep->sigtramp_start != 0)
1c3545ae 2745 {
10458914 2746 CORE_ADDR pc = get_frame_pc (this_frame);
1c3545ae 2747
911bc6ee
MK
2748 gdb_assert (tdep->sigtramp_end != 0);
2749 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
10458914 2750 return 1;
1c3545ae 2751 }
c4f35dd8 2752
10458914 2753 return 0;
c4f35dd8 2754}
10458914
DJ
2755
/* Unwinder for signal-trampoline frames, selected by
   amd64_sigtramp_frame_sniffer.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_unwind_stop_reason,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
c4f35dd8
MK
2765\f
2766
2767static CORE_ADDR
10458914 2768amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
c4f35dd8 2769{
e53bef9f 2770 struct amd64_frame_cache *cache =
10458914 2771 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
2772
2773 return cache->base;
2774}
2775
/* Frame base handler: base, locals and arguments all use the frame's
   base address.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
2783
872761f4
MS
2784/* Normal frames, but in a function epilogue. */
2785
c9cf6e20
MG
2786/* Implement the stack_frame_destroyed_p gdbarch method.
2787
2788 The epilogue is defined here as the 'ret' instruction, which will
872761f4
MS
2789 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2790 the function's stack frame. */
2791
2792static int
c9cf6e20 2793amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
872761f4
MS
2794{
2795 gdb_byte insn;
43f3e411 2796 struct compunit_symtab *cust;
e0d00bc7 2797
43f3e411
DE
2798 cust = find_pc_compunit_symtab (pc);
2799 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
e0d00bc7 2800 return 0;
872761f4
MS
2801
2802 if (target_read_memory (pc, &insn, 1))
2803 return 0; /* Can't read memory at pc. */
2804
2805 if (insn != 0xc3) /* 'ret' instruction. */
2806 return 0;
2807
2808 return 1;
2809}
2810
2811static int
2812amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2813 struct frame_info *this_frame,
2814 void **this_prologue_cache)
2815{
2816 if (frame_relative_level (this_frame) == 0)
c9cf6e20
MG
2817 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
2818 get_frame_pc (this_frame));
872761f4
MS
2819 else
2820 return 0;
2821}
2822
/* Build the frame cache for a frame stopped at its 'ret' instruction:
   the frame has already been torn down, so everything is derived from
   the current %rsp rather than %rbp.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  TRY
    {
      /* Cache base will be %esp plus cache->sp_offset (-8).  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8,
					      byte_order) + cache->sp_offset;

      /* Cache pc will be the frame func.  */
      cache->pc = get_frame_pc (this_frame);

      /* The saved %esp will be at cache->base plus 16.  */
      cache->saved_sp = cache->base + 16;

      /* The saved %eip will be at cache->base plus 8.  */
      cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

      cache->base_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
2864
8fbca658
PA
2865static enum unwind_stop_reason
2866amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2867 void **this_cache)
2868{
2869 struct amd64_frame_cache *cache
2870 = amd64_epilogue_frame_cache (this_frame, this_cache);
2871
2872 if (!cache->base_p)
2873 return UNWIND_UNAVAILABLE;
2874
2875 return UNWIND_NO_REASON;
2876}
2877
872761f4
MS
2878static void
2879amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2880 void **this_cache,
2881 struct frame_id *this_id)
2882{
2883 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2884 this_cache);
2885
8fbca658 2886 if (!cache->base_p)
5ce0145d
PA
2887 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2888 else
2889 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
872761f4
MS
2890}
2891
/* Unwinder for frames stopped at the 'ret' instruction, selected by
   amd64_epilogue_frame_sniffer.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};
2901
166f4c7b 2902static struct frame_id
10458914 2903amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
166f4c7b 2904{
c4f35dd8
MK
2905 CORE_ADDR fp;
2906
10458914 2907 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
c4f35dd8 2908
10458914 2909 return frame_id_build (fp + 16, get_frame_pc (this_frame));
166f4c7b
ML
2910}
2911
8b148df9
AC
2912/* 16 byte align the SP per frame requirements. */
2913
2914static CORE_ADDR
e53bef9f 2915amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
8b148df9
AC
2916{
2917 return sp & -(CORE_ADDR)16;
2918}
473f17b0
MK
2919\f
2920
593adc23
MK
/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
		       int regnum, const void *fpregs, size_t len)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The buffer must hold at least a full FXSAVE area.  */
  gdb_assert (len >= tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}
8b148df9 2935
593adc23
MK
/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  */

static void
amd64_collect_fpregset (const struct regset *regset,
			const struct regcache *regcache,
			int regnum, void *fpregs, size_t len)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The buffer must hold at least a full FXSAVE area.  */
  gdb_assert (len >= tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);
}
2952
/* Register set description for the floating-point registers (FXSAVE
   layout).  */

const struct regset amd64_fpregset =
  {
    NULL, amd64_supply_fpregset, amd64_collect_fpregset
  };
c6b33596
MK
2957\f
2958
436675d3
PA
2959/* Figure out where the longjmp will land. Slurp the jmp_buf out of
2960 %rdi. We expect its value to be a pointer to the jmp_buf structure
2961 from which we extract the address that we will land at. This
2962 address is copied into PC. This routine returns non-zero on
2963 success. */
2964
2965static int
2966amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2967{
2968 gdb_byte buf[8];
2969 CORE_ADDR jb_addr;
2970 struct gdbarch *gdbarch = get_frame_arch (frame);
2971 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
0dfff4cb 2972 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2973
2974 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2975 longjmp will land. */
2976 if (jb_pc_offset == -1)
2977 return 0;
2978
2979 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
0dfff4cb
UW
2980 jb_addr= extract_typed_address
2981 (buf, builtin_type (gdbarch)->builtin_data_ptr);
436675d3
PA
2982 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2983 return 0;
2984
0dfff4cb 2985 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2986
2987 return 1;
2988}
2989
cf648174
HZ
2990static const int amd64_record_regmap[] =
2991{
2992 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2993 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2994 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2995 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2996 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2997 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2998};
2999
2213a65d 3000void
90f90721 3001amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
53e95fcf 3002{
0c1a73d6 3003 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
90884b2b 3004 const struct target_desc *tdesc = info.target_desc;
05c0465e
SDJ
3005 static const char *const stap_integer_prefixes[] = { "$", NULL };
3006 static const char *const stap_register_prefixes[] = { "%", NULL };
3007 static const char *const stap_register_indirection_prefixes[] = { "(",
3008 NULL };
3009 static const char *const stap_register_indirection_suffixes[] = { ")",
3010 NULL };
53e95fcf 3011
473f17b0
MK
3012 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3013 floating-point registers. */
3014 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
8f0435f7 3015 tdep->fpregset = &amd64_fpregset;
473f17b0 3016
90884b2b
L
3017 if (! tdesc_has_registers (tdesc))
3018 tdesc = tdesc_amd64;
3019 tdep->tdesc = tdesc;
3020
3021 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3022 tdep->register_names = amd64_register_names;
3023
01f9f808
MS
3024 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3025 {
3026 tdep->zmmh_register_names = amd64_zmmh_names;
3027 tdep->k_register_names = amd64_k_names;
3028 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3029 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3030
3031 tdep->num_zmm_regs = 32;
3032 tdep->num_xmm_avx512_regs = 16;
3033 tdep->num_ymm_avx512_regs = 16;
3034
3035 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3036 tdep->k0_regnum = AMD64_K0_REGNUM;
3037 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3038 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3039 }
3040
a055a187
L
3041 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3042 {
3043 tdep->ymmh_register_names = amd64_ymmh_names;
3044 tdep->num_ymm_regs = 16;
3045 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3046 }
3047
e43e105e
WT
3048 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3049 {
3050 tdep->mpx_register_names = amd64_mpx_names;
3051 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3052 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3053 }
3054
2735833d
WT
3055 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3056 {
3057 const struct tdesc_feature *feature =
3058 tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments");
3059 struct tdesc_arch_data *tdesc_data_segments =
3060 (struct tdesc_arch_data *) info.tdep_info;
3061
3062 tdesc_numbered_register (feature, tdesc_data_segments,
3063 AMD64_FSBASE_REGNUM, "fs_base");
3064 tdesc_numbered_register (feature, tdesc_data_segments,
3065 AMD64_GSBASE_REGNUM, "gs_base");
3066 }
3067
51547df6
MS
3068 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3069 {
3070 tdep->pkeys_register_names = amd64_pkeys_names;
3071 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3072 tdep->num_pkeys_regs = 1;
3073 }
3074
fe01d668 3075 tdep->num_byte_regs = 20;
1ba53b71
L
3076 tdep->num_word_regs = 16;
3077 tdep->num_dword_regs = 16;
3078 /* Avoid wiring in the MMX registers for now. */
3079 tdep->num_mmx_regs = 0;
3080
3543a589
TT
3081 set_gdbarch_pseudo_register_read_value (gdbarch,
3082 amd64_pseudo_register_read_value);
1ba53b71
L
3083 set_gdbarch_pseudo_register_write (gdbarch,
3084 amd64_pseudo_register_write);
62e5fd57
MK
3085 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3086 amd64_ax_pseudo_register_collect);
1ba53b71
L
3087
3088 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3089
5716833c 3090 /* AMD64 has an FPU and 16 SSE registers. */
90f90721 3091 tdep->st0_regnum = AMD64_ST0_REGNUM;
0c1a73d6 3092 tdep->num_xmm_regs = 16;
53e95fcf 3093
0c1a73d6 3094 /* This is what all the fuss is about. */
53e95fcf
JS
3095 set_gdbarch_long_bit (gdbarch, 64);
3096 set_gdbarch_long_long_bit (gdbarch, 64);
3097 set_gdbarch_ptr_bit (gdbarch, 64);
3098
e53bef9f
MK
3099 /* In contrast to the i386, on AMD64 a `long double' actually takes
3100 up 128 bits, even though it's still based on the i387 extended
3101 floating-point format which has only 80 significant bits. */
b83b026c
MK
3102 set_gdbarch_long_double_bit (gdbarch, 128);
3103
e53bef9f 3104 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
b83b026c
MK
3105
3106 /* Register numbers of various important registers. */
90f90721
MK
3107 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3108 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3109 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3110 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
b83b026c 3111
e53bef9f
MK
3112 /* The "default" register numbering scheme for AMD64 is referred to
3113 as the "DWARF Register Number Mapping" in the System V psABI.
3114 The preferred debugging format for all known AMD64 targets is
3115 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3116 DWARF-1), but we provide the same mapping just in case. This
3117 mapping is also used for stabs, which GCC does support. */
3118 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
e53bef9f 3119 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
de220d0f 3120
c4f35dd8 3121 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
e53bef9f 3122 be in use on any of the supported AMD64 targets. */
53e95fcf 3123
c4f35dd8 3124 /* Call dummy code. */
e53bef9f
MK
3125 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3126 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
8b148df9 3127 set_gdbarch_frame_red_zone_size (gdbarch, 128);
53e95fcf 3128
83acabca 3129 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
d532c08f
MK
3130 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3131 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3132
efb1c01c 3133 set_gdbarch_return_value (gdbarch, amd64_return_value);
53e95fcf 3134
e53bef9f 3135 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
53e95fcf 3136
cf648174
HZ
3137 tdep->record_regmap = amd64_record_regmap;
3138
10458914 3139 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
53e95fcf 3140
872761f4
MS
3141 /* Hook the function epilogue frame unwinder. This unwinder is
3142 appended to the list first, so that it supercedes the other
3143 unwinders in function epilogues. */
3144 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3145
3146 /* Hook the prologue-based frame unwinders. */
10458914
DJ
3147 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3148 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
e53bef9f 3149 frame_base_set_default (gdbarch, &amd64_frame_base);
c6b33596 3150
436675d3 3151 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
dde08ee1
PA
3152
3153 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
6710bf39
SS
3154
3155 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
55aa24fb
SDJ
3156
3157 /* SystemTap variables and functions. */
05c0465e
SDJ
3158 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3159 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3160 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3161 stap_register_indirection_prefixes);
3162 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3163 stap_register_indirection_suffixes);
55aa24fb
SDJ
3164 set_gdbarch_stap_is_single_operand (gdbarch,
3165 i386_stap_is_single_operand);
3166 set_gdbarch_stap_parse_special_token (gdbarch,
3167 i386_stap_parse_special_token);
c2170eef
MM
3168 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3169 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3170 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
c4f35dd8 3171}
fff4548b
MK
3172\f
3173
3174static struct type *
3175amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3176{
3177 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3178
3179 switch (regnum - tdep->eax_regnum)
3180 {
3181 case AMD64_RBP_REGNUM: /* %ebp */
3182 case AMD64_RSP_REGNUM: /* %esp */
3183 return builtin_type (gdbarch)->builtin_data_ptr;
3184 case AMD64_RIP_REGNUM: /* %eip */
3185 return builtin_type (gdbarch)->builtin_func_ptr;
3186 }
3187
3188 return i386_pseudo_register_type (gdbarch, regnum);
3189}
3190
3191void
3192amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
3193{
3194 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3195 const struct target_desc *tdesc = info.target_desc;
3196
3197 amd64_init_abi (info, gdbarch);
3198
3199 if (! tdesc_has_registers (tdesc))
3200 tdesc = tdesc_x32;
3201 tdep->tdesc = tdesc;
3202
3203 tdep->num_dword_regs = 17;
3204 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3205
3206 set_gdbarch_long_bit (gdbarch, 32);
3207 set_gdbarch_ptr_bit (gdbarch, 32);
3208}
90884b2b 3209
97de3545
JB
3210/* Return the target description for a specified XSAVE feature mask. */
3211
3212const struct target_desc *
3213amd64_target_description (uint64_t xcr0)
3214{
3215 switch (xcr0 & X86_XSTATE_ALL_MASK)
3216 {
51547df6
MS
3217 case X86_XSTATE_AVX_MPX_AVX512_PKU_MASK:
3218 return tdesc_amd64_avx_mpx_avx512_pku;
a1fa17ee
MS
3219 case X86_XSTATE_AVX_AVX512_MASK:
3220 return tdesc_amd64_avx_avx512;
97de3545
JB
3221 case X86_XSTATE_MPX_MASK:
3222 return tdesc_amd64_mpx;
2b863f51
WT
3223 case X86_XSTATE_AVX_MPX_MASK:
3224 return tdesc_amd64_avx_mpx;
97de3545
JB
3225 case X86_XSTATE_AVX_MASK:
3226 return tdesc_amd64_avx;
3227 default:
3228 return tdesc_amd64;
3229 }
3230}
3231
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

/* Register the built-in amd64 and x32 target descriptions.  */

void
_initialize_amd64_tdep (void)
{
  /* 64-bit descriptions.  */
  initialize_tdesc_amd64 ();
  initialize_tdesc_amd64_avx ();
  initialize_tdesc_amd64_mpx ();
  initialize_tdesc_amd64_avx_mpx ();
  initialize_tdesc_amd64_avx_avx512 ();
  initialize_tdesc_amd64_avx_mpx_avx512_pku ();

  /* x32 (ILP32) descriptions.  */
  initialize_tdesc_x32 ();
  initialize_tdesc_x32_avx ();
  initialize_tdesc_x32_avx_avx512 ();
}
c4f35dd8
MK
3249\f
3250
41d041d6
MK
3251/* The 64-bit FXSAVE format differs from the 32-bit format in the
3252 sense that the instruction pointer and data pointer are simply
3253 64-bit offsets into the code segment and the data segment instead
3254 of a selector offset pair. The functions below store the upper 32
3255 bits of these pointers (instead of just the 16-bits of the segment
3256 selector). */
3257
3258/* Fill register REGNUM in REGCACHE with the appropriate
0485f6ad
MK
3259 floating-point or SSE register value from *FXSAVE. If REGNUM is
3260 -1, do this for all registers. This function masks off any of the
3261 reserved bits in *FXSAVE. */
c4f35dd8
MK
3262
3263void
90f90721 3264amd64_supply_fxsave (struct regcache *regcache, int regnum,
20a6ec49 3265 const void *fxsave)
c4f35dd8 3266{
20a6ec49
MD
3267 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3268 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3269
41d041d6 3270 i387_supply_fxsave (regcache, regnum, fxsave);
c4f35dd8 3271
233dfcf0
L
3272 if (fxsave
3273 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
c4f35dd8 3274 {
9a3c8263 3275 const gdb_byte *regs = (const gdb_byte *) fxsave;
41d041d6 3276
20a6ec49
MD
3277 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3278 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3279 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3280 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
c4f35dd8 3281 }
0c1a73d6
MK
3282}
3283
a055a187
L
3284/* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3285
3286void
3287amd64_supply_xsave (struct regcache *regcache, int regnum,
3288 const void *xsave)
3289{
3290 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3291 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3292
3293 i387_supply_xsave (regcache, regnum, xsave);
3294
233dfcf0
L
3295 if (xsave
3296 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
a055a187 3297 {
9a3c8263 3298 const gdb_byte *regs = (const gdb_byte *) xsave;
a055a187
L
3299
3300 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3301 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3302 regs + 12);
3303 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3304 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3305 regs + 20);
3306 }
3307}
3308
3c017e40
MK
3309/* Fill register REGNUM (if it is a floating-point or SSE register) in
3310 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3311 all registers. This function doesn't touch any of the reserved
3312 bits in *FXSAVE. */
3313
3314void
3315amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3316 void *fxsave)
3317{
20a6ec49
MD
3318 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3319 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9a3c8263 3320 gdb_byte *regs = (gdb_byte *) fxsave;
3c017e40
MK
3321
3322 i387_collect_fxsave (regcache, regnum, fxsave);
3323
233dfcf0 3324 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
f0ef85a5 3325 {
20a6ec49
MD
3326 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3327 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3328 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3329 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
f0ef85a5 3330 }
3c017e40 3331}
a055a187 3332
7a9dd1b2 3333/* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
a055a187
L
3334
3335void
3336amd64_collect_xsave (const struct regcache *regcache, int regnum,
3337 void *xsave, int gcore)
3338{
3339 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3340 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9a3c8263 3341 gdb_byte *regs = (gdb_byte *) xsave;
a055a187
L
3342
3343 i387_collect_xsave (regcache, regnum, xsave, gcore);
3344
233dfcf0 3345 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
a055a187
L
3346 {
3347 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3348 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3349 regs + 12);
3350 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3351 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3352 regs + 20);
3353 }
3354}
This page took 1.48992 seconds and 4 git commands to generate.