1 /* Target-dependent code for AMD64.
3 Copyright (C) 2001-2022 Free Software Foundation, Inc.
5 Contributed by Jiri Smid, SuSE Labs.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "opcode/i386.h"
25 #include "arch-utils.h"
27 #include "dummy-frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
40 #include "amd64-tdep.h"
41 #include "i387-tdep.h"
42 #include "gdbsupport/x86-xstate.h"
44 #include "target-descriptions.h"
45 #include "arch/amd64.h"
49 #include "gdbsupport/byte-vector.h"
52 #include "amd64-ravenscar-thread.h"
54 /* Note that the AMD64 architecture was previously known as x86-64.
55 The latter is (forever) engraved into the canonical system name as
56 returned by config.guess, and used as the name for the AMD64 port
57 of GNU/Linux. The BSD's have renamed their ports to amd64; they
58 don't like to shout. For GDB we prefer the amd64_-prefix over the
59 x86_64_-prefix since it's so much easier to type. */
61 /* Register information. */
/* Names of the raw AMD64 registers, indexed by GDB register number.
   %mxcsr (register 56) closes the list.  */

static const char * const amd64_register_names[] = 
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
/* Names of the 256-bit AVX pseudo registers %ymm0-%ymm15.  */

static const char * const amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};
/* Names of the AVX-512-only 256-bit pseudo registers %ymm16-%ymm31.  */

static const char * const amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};
/* Names of the raw upper-half registers backing %ymm0-%ymm15.  */

static const char * const amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};
/* Names of the raw upper-half registers backing %ymm16-%ymm31.  */

static const char * const amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};
/* Names of the MPX bound and configuration registers.  */

static const char * const amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};
/* Names of the AVX-512 opmask registers %k0-%k7.  */

static const char * const amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};
/* Names of the raw upper-half registers backing %zmm0-%zmm31.  */

static const char * const amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};
/* Names of the 512-bit AVX-512 pseudo registers %zmm0-%zmm31.  */

static const char * const amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};
/* Names of the AVX-512-only 128-bit pseudo registers %xmm16-%xmm31.  */

static const char * const amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};
/* Name of the protection-keys register.  */

static const char * const amd64_pkeys_names[] = {
  "pkru"
};
159 /* DWARF Register Number Mapping as defined in the System V psABI,
162 static int amd64_dwarf_regmap
[] =
164 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
165 AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
,
166 AMD64_RCX_REGNUM
, AMD64_RBX_REGNUM
,
167 AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
169 /* Frame Pointer Register RBP. */
172 /* Stack Pointer Register RSP. */
175 /* Extended Integer Registers 8 - 15. */
176 AMD64_R8_REGNUM
, /* %r8 */
177 AMD64_R9_REGNUM
, /* %r9 */
178 AMD64_R10_REGNUM
, /* %r10 */
179 AMD64_R11_REGNUM
, /* %r11 */
180 AMD64_R12_REGNUM
, /* %r12 */
181 AMD64_R13_REGNUM
, /* %r13 */
182 AMD64_R14_REGNUM
, /* %r14 */
183 AMD64_R15_REGNUM
, /* %r15 */
185 /* Return Address RA. Mapped to RIP. */
188 /* SSE Registers 0 - 7. */
189 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
190 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
191 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
192 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
194 /* Extended SSE Registers 8 - 15. */
195 AMD64_XMM0_REGNUM
+ 8, AMD64_XMM0_REGNUM
+ 9,
196 AMD64_XMM0_REGNUM
+ 10, AMD64_XMM0_REGNUM
+ 11,
197 AMD64_XMM0_REGNUM
+ 12, AMD64_XMM0_REGNUM
+ 13,
198 AMD64_XMM0_REGNUM
+ 14, AMD64_XMM0_REGNUM
+ 15,
200 /* Floating Point Registers 0-7. */
201 AMD64_ST0_REGNUM
+ 0, AMD64_ST0_REGNUM
+ 1,
202 AMD64_ST0_REGNUM
+ 2, AMD64_ST0_REGNUM
+ 3,
203 AMD64_ST0_REGNUM
+ 4, AMD64_ST0_REGNUM
+ 5,
204 AMD64_ST0_REGNUM
+ 6, AMD64_ST0_REGNUM
+ 7,
206 /* MMX Registers 0 - 7.
207 We have to handle those registers specifically, as their register
208 number within GDB depends on the target (or they may even not be
209 available at all). */
210 -1, -1, -1, -1, -1, -1, -1, -1,
212 /* Control and Status Flags Register. */
215 /* Selector Registers. */
225 /* Segment Base Address Registers. */
231 /* Special Selector Registers. */
235 /* Floating Point Control Registers. */
241 static const int amd64_dwarf_regmap_len
=
242 (sizeof (amd64_dwarf_regmap
) / sizeof (amd64_dwarf_regmap
[0]));
244 /* Convert DWARF register number REG to the appropriate register
245 number used by GDB. */
248 amd64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
250 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
251 int ymm0_regnum
= tdep
->ymm0_regnum
;
254 if (reg
>= 0 && reg
< amd64_dwarf_regmap_len
)
255 regnum
= amd64_dwarf_regmap
[reg
];
258 && i386_xmm_regnum_p (gdbarch
, regnum
))
259 regnum
+= ymm0_regnum
- I387_XMM0_REGNUM (tdep
);
264 /* Map architectural register numbers to gdb register numbers. */
266 static const int amd64_arch_regmap
[16] =
268 AMD64_RAX_REGNUM
, /* %rax */
269 AMD64_RCX_REGNUM
, /* %rcx */
270 AMD64_RDX_REGNUM
, /* %rdx */
271 AMD64_RBX_REGNUM
, /* %rbx */
272 AMD64_RSP_REGNUM
, /* %rsp */
273 AMD64_RBP_REGNUM
, /* %rbp */
274 AMD64_RSI_REGNUM
, /* %rsi */
275 AMD64_RDI_REGNUM
, /* %rdi */
276 AMD64_R8_REGNUM
, /* %r8 */
277 AMD64_R9_REGNUM
, /* %r9 */
278 AMD64_R10_REGNUM
, /* %r10 */
279 AMD64_R11_REGNUM
, /* %r11 */
280 AMD64_R12_REGNUM
, /* %r12 */
281 AMD64_R13_REGNUM
, /* %r13 */
282 AMD64_R14_REGNUM
, /* %r14 */
283 AMD64_R15_REGNUM
/* %r15 */
286 static const int amd64_arch_regmap_len
=
287 (sizeof (amd64_arch_regmap
) / sizeof (amd64_arch_regmap
[0]));
289 /* Convert architectural register number REG to the appropriate register
290 number used by GDB. */
293 amd64_arch_reg_to_regnum (int reg
)
295 gdb_assert (reg
>= 0 && reg
< amd64_arch_regmap_len
);
297 return amd64_arch_regmap
[reg
];
/* Register names for byte pseudo-registers.  The first 16 entries are
   the low bytes of the GPRs; "ah".."dh" follow.  */

static const char * const amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16
/* Register names for word pseudo-registers.  The "sp" slot is left
   empty because %sp is not exposed as a pseudo register.  */

static const char * const amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "", 
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
/* Register names for dword pseudo-registers.  "eip" closes the list.  */

static const char * const amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
329 /* Return the name of register REGNUM. */
332 amd64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
334 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
335 if (i386_byte_regnum_p (gdbarch
, regnum
))
336 return amd64_byte_names
[regnum
- tdep
->al_regnum
];
337 else if (i386_zmm_regnum_p (gdbarch
, regnum
))
338 return amd64_zmm_names
[regnum
- tdep
->zmm0_regnum
];
339 else if (i386_ymm_regnum_p (gdbarch
, regnum
))
340 return amd64_ymm_names
[regnum
- tdep
->ymm0_regnum
];
341 else if (i386_ymm_avx512_regnum_p (gdbarch
, regnum
))
342 return amd64_ymm_avx512_names
[regnum
- tdep
->ymm16_regnum
];
343 else if (i386_word_regnum_p (gdbarch
, regnum
))
344 return amd64_word_names
[regnum
- tdep
->ax_regnum
];
345 else if (i386_dword_regnum_p (gdbarch
, regnum
))
346 return amd64_dword_names
[regnum
- tdep
->eax_regnum
];
348 return i386_pseudo_register_name (gdbarch
, regnum
);
351 static struct value
*
352 amd64_pseudo_register_read_value (struct gdbarch
*gdbarch
,
353 readable_regcache
*regcache
,
356 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
358 value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
359 VALUE_LVAL (result_value
) = lval_register
;
360 VALUE_REGNUM (result_value
) = regnum
;
361 gdb_byte
*buf
= value_contents_raw (result_value
);
363 if (i386_byte_regnum_p (gdbarch
, regnum
))
365 int gpnum
= regnum
- tdep
->al_regnum
;
367 /* Extract (always little endian). */
368 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
370 gpnum
-= AMD64_NUM_LOWER_BYTE_REGS
;
371 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
373 /* Special handling for AH, BH, CH, DH. */
374 register_status status
= regcache
->raw_read (gpnum
, raw_buf
);
375 if (status
== REG_VALID
)
376 memcpy (buf
, raw_buf
+ 1, 1);
378 mark_value_bytes_unavailable (result_value
, 0,
379 TYPE_LENGTH (value_type (result_value
)));
383 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
384 register_status status
= regcache
->raw_read (gpnum
, raw_buf
);
385 if (status
== REG_VALID
)
386 memcpy (buf
, raw_buf
, 1);
388 mark_value_bytes_unavailable (result_value
, 0,
389 TYPE_LENGTH (value_type (result_value
)));
392 else if (i386_dword_regnum_p (gdbarch
, regnum
))
394 int gpnum
= regnum
- tdep
->eax_regnum
;
395 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
396 /* Extract (always little endian). */
397 register_status status
= regcache
->raw_read (gpnum
, raw_buf
);
398 if (status
== REG_VALID
)
399 memcpy (buf
, raw_buf
, 4);
401 mark_value_bytes_unavailable (result_value
, 0,
402 TYPE_LENGTH (value_type (result_value
)));
405 i386_pseudo_register_read_into_value (gdbarch
, regcache
, regnum
,
412 amd64_pseudo_register_write (struct gdbarch
*gdbarch
,
413 struct regcache
*regcache
,
414 int regnum
, const gdb_byte
*buf
)
416 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
418 if (i386_byte_regnum_p (gdbarch
, regnum
))
420 int gpnum
= regnum
- tdep
->al_regnum
;
422 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
424 gpnum
-= AMD64_NUM_LOWER_BYTE_REGS
;
425 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
427 /* Read ... AH, BH, CH, DH. */
428 regcache
->raw_read (gpnum
, raw_buf
);
429 /* ... Modify ... (always little endian). */
430 memcpy (raw_buf
+ 1, buf
, 1);
432 regcache
->raw_write (gpnum
, raw_buf
);
436 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
439 regcache
->raw_read (gpnum
, raw_buf
);
440 /* ... Modify ... (always little endian). */
441 memcpy (raw_buf
, buf
, 1);
443 regcache
->raw_write (gpnum
, raw_buf
);
446 else if (i386_dword_regnum_p (gdbarch
, regnum
))
448 int gpnum
= regnum
- tdep
->eax_regnum
;
449 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
452 regcache
->raw_read (gpnum
, raw_buf
);
453 /* ... Modify ... (always little endian). */
454 memcpy (raw_buf
, buf
, 4);
456 regcache
->raw_write (gpnum
, raw_buf
);
459 i386_pseudo_register_write (gdbarch
, regcache
, regnum
, buf
);
462 /* Implement the 'ax_pseudo_register_collect' gdbarch method. */
465 amd64_ax_pseudo_register_collect (struct gdbarch
*gdbarch
,
466 struct agent_expr
*ax
, int regnum
)
468 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
470 if (i386_byte_regnum_p (gdbarch
, regnum
))
472 int gpnum
= regnum
- tdep
->al_regnum
;
474 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
475 ax_reg_mask (ax
, gpnum
- AMD64_NUM_LOWER_BYTE_REGS
);
477 ax_reg_mask (ax
, gpnum
);
480 else if (i386_dword_regnum_p (gdbarch
, regnum
))
482 int gpnum
= regnum
- tdep
->eax_regnum
;
484 ax_reg_mask (ax
, gpnum
);
488 return i386_ax_pseudo_register_collect (gdbarch
, ax
, regnum
);
493 /* Register classes as defined in the psABI. */
507 /* Return the union class of CLASS1 and CLASS2. See the psABI for
510 static enum amd64_reg_class
511 amd64_merge_classes (enum amd64_reg_class class1
, enum amd64_reg_class class2
)
513 /* Rule (a): If both classes are equal, this is the resulting class. */
514 if (class1
== class2
)
517 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
518 is the other class. */
519 if (class1
== AMD64_NO_CLASS
)
521 if (class2
== AMD64_NO_CLASS
)
524 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
525 if (class1
== AMD64_MEMORY
|| class2
== AMD64_MEMORY
)
528 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
529 if (class1
== AMD64_INTEGER
|| class2
== AMD64_INTEGER
)
530 return AMD64_INTEGER
;
532 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
533 MEMORY is used as class. */
534 if (class1
== AMD64_X87
|| class1
== AMD64_X87UP
535 || class1
== AMD64_COMPLEX_X87
|| class2
== AMD64_X87
536 || class2
== AMD64_X87UP
|| class2
== AMD64_COMPLEX_X87
)
539 /* Rule (f): Otherwise class SSE is used. */
543 static void amd64_classify (struct type
*type
, enum amd64_reg_class theclass
[2]);
545 /* Return true if TYPE is a structure or union with unaligned fields. */
548 amd64_has_unaligned_fields (struct type
*type
)
550 if (type
->code () == TYPE_CODE_STRUCT
551 || type
->code () == TYPE_CODE_UNION
)
553 for (int i
= 0; i
< type
->num_fields (); i
++)
555 struct type
*subtype
= check_typedef (type
->field (i
).type ());
556 int bitpos
= TYPE_FIELD_BITPOS (type
, i
);
558 /* Ignore static fields, empty fields (for example nested
559 empty structures), and bitfields (these are handled by
561 if (field_is_static (&type
->field (i
))
562 || (TYPE_FIELD_BITSIZE (type
, i
) == 0
563 && TYPE_LENGTH (subtype
) == 0)
564 || TYPE_FIELD_PACKED (type
, i
))
570 int align
= type_align (subtype
);
572 error (_("could not determine alignment of type"));
574 int bytepos
= bitpos
/ 8;
575 if (bytepos
% align
!= 0)
578 if (amd64_has_unaligned_fields (subtype
))
586 /* Classify field I of TYPE starting at BITOFFSET according to the rules for
587 structures and union types, and store the result in THECLASS. */
590 amd64_classify_aggregate_field (struct type
*type
, int i
,
591 enum amd64_reg_class theclass
[2],
592 unsigned int bitoffset
)
594 struct type
*subtype
= check_typedef (type
->field (i
).type ());
595 int bitpos
= bitoffset
+ TYPE_FIELD_BITPOS (type
, i
);
596 int pos
= bitpos
/ 64;
597 enum amd64_reg_class subclass
[2];
598 int bitsize
= TYPE_FIELD_BITSIZE (type
, i
);
602 bitsize
= TYPE_LENGTH (subtype
) * 8;
603 endpos
= (bitpos
+ bitsize
- 1) / 64;
605 /* Ignore static fields, or empty fields, for example nested
607 if (field_is_static (&type
->field (i
)) || bitsize
== 0)
610 if (subtype
->code () == TYPE_CODE_STRUCT
611 || subtype
->code () == TYPE_CODE_UNION
)
613 /* Each field of an object is classified recursively. */
615 for (j
= 0; j
< subtype
->num_fields (); j
++)
616 amd64_classify_aggregate_field (subtype
, j
, theclass
, bitpos
);
620 gdb_assert (pos
== 0 || pos
== 1);
622 amd64_classify (subtype
, subclass
);
623 theclass
[pos
] = amd64_merge_classes (theclass
[pos
], subclass
[0]);
624 if (bitsize
<= 64 && pos
== 0 && endpos
== 1)
625 /* This is a bit of an odd case: We have a field that would
626 normally fit in one of the two eightbytes, except that
627 it is placed in a way that this field straddles them.
628 This has been seen with a structure containing an array.
630 The ABI is a bit unclear in this case, but we assume that
631 this field's class (stored in subclass[0]) must also be merged
632 into class[1]. In other words, our field has a piece stored
633 in the second eight-byte, and thus its class applies to
634 the second eight-byte as well.
636 In the case where the field length exceeds 8 bytes,
637 it should not be necessary to merge the field class
638 into class[1]. As LEN > 8, subclass[1] is necessarily
639 different from AMD64_NO_CLASS. If subclass[1] is equal
640 to subclass[0], then the normal class[1]/subclass[1]
641 merging will take care of everything. For subclass[1]
642 to be different from subclass[0], I can only see the case
643 where we have a SSE/SSEUP or X87/X87UP pair, which both
644 use up all 16 bytes of the aggregate, and are already
645 handled just fine (because each portion sits on its own
647 theclass
[1] = amd64_merge_classes (theclass
[1], subclass
[0]);
649 theclass
[1] = amd64_merge_classes (theclass
[1], subclass
[1]);
652 /* Classify TYPE according to the rules for aggregate (structures and
653 arrays) and union types, and store the result in CLASS. */
656 amd64_classify_aggregate (struct type
*type
, enum amd64_reg_class theclass
[2])
658 /* 1. If the size of an object is larger than two eightbytes, or it has
659 unaligned fields, it has class memory. */
660 if (TYPE_LENGTH (type
) > 16 || amd64_has_unaligned_fields (type
))
662 theclass
[0] = theclass
[1] = AMD64_MEMORY
;
666 /* 2. Both eightbytes get initialized to class NO_CLASS. */
667 theclass
[0] = theclass
[1] = AMD64_NO_CLASS
;
669 /* 3. Each field of an object is classified recursively so that
670 always two fields are considered. The resulting class is
671 calculated according to the classes of the fields in the
674 if (type
->code () == TYPE_CODE_ARRAY
)
676 struct type
*subtype
= check_typedef (TYPE_TARGET_TYPE (type
));
678 /* All fields in an array have the same type. */
679 amd64_classify (subtype
, theclass
);
680 if (TYPE_LENGTH (type
) > 8 && theclass
[1] == AMD64_NO_CLASS
)
681 theclass
[1] = theclass
[0];
687 /* Structure or union. */
688 gdb_assert (type
->code () == TYPE_CODE_STRUCT
689 || type
->code () == TYPE_CODE_UNION
);
691 for (i
= 0; i
< type
->num_fields (); i
++)
692 amd64_classify_aggregate_field (type
, i
, theclass
, 0);
695 /* 4. Then a post merger cleanup is done: */
697 /* Rule (a): If one of the classes is MEMORY, the whole argument is
699 if (theclass
[0] == AMD64_MEMORY
|| theclass
[1] == AMD64_MEMORY
)
700 theclass
[0] = theclass
[1] = AMD64_MEMORY
;
702 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
704 if (theclass
[0] == AMD64_SSEUP
)
705 theclass
[0] = AMD64_SSE
;
706 if (theclass
[1] == AMD64_SSEUP
&& theclass
[0] != AMD64_SSE
)
707 theclass
[1] = AMD64_SSE
;
710 /* Classify TYPE, and store the result in CLASS. */
713 amd64_classify (struct type
*type
, enum amd64_reg_class theclass
[2])
715 enum type_code code
= type
->code ();
716 int len
= TYPE_LENGTH (type
);
718 theclass
[0] = theclass
[1] = AMD64_NO_CLASS
;
720 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
721 long, long long, and pointers are in the INTEGER class. Similarly,
722 range types, used by languages such as Ada, are also in the INTEGER
724 if ((code
== TYPE_CODE_INT
|| code
== TYPE_CODE_ENUM
725 || code
== TYPE_CODE_BOOL
|| code
== TYPE_CODE_RANGE
726 || code
== TYPE_CODE_CHAR
727 || code
== TYPE_CODE_PTR
|| TYPE_IS_REFERENCE (type
))
728 && (len
== 1 || len
== 2 || len
== 4 || len
== 8))
729 theclass
[0] = AMD64_INTEGER
;
731 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
733 else if ((code
== TYPE_CODE_FLT
|| code
== TYPE_CODE_DECFLOAT
)
734 && (len
== 4 || len
== 8))
736 theclass
[0] = AMD64_SSE
;
738 /* Arguments of types __float128, _Decimal128 and __m128 are split into
739 two halves. The least significant ones belong to class SSE, the most
740 significant one to class SSEUP. */
741 else if (code
== TYPE_CODE_DECFLOAT
&& len
== 16)
742 /* FIXME: __float128, __m128. */
743 theclass
[0] = AMD64_SSE
, theclass
[1] = AMD64_SSEUP
;
745 /* The 64-bit mantissa of arguments of type long double belongs to
746 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
748 else if (code
== TYPE_CODE_FLT
&& len
== 16)
749 /* Class X87 and X87UP. */
750 theclass
[0] = AMD64_X87
, theclass
[1] = AMD64_X87UP
;
752 /* Arguments of complex T where T is one of the types float or
753 double get treated as if they are implemented as:
761 else if (code
== TYPE_CODE_COMPLEX
&& len
== 8)
762 theclass
[0] = AMD64_SSE
;
763 else if (code
== TYPE_CODE_COMPLEX
&& len
== 16)
764 theclass
[0] = theclass
[1] = AMD64_SSE
;
766 /* A variable of type complex long double is classified as type
768 else if (code
== TYPE_CODE_COMPLEX
&& len
== 32)
769 theclass
[0] = AMD64_COMPLEX_X87
;
772 else if (code
== TYPE_CODE_ARRAY
|| code
== TYPE_CODE_STRUCT
773 || code
== TYPE_CODE_UNION
)
774 amd64_classify_aggregate (type
, theclass
);
777 static enum return_value_convention
778 amd64_return_value (struct gdbarch
*gdbarch
, struct value
*function
,
779 struct type
*type
, struct regcache
*regcache
,
780 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
782 enum amd64_reg_class theclass
[2];
783 int len
= TYPE_LENGTH (type
);
784 static int integer_regnum
[] = { AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
};
785 static int sse_regnum
[] = { AMD64_XMM0_REGNUM
, AMD64_XMM1_REGNUM
};
790 gdb_assert (!(readbuf
&& writebuf
));
792 /* 1. Classify the return type with the classification algorithm. */
793 amd64_classify (type
, theclass
);
795 /* 2. If the type has class MEMORY, then the caller provides space
796 for the return value and passes the address of this storage in
797 %rdi as if it were the first argument to the function. In effect,
798 this address becomes a hidden first argument.
800 On return %rax will contain the address that has been passed in
801 by the caller in %rdi. */
802 if (theclass
[0] == AMD64_MEMORY
)
804 /* As indicated by the comment above, the ABI guarantees that we
805 can always find the return value just after the function has
812 regcache_raw_read_unsigned (regcache
, AMD64_RAX_REGNUM
, &addr
);
813 read_memory (addr
, readbuf
, TYPE_LENGTH (type
));
816 return RETURN_VALUE_ABI_RETURNS_ADDRESS
;
819 /* 8. If the class is COMPLEX_X87, the real part of the value is
820 returned in %st0 and the imaginary part in %st1. */
821 if (theclass
[0] == AMD64_COMPLEX_X87
)
825 regcache
->raw_read (AMD64_ST0_REGNUM
, readbuf
);
826 regcache
->raw_read (AMD64_ST1_REGNUM
, readbuf
+ 16);
831 i387_return_value (gdbarch
, regcache
);
832 regcache
->raw_write (AMD64_ST0_REGNUM
, writebuf
);
833 regcache
->raw_write (AMD64_ST1_REGNUM
, writebuf
+ 16);
835 /* Fix up the tag word such that both %st(0) and %st(1) are
837 regcache_raw_write_unsigned (regcache
, AMD64_FTAG_REGNUM
, 0xfff);
840 return RETURN_VALUE_REGISTER_CONVENTION
;
843 gdb_assert (theclass
[1] != AMD64_MEMORY
);
844 gdb_assert (len
<= 16);
846 for (i
= 0; len
> 0; i
++, len
-= 8)
854 /* 3. If the class is INTEGER, the next available register
855 of the sequence %rax, %rdx is used. */
856 regnum
= integer_regnum
[integer_reg
++];
860 /* 4. If the class is SSE, the next available SSE register
861 of the sequence %xmm0, %xmm1 is used. */
862 regnum
= sse_regnum
[sse_reg
++];
866 /* 5. If the class is SSEUP, the eightbyte is passed in the
867 upper half of the last used SSE register. */
868 gdb_assert (sse_reg
> 0);
869 regnum
= sse_regnum
[sse_reg
- 1];
874 /* 6. If the class is X87, the value is returned on the X87
875 stack in %st0 as 80-bit x87 number. */
876 regnum
= AMD64_ST0_REGNUM
;
878 i387_return_value (gdbarch
, regcache
);
882 /* 7. If the class is X87UP, the value is returned together
883 with the previous X87 value in %st0. */
884 gdb_assert (i
> 0 && theclass
[0] == AMD64_X87
);
885 regnum
= AMD64_ST0_REGNUM
;
894 gdb_assert (!"Unexpected register class.");
897 gdb_assert (regnum
!= -1);
900 regcache
->raw_read_part (regnum
, offset
, std::min (len
, 8),
903 regcache
->raw_write_part (regnum
, offset
, std::min (len
, 8),
907 return RETURN_VALUE_REGISTER_CONVENTION
;
912 amd64_push_arguments (struct regcache
*regcache
, int nargs
, struct value
**args
,
913 CORE_ADDR sp
, function_call_return_method return_method
)
915 static int integer_regnum
[] =
917 AMD64_RDI_REGNUM
, /* %rdi */
918 AMD64_RSI_REGNUM
, /* %rsi */
919 AMD64_RDX_REGNUM
, /* %rdx */
920 AMD64_RCX_REGNUM
, /* %rcx */
921 AMD64_R8_REGNUM
, /* %r8 */
922 AMD64_R9_REGNUM
/* %r9 */
924 static int sse_regnum
[] =
926 /* %xmm0 ... %xmm7 */
927 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
928 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
929 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
930 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
932 struct value
**stack_args
= XALLOCAVEC (struct value
*, nargs
);
933 int num_stack_args
= 0;
934 int num_elements
= 0;
940 /* Reserve a register for the "hidden" argument. */
941 if (return_method
== return_method_struct
)
944 for (i
= 0; i
< nargs
; i
++)
946 struct type
*type
= value_type (args
[i
]);
947 int len
= TYPE_LENGTH (type
);
948 enum amd64_reg_class theclass
[2];
949 int needed_integer_regs
= 0;
950 int needed_sse_regs
= 0;
953 /* Classify argument. */
954 amd64_classify (type
, theclass
);
956 /* Calculate the number of integer and SSE registers needed for
958 for (j
= 0; j
< 2; j
++)
960 if (theclass
[j
] == AMD64_INTEGER
)
961 needed_integer_regs
++;
962 else if (theclass
[j
] == AMD64_SSE
)
966 /* Check whether enough registers are available, and if the
967 argument should be passed in registers at all. */
968 if (integer_reg
+ needed_integer_regs
> ARRAY_SIZE (integer_regnum
)
969 || sse_reg
+ needed_sse_regs
> ARRAY_SIZE (sse_regnum
)
970 || (needed_integer_regs
== 0 && needed_sse_regs
== 0))
972 /* The argument will be passed on the stack. */
973 num_elements
+= ((len
+ 7) / 8);
974 stack_args
[num_stack_args
++] = args
[i
];
978 /* The argument will be passed in registers. */
979 const gdb_byte
*valbuf
= value_contents (args
[i
]);
982 gdb_assert (len
<= 16);
984 for (j
= 0; len
> 0; j
++, len
-= 8)
992 regnum
= integer_regnum
[integer_reg
++];
996 regnum
= sse_regnum
[sse_reg
++];
1000 gdb_assert (sse_reg
> 0);
1001 regnum
= sse_regnum
[sse_reg
- 1];
1005 case AMD64_NO_CLASS
:
1009 gdb_assert (!"Unexpected register class.");
1012 gdb_assert (regnum
!= -1);
1013 memset (buf
, 0, sizeof buf
);
1014 memcpy (buf
, valbuf
+ j
* 8, std::min (len
, 8));
1015 regcache
->raw_write_part (regnum
, offset
, 8, buf
);
1020 /* Allocate space for the arguments on the stack. */
1021 sp
-= num_elements
* 8;
1023 /* The psABI says that "The end of the input argument area shall be
1024 aligned on a 16 byte boundary." */
1027 /* Write out the arguments to the stack. */
1028 for (i
= 0; i
< num_stack_args
; i
++)
1030 struct type
*type
= value_type (stack_args
[i
]);
1031 const gdb_byte
*valbuf
= value_contents (stack_args
[i
]);
1032 int len
= TYPE_LENGTH (type
);
1034 write_memory (sp
+ element
* 8, valbuf
, len
);
1035 element
+= ((len
+ 7) / 8);
1038 /* The psABI says that "For calls that may call functions that use
1039 varargs or stdargs (prototype-less calls or calls to functions
1040 containing ellipsis (...) in the declaration) %al is used as
1041 hidden argument to specify the number of SSE registers used. */
1042 regcache_raw_write_unsigned (regcache
, AMD64_RAX_REGNUM
, sse_reg
);
1047 amd64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1048 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1049 int nargs
, struct value
**args
, CORE_ADDR sp
,
1050 function_call_return_method return_method
,
1051 CORE_ADDR struct_addr
)
1053 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1056 /* BND registers can be in arbitrary values at the moment of the
1057 inferior call. This can cause boundary violations that are not
1058 due to a real bug or even desired by the user. The best to be done
1059 is set the BND registers to allow access to the whole memory, INIT
1060 state, before pushing the inferior call. */
1061 i387_reset_bnd_regs (gdbarch
, regcache
);
1063 /* Pass arguments. */
1064 sp
= amd64_push_arguments (regcache
, nargs
, args
, sp
, return_method
);
1066 /* Pass "hidden" argument". */
1067 if (return_method
== return_method_struct
)
1069 store_unsigned_integer (buf
, 8, byte_order
, struct_addr
);
1070 regcache
->cooked_write (AMD64_RDI_REGNUM
, buf
);
1073 /* Store return address. */
1075 store_unsigned_integer (buf
, 8, byte_order
, bp_addr
);
1076 write_memory (sp
, buf
, 8);
1078 /* Finally, update the stack pointer... */
1079 store_unsigned_integer (buf
, 8, byte_order
, sp
);
1080 regcache
->cooked_write (AMD64_RSP_REGNUM
, buf
);
1082 /* ...and fake a frame pointer. */
1083 regcache
->cooked_write (AMD64_RBP_REGNUM
, buf
);
1088 /* Displaced instruction handling. */
1090 /* A partially decoded instruction.
1091 This contains enough details for displaced stepping purposes. */
1095 /* The number of opcode bytes. */
1097 /* The offset of the REX/VEX instruction encoding prefix or -1 if
1099 int enc_prefix_offset
;
1100 /* The offset to the first opcode byte. */
1102 /* The offset to the modrm byte or -1 if not present. */
1105 /* The raw instruction. */
1109 struct amd64_displaced_step_copy_insn_closure
1110 : public displaced_step_copy_insn_closure
1112 amd64_displaced_step_copy_insn_closure (int insn_buf_len
)
1113 : insn_buf (insn_buf_len
, 0)
1116 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1121 /* Details of the instruction. */
1122 struct amd64_insn insn_details
;
1124 /* The possibly modified insn. */
1125 gdb::byte_vector insn_buf
;
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
/* Which 0x0f-prefixed (two-byte) opcodes take a ModRM byte.  */

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
/* Forward declaration; defined below after the predicate helpers.  */
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
/* Return true if PFX is a REX prefix byte (0x40..0x4f).  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
/* True if PFX is the start of the 2-byte VEX prefix.  */

static bool
vex2_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc5;
}
/* True if PFX is the start of the 3-byte VEX prefix.  */

static bool
vex3_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc4;
}
/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.
   Returns a pointer to the first non-prefix byte.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}
/* Return an integer register (other than RSP) that is unused as an input
   operand in DETAILS.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarily avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      /* A SIB byte follows when mod != 3 and r/m == 4.  */
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
/* Extract the details of INSN that we need.
   Fills DETAILS with the offsets (relative to the start of INSN) of the
   encoding prefix, opcode and ModRM byte, or -1 where absent.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->enc_prefix_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX/VEX instruction encoding prefixes.  */
  if (rex_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      ++insn;
    }
  else if (vex2_prefix_p (*insn))
    {
      /* Don't record the offset in this case because this prefix has
	 no REX.B equivalent.  */
      insn += 2;
    }
  else if (vex3_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      insn += 3;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch,
	      amd64_displaced_step_copy_insn_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */

  /* Compute the rip-relative address.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;

  /* REX.B should be unset (VEX.!B set) as we were using rip-relative
     addressing, but ensure it's unset (set for VEX) anyway, tmp_regno
     is not r8-r15.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  /* Save the temp register's original value so fixup can restore it.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  displaced_debug_printf ("%%rip-relative addressing used.");
  displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
			  dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			  paddress (gdbarch, rip_base));
}
/* Fix up the copied instruction in DSC before it is executed at TO.
   Currently the only adjustment needed is rewriting %rip-relative
   addressing (ModRM mod=00, r/m=101).  */

static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      amd64_displaced_step_copy_insn_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}
/* The gdbarch displaced_step_copy_insn method: copy the instruction at
   FROM to the scratch area at TO, rewriting it as needed, and return the
   closure used later by amd64_displaced_step_fixup.  */

displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
    (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);

  write_memory (to, buf, len);

  displaced_debug_printf ("copy %s->%s: %s",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  displaced_step_dump_bytes (buf, len).c_str ());

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
/* Return non-zero if DETAILS is an absolute (indirect) jump,
   zero otherwise.  */

static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}
/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}
/* Return non-zero if DETAILS is an absolute (indirect) call,
   zero otherwise.  */

static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}
/* Return non-zero if DETAILS is a return instruction, zero otherwise.  */

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}
/* Return non-zero if DETAILS is a call instruction, zero otherwise.  */

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}
/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* "syscall" is 0x0f 0x05.  */
  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}
/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;
  gdb_byte *buf;
  int len, classification;

  len = gdbarch_max_insn_length (gdbarch);
  buf = (gdb_byte *) alloca (len);

  read_code (addr, buf, len);
  amd64_get_insn_details (buf, &details);

  classification = pred (&details);

  return classification;
}
/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}
/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}
/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_copy_insn_closure *dsc_,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  amd64_displaced_step_copy_insn_closure *dsc
    = (amd64_displaced_step_copy_insn_closure *) dsc_;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf.data ();
  const struct amd64_insn *insn_details = &dsc->insn_details;

  displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      displaced_debug_printf ("restoring reg %d to %s",
			      dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, the we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures its a nop, we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	displaced_debug_printf ("syscall changed %%rip; not relocating");
      else
	{
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  displaced_debug_printf ("relocated %%rip from %s to %s",
				  paddress (gdbarch, orig_rip),
				  paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      displaced_debug_printf ("relocated return addr at %s to %s",
			      paddress (gdbarch, rsp),
			      paddress (gdbarch, retaddr));
    }
}
/* If the instruction INSN uses RIP-relative addressing, return the
   offset into the raw INSN where the displacement to be adjusted is
   found.  Returns 0 if the instruction doesn't use RIP-relative
   addressing.  */

static int
rip_relative_offset (struct amd64_insn *insn)
{
  if (insn->modrm_offset != -1)
    {
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The displacement is found right after the ModRM byte.  */
	  return insn->modrm_offset + 1;
	}
    }

  return 0;
}
/* Write the LEN bytes in BUF to *TO and advance *TO past them.  */

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1817 amd64_relocate_instruction (struct gdbarch
*gdbarch
,
1818 CORE_ADDR
*to
, CORE_ADDR oldloc
)
1820 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1821 int len
= gdbarch_max_insn_length (gdbarch
);
1822 /* Extra space for sentinels. */
1823 int fixup_sentinel_space
= len
;
1824 gdb_byte
*buf
= (gdb_byte
*) xmalloc (len
+ fixup_sentinel_space
);
1825 struct amd64_insn insn_details
;
1827 LONGEST rel32
, newrel
;
1831 read_memory (oldloc
, buf
, len
);
1833 /* Set up the sentinel space so we don't have to worry about running
1834 off the end of the buffer. An excessive number of leading prefixes
1835 could otherwise cause this. */
1836 memset (buf
+ len
, 0, fixup_sentinel_space
);
1839 amd64_get_insn_details (insn
, &insn_details
);
1841 insn_length
= gdb_buffered_insn_length (gdbarch
, insn
, len
, oldloc
);
1843 /* Skip legacy instruction prefixes. */
1844 insn
= amd64_skip_prefixes (insn
);
1846 /* Adjust calls with 32-bit relative addresses as push/jump, with
1847 the address pushed being the location where the original call in
1848 the user program would return to. */
1849 if (insn
[0] == 0xe8)
1851 gdb_byte push_buf
[32];
1855 /* Where "ret" in the original code will return to. */
1856 ret_addr
= oldloc
+ insn_length
;
1858 /* If pushing an address higher than or equal to 0x80000000,
1859 avoid 'pushq', as that sign extends its 32-bit operand, which
1860 would be incorrect. */
1861 if (ret_addr
<= 0x7fffffff)
1863 push_buf
[0] = 0x68; /* pushq $... */
1864 store_unsigned_integer (&push_buf
[1], 4, byte_order
, ret_addr
);
1869 push_buf
[i
++] = 0x48; /* sub $0x8,%rsp */
1870 push_buf
[i
++] = 0x83;
1871 push_buf
[i
++] = 0xec;
1872 push_buf
[i
++] = 0x08;
1874 push_buf
[i
++] = 0xc7; /* movl $imm,(%rsp) */
1875 push_buf
[i
++] = 0x04;
1876 push_buf
[i
++] = 0x24;
1877 store_unsigned_integer (&push_buf
[i
], 4, byte_order
,
1878 ret_addr
& 0xffffffff);
1881 push_buf
[i
++] = 0xc7; /* movl $imm,4(%rsp) */
1882 push_buf
[i
++] = 0x44;
1883 push_buf
[i
++] = 0x24;
1884 push_buf
[i
++] = 0x04;
1885 store_unsigned_integer (&push_buf
[i
], 4, byte_order
,
1889 gdb_assert (i
<= sizeof (push_buf
));
1890 /* Push the push. */
1891 append_insns (to
, i
, push_buf
);
1893 /* Convert the relative call to a relative jump. */
1896 /* Adjust the destination offset. */
1897 rel32
= extract_signed_integer (insn
+ 1, 4, byte_order
);
1898 newrel
= (oldloc
- *to
) + rel32
;
1899 store_signed_integer (insn
+ 1, 4, byte_order
, newrel
);
1901 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1902 hex_string (rel32
), paddress (gdbarch
, oldloc
),
1903 hex_string (newrel
), paddress (gdbarch
, *to
));
1905 /* Write the adjusted jump into its displaced location. */
1906 append_insns (to
, 5, insn
);
1910 offset
= rip_relative_offset (&insn_details
);
1913 /* Adjust jumps with 32-bit relative addresses. Calls are
1914 already handled above. */
1915 if (insn
[0] == 0xe9)
1917 /* Adjust conditional jumps. */
1918 else if (insn
[0] == 0x0f && (insn
[1] & 0xf0) == 0x80)
1924 rel32
= extract_signed_integer (insn
+ offset
, 4, byte_order
);
1925 newrel
= (oldloc
- *to
) + rel32
;
1926 store_signed_integer (insn
+ offset
, 4, byte_order
, newrel
);
1927 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1928 hex_string (rel32
), paddress (gdbarch
, oldloc
),
1929 hex_string (newrel
), paddress (gdbarch
, *to
));
1932 /* Write the adjusted instruction into its displaced location. */
1933 append_insns (to
, insn_length
, buf
);
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

/* Cached frame information built by the prologue analyzer and consumed
   by the frame unwinders.  */
struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  int base_p;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
/* Initialize a frame cache.  */

static void
amd64_init_frame_cache (struct amd64_frame_cache *cache)
{
  int i;

  /* Base address.  */
  cache->base = 0;
  cache->base_p = 0;
  /* -8 accounts for the return address pushed by the caller's `call'.  */
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).
     The values start out as being offsets, and are later converted to
     addresses (at which point -1 is interpreted as an address, still meaning
     "unused").  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;
}
/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;

  /* Obstack-allocated; freed automatically with the frame cache.  */
  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
  amd64_init_frame_cache (cache);
  return cache;
}
/* GCC 4.4 and later, can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Rigister can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return std::min (pc + offset + 2, current_pc);
}
/* Similar to amd64_analyze_stack_align for x32.  */

static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			       struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		[addr32] leal  8(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		pushq %reg
		[addr32] leal  16(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:

	0x83 0xe4 0xf0			andl $-16, %esp
	0x81 0xe4 0x00 0xff 0xff 0xff	andl $-256, %esp
   */

  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d
      && buf[offset + 3] == 0x24
      && buf[offset + 4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
	  && (buf[offset + 1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[offset] & 1) != 0)
	    reg = 8;

	  offset += 1;
	}
      else if ((buf[offset] & 0xf8) != 0x50)
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
	offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
	 "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Rigister can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return std::min (pc + offset + 2, current_pc);
}
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)

   or (for the X32 ABI):

      pushq %rbp        0x55
      movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)

   The `endbr64` instruction can be found before these sequences, and will be
   skipped if found.

   Any function that doesn't start with one of these sequences will be
   assumed to have no prologue and thus no valid frame pointer in
   %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The `endbr64` instruction.  */
  static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_code_unsigned_integer (pc, 1, byte_order);

  /* Check for the `endbr64` instruction, skip it if found.  */
  if (op == endbr64[0])
    {
      read_code (pc + 1, buf, 3);

      if (memcmp (buf, &endbr64[1], 3) == 0)
	pc += 4;

      op = read_code_unsigned_integer (pc, 1, byte_order);
    }

  if (current_pc <= pc)
    return current_pc;

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
	 starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
	return current_pc;

      read_code (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
	  || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
	{
	  /* OK, we actually have a frame.  */
	  cache->frameless_p = 0;

	  /* Done with the prologue analysis.  */
	  return pc + 4;
	}

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
	{
	  if (memcmp (buf, mov_esp_ebp_1, 2) == 0
	      || memcmp (buf, mov_esp_ebp_2, 2) == 0)
	    {
	      /* OK, we actually have a frame.  */
	      cache->frameless_p = 0;

	      /* Done with the prologue analysis.  */
	      return pc + 3;
	    }
	}

      return pc + 1;
    }

  return pc;
}
/* Work around false termination of prologue - GCC PR debug/48827.

   START_PC is the first instruction of a function, PC is its minimal already
   determined advanced address.  Function returns PC if it has nothing to do.

   84 c0                test   %al,%al
   74 23                je     after
   <-- here is 0 lines advance - the false prologue end marker.
   0f 29 85 70 ff ff ff	movaps %xmm0,-0x90(%rbp)
   0f 29 4d 80		movaps %xmm1,-0x80(%rbp)
   0f 29 55 90		movaps %xmm2,-0x70(%rbp)
   0f 29 5d a0		movaps %xmm3,-0x60(%rbp)
   0f 29 65 b0		movaps %xmm4,-0x50(%rbp)
   0f 29 6d c0		movaps %xmm5,-0x40(%rbp)
   0f 29 75 d0		movaps %xmm6,-0x30(%rbp)
   0f 29 7d e0		movaps %xmm7,-0x20(%rbp)
   after:  */

static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  if (pc == start_pc)
    return pc;

  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
	   (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlayed memory, ignored here.  */
  if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;

  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
	  || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
	return pc;

      /* 0b01?????? */
      if ((buf[offset + 2] & 0xc0) == 0x40)
	{
	  /* 8-bit displacement.  */
	  offset += 4;
	}
      /* 0b10?????? */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
	{
	  /* 32-bit displacement.  */
	  offset += 7;
	}
      else
	return pc;
    }

  /* je AFTER */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}
2517 /* Return PC of first real instruction. */
2520 amd64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR start_pc
)
2522 struct amd64_frame_cache cache
;
2524 CORE_ADDR func_addr
;
2526 if (find_pc_partial_function (start_pc
, NULL
, &func_addr
, NULL
))
2528 CORE_ADDR post_prologue_pc
2529 = skip_prologue_using_sal (gdbarch
, func_addr
);
2530 struct compunit_symtab
*cust
= find_pc_compunit_symtab (func_addr
);
2532 /* LLVM backend (Clang/Flang) always emits a line note before the
2533 prologue and another one after. We trust clang and newer Intel
2534 compilers to emit usable line notes. */
2535 if (post_prologue_pc
2537 && COMPUNIT_PRODUCER (cust
) != NULL
2538 && (producer_is_llvm (COMPUNIT_PRODUCER (cust
))
2539 || producer_is_icc_ge_19 (COMPUNIT_PRODUCER (cust
)))))
2540 return std::max (start_pc
, post_prologue_pc
);
2543 amd64_init_frame_cache (&cache
);
2544 pc
= amd64_analyze_prologue (gdbarch
, start_pc
, 0xffffffffffffffffLL
,
2546 if (cache
.frameless_p
)
2549 return amd64_skip_xmm_prologue (pc
, start_pc
);
2553 /* Normal frames. */
2556 amd64_frame_cache_1 (struct frame_info
*this_frame
,
2557 struct amd64_frame_cache
*cache
)
2559 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2560 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2564 cache
->pc
= get_frame_func (this_frame
);
2566 amd64_analyze_prologue (gdbarch
, cache
->pc
, get_frame_pc (this_frame
),
2569 if (cache
->frameless_p
)
2571 /* We didn't find a valid frame. If we're at the start of a
2572 function, or somewhere half-way its prologue, the function's
2573 frame probably hasn't been fully setup yet. Try to
2574 reconstruct the base address for the stack frame by looking
2575 at the stack pointer. For truly "frameless" functions this
2578 if (cache
->saved_sp_reg
!= -1)
2580 /* Stack pointer has been saved. */
2581 get_frame_register (this_frame
, cache
->saved_sp_reg
, buf
);
2582 cache
->saved_sp
= extract_unsigned_integer (buf
, 8, byte_order
);
2584 /* We're halfway aligning the stack. */
2585 cache
->base
= ((cache
->saved_sp
- 8) & 0xfffffffffffffff0LL
) - 8;
2586 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->saved_sp
- 8;
2588 /* This will be added back below. */
2589 cache
->saved_regs
[AMD64_RIP_REGNUM
] -= cache
->base
;
2593 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2594 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
)
2600 get_frame_register (this_frame
, AMD64_RBP_REGNUM
, buf
);
2601 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
);
2604 /* Now that we have the base address for the stack frame we can
2605 calculate the value of %rsp in the calling frame. */
2606 cache
->saved_sp
= cache
->base
+ 16;
2608 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2609 frame we find it at the same offset from the reconstructed base
2610 address. If we're halfway aligning the stack, %rip is handled
2611 differently (see above). */
2612 if (!cache
->frameless_p
|| cache
->saved_sp_reg
== -1)
2613 cache
->saved_regs
[AMD64_RIP_REGNUM
] = 8;
2615 /* Adjust all the saved registers such that they contain addresses
2616 instead of offsets. */
2617 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
2618 if (cache
->saved_regs
[i
] != -1)
2619 cache
->saved_regs
[i
] += cache
->base
;
2624 static struct amd64_frame_cache
*
2625 amd64_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2627 struct amd64_frame_cache
*cache
;
2630 return (struct amd64_frame_cache
*) *this_cache
;
2632 cache
= amd64_alloc_frame_cache ();
2633 *this_cache
= cache
;
2637 amd64_frame_cache_1 (this_frame
, cache
);
2639 catch (const gdb_exception_error
&ex
)
2641 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2648 static enum unwind_stop_reason
2649 amd64_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2652 struct amd64_frame_cache
*cache
=
2653 amd64_frame_cache (this_frame
, this_cache
);
2656 return UNWIND_UNAVAILABLE
;
2658 /* This marks the outermost frame. */
2659 if (cache
->base
== 0)
2660 return UNWIND_OUTERMOST
;
2662 return UNWIND_NO_REASON
;
2666 amd64_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
2667 struct frame_id
*this_id
)
2669 struct amd64_frame_cache
*cache
=
2670 amd64_frame_cache (this_frame
, this_cache
);
2673 (*this_id
) = frame_id_build_unavailable_stack (cache
->pc
);
2674 else if (cache
->base
== 0)
2676 /* This marks the outermost frame. */
2680 (*this_id
) = frame_id_build (cache
->base
+ 16, cache
->pc
);
2683 static struct value
*
2684 amd64_frame_prev_register (struct frame_info
*this_frame
, void **this_cache
,
2687 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2688 struct amd64_frame_cache
*cache
=
2689 amd64_frame_cache (this_frame
, this_cache
);
2691 gdb_assert (regnum
>= 0);
2693 if (regnum
== gdbarch_sp_regnum (gdbarch
) && cache
->saved_sp
)
2694 return frame_unwind_got_constant (this_frame
, regnum
, cache
->saved_sp
);
2696 if (regnum
< AMD64_NUM_SAVED_REGS
&& cache
->saved_regs
[regnum
] != -1)
2697 return frame_unwind_got_memory (this_frame
, regnum
,
2698 cache
->saved_regs
[regnum
]);
2700 return frame_unwind_got_register (this_frame
, regnum
, regnum
);
2703 static const struct frame_unwind amd64_frame_unwind
=
2707 amd64_frame_unwind_stop_reason
,
2708 amd64_frame_this_id
,
2709 amd64_frame_prev_register
,
2711 default_frame_sniffer
2714 /* Generate a bytecode expression to get the value of the saved PC. */
2717 amd64_gen_return_address (struct gdbarch
*gdbarch
,
2718 struct agent_expr
*ax
, struct axs_value
*value
,
2721 /* The following sequence assumes the traditional use of the base
2723 ax_reg (ax
, AMD64_RBP_REGNUM
);
2725 ax_simple (ax
, aop_add
);
2726 value
->type
= register_type (gdbarch
, AMD64_RIP_REGNUM
);
2727 value
->kind
= axs_lvalue_memory
;
2731 /* Signal trampolines. */
2733 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2734 64-bit variants. This would require using identical frame caches
2735 on both platforms. */
2737 static struct amd64_frame_cache
*
2738 amd64_sigtramp_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2740 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2741 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2742 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2743 struct amd64_frame_cache
*cache
;
2749 return (struct amd64_frame_cache
*) *this_cache
;
2751 cache
= amd64_alloc_frame_cache ();
2755 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2756 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
) - 8;
2758 addr
= tdep
->sigcontext_addr (this_frame
);
2759 gdb_assert (tdep
->sc_reg_offset
);
2760 gdb_assert (tdep
->sc_num_regs
<= AMD64_NUM_SAVED_REGS
);
2761 for (i
= 0; i
< tdep
->sc_num_regs
; i
++)
2762 if (tdep
->sc_reg_offset
[i
] != -1)
2763 cache
->saved_regs
[i
] = addr
+ tdep
->sc_reg_offset
[i
];
2767 catch (const gdb_exception_error
&ex
)
2769 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2773 *this_cache
= cache
;
2777 static enum unwind_stop_reason
2778 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2781 struct amd64_frame_cache
*cache
=
2782 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
2785 return UNWIND_UNAVAILABLE
;
2787 return UNWIND_NO_REASON
;
2791 amd64_sigtramp_frame_this_id (struct frame_info
*this_frame
,
2792 void **this_cache
, struct frame_id
*this_id
)
2794 struct amd64_frame_cache
*cache
=
2795 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
2798 (*this_id
) = frame_id_build_unavailable_stack (get_frame_pc (this_frame
));
2799 else if (cache
->base
== 0)
2801 /* This marks the outermost frame. */
2805 (*this_id
) = frame_id_build (cache
->base
+ 16, get_frame_pc (this_frame
));
2808 static struct value
*
2809 amd64_sigtramp_frame_prev_register (struct frame_info
*this_frame
,
2810 void **this_cache
, int regnum
)
2812 /* Make sure we've initialized the cache. */
2813 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
2815 return amd64_frame_prev_register (this_frame
, this_cache
, regnum
);
2819 amd64_sigtramp_frame_sniffer (const struct frame_unwind
*self
,
2820 struct frame_info
*this_frame
,
2823 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_frame_arch (this_frame
));
2825 /* We shouldn't even bother if we don't have a sigcontext_addr
2827 if (tdep
->sigcontext_addr
== NULL
)
2830 if (tdep
->sigtramp_p
!= NULL
)
2832 if (tdep
->sigtramp_p (this_frame
))
2836 if (tdep
->sigtramp_start
!= 0)
2838 CORE_ADDR pc
= get_frame_pc (this_frame
);
2840 gdb_assert (tdep
->sigtramp_end
!= 0);
2841 if (pc
>= tdep
->sigtramp_start
&& pc
< tdep
->sigtramp_end
)
2848 static const struct frame_unwind amd64_sigtramp_frame_unwind
=
2852 amd64_sigtramp_frame_unwind_stop_reason
,
2853 amd64_sigtramp_frame_this_id
,
2854 amd64_sigtramp_frame_prev_register
,
2856 amd64_sigtramp_frame_sniffer
2861 amd64_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
2863 struct amd64_frame_cache
*cache
=
2864 amd64_frame_cache (this_frame
, this_cache
);
2869 static const struct frame_base amd64_frame_base
=
2871 &amd64_frame_unwind
,
2872 amd64_frame_base_address
,
2873 amd64_frame_base_address
,
2874 amd64_frame_base_address
2877 /* Normal frames, but in a function epilogue. */
2879 /* Implement the stack_frame_destroyed_p gdbarch method.
2881 The epilogue is defined here as the 'ret' instruction, which will
2882 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2883 the function's stack frame. */
2886 amd64_stack_frame_destroyed_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
2889 struct compunit_symtab
*cust
;
2891 cust
= find_pc_compunit_symtab (pc
);
2892 if (cust
!= NULL
&& COMPUNIT_EPILOGUE_UNWIND_VALID (cust
))
2895 if (target_read_memory (pc
, &insn
, 1))
2896 return 0; /* Can't read memory at pc. */
2898 if (insn
!= 0xc3) /* 'ret' instruction. */
2905 amd64_epilogue_frame_sniffer (const struct frame_unwind
*self
,
2906 struct frame_info
*this_frame
,
2907 void **this_prologue_cache
)
2909 if (frame_relative_level (this_frame
) == 0)
2910 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame
),
2911 get_frame_pc (this_frame
));
2916 static struct amd64_frame_cache
*
2917 amd64_epilogue_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2919 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2920 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2921 struct amd64_frame_cache
*cache
;
2925 return (struct amd64_frame_cache
*) *this_cache
;
2927 cache
= amd64_alloc_frame_cache ();
2928 *this_cache
= cache
;
2932 /* Cache base will be %esp plus cache->sp_offset (-8). */
2933 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2934 cache
->base
= extract_unsigned_integer (buf
, 8,
2935 byte_order
) + cache
->sp_offset
;
2937 /* Cache pc will be the frame func. */
2938 cache
->pc
= get_frame_pc (this_frame
);
2940 /* The saved %esp will be at cache->base plus 16. */
2941 cache
->saved_sp
= cache
->base
+ 16;
2943 /* The saved %eip will be at cache->base plus 8. */
2944 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->base
+ 8;
2948 catch (const gdb_exception_error
&ex
)
2950 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2957 static enum unwind_stop_reason
2958 amd64_epilogue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2961 struct amd64_frame_cache
*cache
2962 = amd64_epilogue_frame_cache (this_frame
, this_cache
);
2965 return UNWIND_UNAVAILABLE
;
2967 return UNWIND_NO_REASON
;
2971 amd64_epilogue_frame_this_id (struct frame_info
*this_frame
,
2973 struct frame_id
*this_id
)
2975 struct amd64_frame_cache
*cache
= amd64_epilogue_frame_cache (this_frame
,
2979 (*this_id
) = frame_id_build_unavailable_stack (cache
->pc
);
2981 (*this_id
) = frame_id_build (cache
->base
+ 8, cache
->pc
);
2984 static const struct frame_unwind amd64_epilogue_frame_unwind
=
2988 amd64_epilogue_frame_unwind_stop_reason
,
2989 amd64_epilogue_frame_this_id
,
2990 amd64_frame_prev_register
,
2992 amd64_epilogue_frame_sniffer
2995 static struct frame_id
2996 amd64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
3000 fp
= get_frame_register_unsigned (this_frame
, AMD64_RBP_REGNUM
);
3002 return frame_id_build (fp
+ 16, get_frame_pc (this_frame
));
3005 /* 16 byte align the SP per frame requirements. */
3008 amd64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
3010 return sp
& -(CORE_ADDR
)16;
3014 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
3015 in the floating-point register set REGSET to register cache
3016 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
3019 amd64_supply_fpregset (const struct regset
*regset
, struct regcache
*regcache
,
3020 int regnum
, const void *fpregs
, size_t len
)
3022 struct gdbarch
*gdbarch
= regcache
->arch ();
3023 const struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3025 gdb_assert (len
>= tdep
->sizeof_fpregset
);
3026 amd64_supply_fxsave (regcache
, regnum
, fpregs
);
3029 /* Collect register REGNUM from the register cache REGCACHE and store
3030 it in the buffer specified by FPREGS and LEN as described by the
3031 floating-point register set REGSET. If REGNUM is -1, do this for
3032 all registers in REGSET. */
3035 amd64_collect_fpregset (const struct regset
*regset
,
3036 const struct regcache
*regcache
,
3037 int regnum
, void *fpregs
, size_t len
)
3039 struct gdbarch
*gdbarch
= regcache
->arch ();
3040 const struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3042 gdb_assert (len
>= tdep
->sizeof_fpregset
);
3043 amd64_collect_fxsave (regcache
, regnum
, fpregs
);
3046 const struct regset amd64_fpregset
=
3048 NULL
, amd64_supply_fpregset
, amd64_collect_fpregset
3052 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
3053 %rdi. We expect its value to be a pointer to the jmp_buf structure
3054 from which we extract the address that we will land at. This
3055 address is copied into PC. This routine returns non-zero on
3059 amd64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
3063 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
3064 int jb_pc_offset
= gdbarch_tdep (gdbarch
)->jb_pc_offset
;
3065 int len
= TYPE_LENGTH (builtin_type (gdbarch
)->builtin_func_ptr
);
3067 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3068 longjmp will land. */
3069 if (jb_pc_offset
== -1)
3072 get_frame_register (frame
, AMD64_RDI_REGNUM
, buf
);
3073 jb_addr
= extract_typed_address
3074 (buf
, builtin_type (gdbarch
)->builtin_data_ptr
);
3075 if (target_read_memory (jb_addr
+ jb_pc_offset
, buf
, len
))
3078 *pc
= extract_typed_address (buf
, builtin_type (gdbarch
)->builtin_func_ptr
);
3083 static const int amd64_record_regmap
[] =
3085 AMD64_RAX_REGNUM
, AMD64_RCX_REGNUM
, AMD64_RDX_REGNUM
, AMD64_RBX_REGNUM
,
3086 AMD64_RSP_REGNUM
, AMD64_RBP_REGNUM
, AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
3087 AMD64_R8_REGNUM
, AMD64_R9_REGNUM
, AMD64_R10_REGNUM
, AMD64_R11_REGNUM
,
3088 AMD64_R12_REGNUM
, AMD64_R13_REGNUM
, AMD64_R14_REGNUM
, AMD64_R15_REGNUM
,
3089 AMD64_RIP_REGNUM
, AMD64_EFLAGS_REGNUM
, AMD64_CS_REGNUM
, AMD64_SS_REGNUM
,
3090 AMD64_DS_REGNUM
, AMD64_ES_REGNUM
, AMD64_FS_REGNUM
, AMD64_GS_REGNUM
3093 /* Implement the "in_indirect_branch_thunk" gdbarch function. */
3096 amd64_in_indirect_branch_thunk (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3098 return x86_in_indirect_branch_thunk (pc
, amd64_register_names
,
3104 amd64_init_abi (struct gdbarch_info info
, struct gdbarch
*gdbarch
,
3105 const target_desc
*default_tdesc
)
3107 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3108 const struct target_desc
*tdesc
= info
.target_desc
;
3109 static const char *const stap_integer_prefixes
[] = { "$", NULL
};
3110 static const char *const stap_register_prefixes
[] = { "%", NULL
};
3111 static const char *const stap_register_indirection_prefixes
[] = { "(",
3113 static const char *const stap_register_indirection_suffixes
[] = { ")",
3116 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3117 floating-point registers. */
3118 tdep
->sizeof_fpregset
= I387_SIZEOF_FXSAVE
;
3119 tdep
->fpregset
= &amd64_fpregset
;
3121 if (! tdesc_has_registers (tdesc
))
3122 tdesc
= default_tdesc
;
3123 tdep
->tdesc
= tdesc
;
3125 tdep
->num_core_regs
= AMD64_NUM_GREGS
+ I387_NUM_REGS
;
3126 tdep
->register_names
= amd64_register_names
;
3128 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.avx512") != NULL
)
3130 tdep
->zmmh_register_names
= amd64_zmmh_names
;
3131 tdep
->k_register_names
= amd64_k_names
;
3132 tdep
->xmm_avx512_register_names
= amd64_xmm_avx512_names
;
3133 tdep
->ymm16h_register_names
= amd64_ymmh_avx512_names
;
3135 tdep
->num_zmm_regs
= 32;
3136 tdep
->num_xmm_avx512_regs
= 16;
3137 tdep
->num_ymm_avx512_regs
= 16;
3139 tdep
->zmm0h_regnum
= AMD64_ZMM0H_REGNUM
;
3140 tdep
->k0_regnum
= AMD64_K0_REGNUM
;
3141 tdep
->xmm16_regnum
= AMD64_XMM16_REGNUM
;
3142 tdep
->ymm16h_regnum
= AMD64_YMM16H_REGNUM
;
3145 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.avx") != NULL
)
3147 tdep
->ymmh_register_names
= amd64_ymmh_names
;
3148 tdep
->num_ymm_regs
= 16;
3149 tdep
->ymm0h_regnum
= AMD64_YMM0H_REGNUM
;
3152 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.mpx") != NULL
)
3154 tdep
->mpx_register_names
= amd64_mpx_names
;
3155 tdep
->bndcfgu_regnum
= AMD64_BNDCFGU_REGNUM
;
3156 tdep
->bnd0r_regnum
= AMD64_BND0R_REGNUM
;
3159 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.segments") != NULL
)
3161 tdep
->fsbase_regnum
= AMD64_FSBASE_REGNUM
;
3164 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.pkeys") != NULL
)
3166 tdep
->pkeys_register_names
= amd64_pkeys_names
;
3167 tdep
->pkru_regnum
= AMD64_PKRU_REGNUM
;
3168 tdep
->num_pkeys_regs
= 1;
3171 tdep
->num_byte_regs
= 20;
3172 tdep
->num_word_regs
= 16;
3173 tdep
->num_dword_regs
= 16;
3174 /* Avoid wiring in the MMX registers for now. */
3175 tdep
->num_mmx_regs
= 0;
3177 set_gdbarch_pseudo_register_read_value (gdbarch
,
3178 amd64_pseudo_register_read_value
);
3179 set_gdbarch_pseudo_register_write (gdbarch
,
3180 amd64_pseudo_register_write
);
3181 set_gdbarch_ax_pseudo_register_collect (gdbarch
,
3182 amd64_ax_pseudo_register_collect
);
3184 set_tdesc_pseudo_register_name (gdbarch
, amd64_pseudo_register_name
);
3186 /* AMD64 has an FPU and 16 SSE registers. */
3187 tdep
->st0_regnum
= AMD64_ST0_REGNUM
;
3188 tdep
->num_xmm_regs
= 16;
3190 /* This is what all the fuss is about. */
3191 set_gdbarch_long_bit (gdbarch
, 64);
3192 set_gdbarch_long_long_bit (gdbarch
, 64);
3193 set_gdbarch_ptr_bit (gdbarch
, 64);
3195 /* In contrast to the i386, on AMD64 a `long double' actually takes
3196 up 128 bits, even though it's still based on the i387 extended
3197 floating-point format which has only 80 significant bits. */
3198 set_gdbarch_long_double_bit (gdbarch
, 128);
3200 set_gdbarch_num_regs (gdbarch
, AMD64_NUM_REGS
);
3202 /* Register numbers of various important registers. */
3203 set_gdbarch_sp_regnum (gdbarch
, AMD64_RSP_REGNUM
); /* %rsp */
3204 set_gdbarch_pc_regnum (gdbarch
, AMD64_RIP_REGNUM
); /* %rip */
3205 set_gdbarch_ps_regnum (gdbarch
, AMD64_EFLAGS_REGNUM
); /* %eflags */
3206 set_gdbarch_fp0_regnum (gdbarch
, AMD64_ST0_REGNUM
); /* %st(0) */
3208 /* The "default" register numbering scheme for AMD64 is referred to
3209 as the "DWARF Register Number Mapping" in the System V psABI.
3210 The preferred debugging format for all known AMD64 targets is
3211 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3212 DWARF-1), but we provide the same mapping just in case. This
3213 mapping is also used for stabs, which GCC does support. */
3214 set_gdbarch_stab_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
3215 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
3217 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3218 be in use on any of the supported AMD64 targets. */
3220 /* Call dummy code. */
3221 set_gdbarch_push_dummy_call (gdbarch
, amd64_push_dummy_call
);
3222 set_gdbarch_frame_align (gdbarch
, amd64_frame_align
);
3223 set_gdbarch_frame_red_zone_size (gdbarch
, 128);
3225 set_gdbarch_convert_register_p (gdbarch
, i387_convert_register_p
);
3226 set_gdbarch_register_to_value (gdbarch
, i387_register_to_value
);
3227 set_gdbarch_value_to_register (gdbarch
, i387_value_to_register
);
3229 set_gdbarch_return_value (gdbarch
, amd64_return_value
);
3231 set_gdbarch_skip_prologue (gdbarch
, amd64_skip_prologue
);
3233 tdep
->record_regmap
= amd64_record_regmap
;
3235 set_gdbarch_dummy_id (gdbarch
, amd64_dummy_id
);
3237 /* Hook the function epilogue frame unwinder. This unwinder is
3238 appended to the list first, so that it supercedes the other
3239 unwinders in function epilogues. */
3240 frame_unwind_prepend_unwinder (gdbarch
, &amd64_epilogue_frame_unwind
);
3242 /* Hook the prologue-based frame unwinders. */
3243 frame_unwind_append_unwinder (gdbarch
, &amd64_sigtramp_frame_unwind
);
3244 frame_unwind_append_unwinder (gdbarch
, &amd64_frame_unwind
);
3245 frame_base_set_default (gdbarch
, &amd64_frame_base
);
3247 set_gdbarch_get_longjmp_target (gdbarch
, amd64_get_longjmp_target
);
3249 set_gdbarch_relocate_instruction (gdbarch
, amd64_relocate_instruction
);
3251 set_gdbarch_gen_return_address (gdbarch
, amd64_gen_return_address
);
3253 /* SystemTap variables and functions. */
3254 set_gdbarch_stap_integer_prefixes (gdbarch
, stap_integer_prefixes
);
3255 set_gdbarch_stap_register_prefixes (gdbarch
, stap_register_prefixes
);
3256 set_gdbarch_stap_register_indirection_prefixes (gdbarch
,
3257 stap_register_indirection_prefixes
);
3258 set_gdbarch_stap_register_indirection_suffixes (gdbarch
,
3259 stap_register_indirection_suffixes
);
3260 set_gdbarch_stap_is_single_operand (gdbarch
,
3261 i386_stap_is_single_operand
);
3262 set_gdbarch_stap_parse_special_token (gdbarch
,
3263 i386_stap_parse_special_token
);
3264 set_gdbarch_insn_is_call (gdbarch
, amd64_insn_is_call
);
3265 set_gdbarch_insn_is_ret (gdbarch
, amd64_insn_is_ret
);
3266 set_gdbarch_insn_is_jump (gdbarch
, amd64_insn_is_jump
);
3268 set_gdbarch_in_indirect_branch_thunk (gdbarch
,
3269 amd64_in_indirect_branch_thunk
);
3271 register_amd64_ravenscar_ops (gdbarch
);
3274 /* Initialize ARCH for x86-64, no osabi. */
3277 amd64_none_init_abi (gdbarch_info info
, gdbarch
*arch
)
3279 amd64_init_abi (info
, arch
, amd64_target_description (X86_XSTATE_SSE_MASK
,
3283 static struct type
*
3284 amd64_x32_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
3286 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3288 switch (regnum
- tdep
->eax_regnum
)
3290 case AMD64_RBP_REGNUM
: /* %ebp */
3291 case AMD64_RSP_REGNUM
: /* %esp */
3292 return builtin_type (gdbarch
)->builtin_data_ptr
;
3293 case AMD64_RIP_REGNUM
: /* %eip */
3294 return builtin_type (gdbarch
)->builtin_func_ptr
;
3297 return i386_pseudo_register_type (gdbarch
, regnum
);
3301 amd64_x32_init_abi (struct gdbarch_info info
, struct gdbarch
*gdbarch
,
3302 const target_desc
*default_tdesc
)
3304 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3306 amd64_init_abi (info
, gdbarch
, default_tdesc
);
3308 tdep
->num_dword_regs
= 17;
3309 set_tdesc_pseudo_register_type (gdbarch
, amd64_x32_pseudo_register_type
);
3311 set_gdbarch_long_bit (gdbarch
, 32);
3312 set_gdbarch_ptr_bit (gdbarch
, 32);
3315 /* Initialize ARCH for x64-32, no osabi. */
3318 amd64_x32_none_init_abi (gdbarch_info info
, gdbarch
*arch
)
3320 amd64_x32_init_abi (info
, arch
,
3321 amd64_target_description (X86_XSTATE_SSE_MASK
, true));
3324 /* Return the target description for a specified XSAVE feature mask. */
3326 const struct target_desc
*
3327 amd64_target_description (uint64_t xcr0
, bool segments
)
3329 static target_desc
*amd64_tdescs \
3330 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
3331 target_desc
**tdesc
;
3333 tdesc
= &amd64_tdescs
[(xcr0
& X86_XSTATE_AVX
) ? 1 : 0]
3334 [(xcr0
& X86_XSTATE_MPX
) ? 1 : 0]
3335 [(xcr0
& X86_XSTATE_AVX512
) ? 1 : 0]
3336 [(xcr0
& X86_XSTATE_PKRU
) ? 1 : 0]
3340 *tdesc
= amd64_create_target_description (xcr0
, false, false,
3346 void _initialize_amd64_tdep ();
3348 _initialize_amd64_tdep ()
3350 gdbarch_register_osabi (bfd_arch_i386
, bfd_mach_x86_64
, GDB_OSABI_NONE
,
3351 amd64_none_init_abi
);
3352 gdbarch_register_osabi (bfd_arch_i386
, bfd_mach_x64_32
, GDB_OSABI_NONE
,
3353 amd64_x32_none_init_abi
);
3357 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3358 sense that the instruction pointer and data pointer are simply
3359 64-bit offsets into the code segment and the data segment instead
3360 of a selector offset pair. The functions below store the upper 32
3361 bits of these pointers (instead of just the 16-bits of the segment
3364 /* Fill register REGNUM in REGCACHE with the appropriate
3365 floating-point or SSE register value from *FXSAVE. If REGNUM is
3366 -1, do this for all registers. This function masks off any of the
3367 reserved bits in *FXSAVE. */
3370 amd64_supply_fxsave (struct regcache
*regcache
, int regnum
,
3373 struct gdbarch
*gdbarch
= regcache
->arch ();
3374 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3376 i387_supply_fxsave (regcache
, regnum
, fxsave
);
3379 && gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3381 const gdb_byte
*regs
= (const gdb_byte
*) fxsave
;
3383 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3384 regcache
->raw_supply (I387_FISEG_REGNUM (tdep
), regs
+ 12);
3385 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3386 regcache
->raw_supply (I387_FOSEG_REGNUM (tdep
), regs
+ 20);
3390 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3393 amd64_supply_xsave (struct regcache
*regcache
, int regnum
,
3396 struct gdbarch
*gdbarch
= regcache
->arch ();
3397 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3399 i387_supply_xsave (regcache
, regnum
, xsave
);
3402 && gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3404 const gdb_byte
*regs
= (const gdb_byte
*) xsave
;
3407 clear_bv
= i387_xsave_get_clear_bv (gdbarch
, xsave
);
3409 /* If the FISEG and FOSEG registers have not been initialised yet
3410 (their CLEAR_BV bit is set) then their default values of zero will
3411 have already been setup by I387_SUPPLY_XSAVE. */
3412 if (!(clear_bv
& X86_XSTATE_X87
))
3414 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3415 regcache
->raw_supply (I387_FISEG_REGNUM (tdep
), regs
+ 12);
3416 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3417 regcache
->raw_supply (I387_FOSEG_REGNUM (tdep
), regs
+ 20);
3422 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3423 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3424 all registers. This function doesn't touch any of the reserved
3428 amd64_collect_fxsave (const struct regcache
*regcache
, int regnum
,
3431 struct gdbarch
*gdbarch
= regcache
->arch ();
3432 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3433 gdb_byte
*regs
= (gdb_byte
*) fxsave
;
3435 i387_collect_fxsave (regcache
, regnum
, fxsave
);
3437 if (gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3439 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3440 regcache
->raw_collect (I387_FISEG_REGNUM (tdep
), regs
+ 12);
3441 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3442 regcache
->raw_collect (I387_FOSEG_REGNUM (tdep
), regs
+ 20);
3446 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3449 amd64_collect_xsave (const struct regcache
*regcache
, int regnum
,
3450 void *xsave
, int gcore
)
3452 struct gdbarch
*gdbarch
= regcache
->arch ();
3453 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3454 gdb_byte
*regs
= (gdb_byte
*) xsave
;
3456 i387_collect_xsave (regcache
, regnum
, xsave
, gcore
);
3458 if (gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3460 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3461 regcache
->raw_collect (I387_FISEG_REGNUM (tdep
),
3463 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3464 regcache
->raw_collect (I387_FOSEG_REGNUM (tdep
),