1 /* Target-dependent code for AMD64.
3 Copyright (C) 2001-2017 Free Software Foundation, Inc.
5 Contributed by Jiri Smid, SuSE Labs.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "opcode/i386.h"
25 #include "arch-utils.h"
27 #include "dummy-frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
40 #include "amd64-tdep.h"
41 #include "i387-tdep.h"
42 #include "x86-xstate.h"
45 #include "features/i386/amd64.c"
46 #include "features/i386/amd64-avx.c"
47 #include "features/i386/amd64-mpx.c"
48 #include "features/i386/amd64-avx-mpx.c"
49 #include "features/i386/amd64-avx-avx512.c"
50 #include "features/i386/amd64-avx-mpx-avx512-pku.c"
52 #include "features/i386/x32.c"
53 #include "features/i386/x32-avx.c"
54 #include "features/i386/x32-avx-avx512.c"
59 /* Note that the AMD64 architecture was previously known as x86-64.
60 The latter is (forever) engraved into the canonical system name as
61 returned by config.guess, and used as the name for the AMD64 port
62 of GNU/Linux. The BSD's have renamed their ports to amd64; they
63 don't like to shout. For GDB we prefer the amd64_-prefix over the
64 x86_64_-prefix since it's so much easier to type. */
66 /* Register information. */
/* NOTE(review): this listing is a fragmentary, line-numbered dump; the
   embedded numbers (68, 70, ...) are the upstream file's own line numbers,
   and gaps in them show that braces and some lines are missing.  Do not
   compile as-is; reconstruct against upstream amd64-tdep.c.  */
/* Raw (non-pseudo) register names, indexed by GDB register number.  */
68 static const char *amd64_register_names
[] =
70 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
72 /* %r8 is indeed register number 8. */
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
74 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
76 /* %st0 is register number 24. */
77 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
78 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
80 /* %xmm0 is register number 40. */
81 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
82 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
/* AVX %ymm0-%ymm15 pseudo-register names.  */
86 static const char *amd64_ymm_names
[] =
88 "ymm0", "ymm1", "ymm2", "ymm3",
89 "ymm4", "ymm5", "ymm6", "ymm7",
90 "ymm8", "ymm9", "ymm10", "ymm11",
91 "ymm12", "ymm13", "ymm14", "ymm15"
/* AVX-512 extends the ymm set to %ymm16-%ymm31.  */
94 static const char *amd64_ymm_avx512_names
[] =
96 "ymm16", "ymm17", "ymm18", "ymm19",
97 "ymm20", "ymm21", "ymm22", "ymm23",
98 "ymm24", "ymm25", "ymm26", "ymm27",
99 "ymm28", "ymm29", "ymm30", "ymm31"
/* Upper halves of the ymm registers (raw registers backing the ymm
   pseudo-registers).  */
102 static const char *amd64_ymmh_names
[] =
104 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
105 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
106 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
107 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
110 static const char *amd64_ymmh_avx512_names
[] =
112 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
113 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
114 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
115 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
/* MPX bound and config registers.  */
118 static const char *amd64_mpx_names
[] =
120 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
/* AVX-512 opmask registers %k0-%k7.  */
123 static const char *amd64_k_names
[] =
125 "k0", "k1", "k2", "k3",
126 "k4", "k5", "k6", "k7"
/* Upper halves of the zmm registers.  */
129 static const char *amd64_zmmh_names
[] =
131 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
132 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
133 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
134 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
135 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
136 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
137 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
138 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
/* Full zmm pseudo-register names.  */
141 static const char *amd64_zmm_names
[] =
143 "zmm0", "zmm1", "zmm2", "zmm3",
144 "zmm4", "zmm5", "zmm6", "zmm7",
145 "zmm8", "zmm9", "zmm10", "zmm11",
146 "zmm12", "zmm13", "zmm14", "zmm15",
147 "zmm16", "zmm17", "zmm18", "zmm19",
148 "zmm20", "zmm21", "zmm22", "zmm23",
149 "zmm24", "zmm25", "zmm26", "zmm27",
150 "zmm28", "zmm29", "zmm30", "zmm31"
/* AVX-512 %xmm16-%xmm31 pseudo-register names.  */
153 static const char *amd64_xmm_avx512_names
[] = {
154 "xmm16", "xmm17", "xmm18", "xmm19",
155 "xmm20", "xmm21", "xmm22", "xmm23",
156 "xmm24", "xmm25", "xmm26", "xmm27",
157 "xmm28", "xmm29", "xmm30", "xmm31"
/* PKU register names — initializer elided in this dump.  */
160 static const char *amd64_pkeys_names
[] = {
164 /* DWARF Register Number Mapping as defined in the System V psABI,
/* NOTE(review): fragmentary dump — the mapping entries for RBP, RSP, RIP
   and the control/selector registers referenced by the comments below are
   missing from this listing; see upstream for the full table.  */
167 static int amd64_dwarf_regmap
[] =
169 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
170 AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
,
171 AMD64_RCX_REGNUM
, AMD64_RBX_REGNUM
,
172 AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
174 /* Frame Pointer Register RBP. */
177 /* Stack Pointer Register RSP. */
180 /* Extended Integer Registers 8 - 15. */
181 AMD64_R8_REGNUM
, /* %r8 */
182 AMD64_R9_REGNUM
, /* %r9 */
183 AMD64_R10_REGNUM
, /* %r10 */
184 AMD64_R11_REGNUM
, /* %r11 */
185 AMD64_R12_REGNUM
, /* %r12 */
186 AMD64_R13_REGNUM
, /* %r13 */
187 AMD64_R14_REGNUM
, /* %r14 */
188 AMD64_R15_REGNUM
, /* %r15 */
190 /* Return Address RA. Mapped to RIP. */
193 /* SSE Registers 0 - 7. */
194 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
195 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
196 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
197 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
199 /* Extended SSE Registers 8 - 15. */
200 AMD64_XMM0_REGNUM
+ 8, AMD64_XMM0_REGNUM
+ 9,
201 AMD64_XMM0_REGNUM
+ 10, AMD64_XMM0_REGNUM
+ 11,
202 AMD64_XMM0_REGNUM
+ 12, AMD64_XMM0_REGNUM
+ 13,
203 AMD64_XMM0_REGNUM
+ 14, AMD64_XMM0_REGNUM
+ 15,
205 /* Floating Point Registers 0-7. */
206 AMD64_ST0_REGNUM
+ 0, AMD64_ST0_REGNUM
+ 1,
207 AMD64_ST0_REGNUM
+ 2, AMD64_ST0_REGNUM
+ 3,
208 AMD64_ST0_REGNUM
+ 4, AMD64_ST0_REGNUM
+ 5,
209 AMD64_ST0_REGNUM
+ 6, AMD64_ST0_REGNUM
+ 7,
211 /* MMX Registers 0 - 7.
212 We have to handle those registers specifically, as their register
213 number within GDB depends on the target (or they may even not be
214 available at all). */
215 -1, -1, -1, -1, -1, -1, -1, -1,
217 /* Control and Status Flags Register. */
220 /* Selector Registers. */
230 /* Segment Base Address Registers. */
236 /* Special Selector Registers. */
240 /* Floating Point Control Registers. */
/* Element count of the table above (ARRAY_SIZE idiom).  */
246 static const int amd64_dwarf_regmap_len
=
247 (sizeof (amd64_dwarf_regmap
) / sizeof (amd64_dwarf_regmap
[0]));
249 /* Convert DWARF register number REG to the appropriate register
250 number used by GDB. */
/* NOTE(review): fragmentary — the function's return type, braces, the
   declaration of `regnum', part of the xmm->ymm rewrite condition and the
   final `return regnum;' are missing from this dump.  Out-of-range REG is
   presumably mapped to -1 by an elided initializer — confirm upstream.  */
253 amd64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
255 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
256 int ymm0_regnum
= tdep
->ymm0_regnum
;
/* In-range DWARF numbers are looked up in the regmap table.  */
259 if (reg
>= 0 && reg
< amd64_dwarf_regmap_len
)
260 regnum
= amd64_dwarf_regmap
[reg
];
/* When the target has ymm registers, redirect xmm numbers to the
   corresponding ymm pseudo-register (condition partially elided).  */
263 && i386_xmm_regnum_p (gdbarch
, regnum
))
264 regnum
+= ymm0_regnum
- I387_XMM0_REGNUM (tdep
);
269 /* Map architectural register numbers to gdb register numbers. */
/* Index is the hardware encoding (0 = %rax ... 15 = %r15) as used in
   ModRM/SIB fields; value is the GDB register number.  */
271 static const int amd64_arch_regmap
[16] =
273 AMD64_RAX_REGNUM
, /* %rax */
274 AMD64_RCX_REGNUM
, /* %rcx */
275 AMD64_RDX_REGNUM
, /* %rdx */
276 AMD64_RBX_REGNUM
, /* %rbx */
277 AMD64_RSP_REGNUM
, /* %rsp */
278 AMD64_RBP_REGNUM
, /* %rbp */
279 AMD64_RSI_REGNUM
, /* %rsi */
280 AMD64_RDI_REGNUM
, /* %rdi */
281 AMD64_R8_REGNUM
, /* %r8 */
282 AMD64_R9_REGNUM
, /* %r9 */
283 AMD64_R10_REGNUM
, /* %r10 */
284 AMD64_R11_REGNUM
, /* %r11 */
285 AMD64_R12_REGNUM
, /* %r12 */
286 AMD64_R13_REGNUM
, /* %r13 */
287 AMD64_R14_REGNUM
, /* %r14 */
288 AMD64_R15_REGNUM
/* %r15 */
291 static const int amd64_arch_regmap_len
=
292 (sizeof (amd64_arch_regmap
) / sizeof (amd64_arch_regmap
[0]));
294 /* Convert architectural register number REG to the appropriate register
295 number used by GDB. */
/* NOTE(review): return type and braces elided in this dump.  Asserts
   REG is in [0, 16) rather than returning an error.  */
298 amd64_arch_reg_to_regnum (int reg
)
300 gdb_assert (reg
>= 0 && reg
< amd64_arch_regmap_len
);
302 return amd64_arch_regmap
[reg
];
305 /* Register names for byte pseudo-registers. */
/* First 16 entries are the low bytes of the GP registers; the final four
   are the legacy high-byte registers AH..DH (see the #define below).  */
307 static const char *amd64_byte_names
[] =
309 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
310 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
311 "ah", "bh", "ch", "dh"
314 /* Number of lower byte registers. */
315 #define AMD64_NUM_LOWER_BYTE_REGS 16
317 /* Register names for word pseudo-registers. */
/* The empty string skips %sp — presumably so the raw %sp name is not
   shadowed; confirm against upstream.  */
319 static const char *amd64_word_names
[] =
321 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
322 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
325 /* Register names for dword pseudo-registers. */
327 static const char *amd64_dword_names
[] =
329 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
330 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
334 /* Return the name of register REGNUM. */
/* NOTE(review): fragmentary — return type, braces, and the "eip" dword
   entry's closing of the array above are elided.  Dispatches on the
   pseudo-register class predicates and indexes the matching name table
   relative to that class's first register number; anything unrecognized
   falls through to the generic i386 implementation.  */
337 amd64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
339 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
340 if (i386_byte_regnum_p (gdbarch
, regnum
))
341 return amd64_byte_names
[regnum
- tdep
->al_regnum
];
342 else if (i386_zmm_regnum_p (gdbarch
, regnum
))
343 return amd64_zmm_names
[regnum
- tdep
->zmm0_regnum
];
344 else if (i386_ymm_regnum_p (gdbarch
, regnum
))
345 return amd64_ymm_names
[regnum
- tdep
->ymm0_regnum
];
346 else if (i386_ymm_avx512_regnum_p (gdbarch
, regnum
))
347 return amd64_ymm_avx512_names
[regnum
- tdep
->ymm16_regnum
];
348 else if (i386_word_regnum_p (gdbarch
, regnum
))
349 return amd64_word_names
[regnum
- tdep
->ax_regnum
];
350 else if (i386_dword_regnum_p (gdbarch
, regnum
))
351 return amd64_dword_names
[regnum
- tdep
->eax_regnum
];
/* Not an amd64-specific pseudo-register; defer to the i386 code.  */
353 return i386_pseudo_register_name (gdbarch
, regnum
);
/* Read pseudo-register REGNUM (byte or dword class handled here) by
   reading the underlying raw GP register and extracting the relevant
   slice; anything else is delegated to the i386 implementation.
   NOTE(review): fragmentary dump — the `int regnum' parameter line,
   braces, `buf' declaration and `else' keywords are elided.  */
356 static struct value
*
357 amd64_pseudo_register_read_value (struct gdbarch
*gdbarch
,
358 struct regcache
*regcache
,
/* Scratch buffer sized for the raw register backing REGNUM.  */
361 gdb_byte
*raw_buf
= (gdb_byte
*) alloca (register_size (gdbarch
, regnum
));
362 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
363 enum register_status status
;
364 struct value
*result_value
;
/* Allocate the result and mark it as coming from a register so the
   value machinery can track availability.  */
367 result_value
= allocate_value (register_type (gdbarch
, regnum
));
368 VALUE_LVAL (result_value
) = lval_register
;
369 VALUE_REGNUM (result_value
) = regnum
;
370 buf
= value_contents_raw (result_value
);
372 if (i386_byte_regnum_p (gdbarch
, regnum
))
374 int gpnum
= regnum
- tdep
->al_regnum
;
376 /* Extract (always little endian). */
377 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
379 /* Special handling for AH, BH, CH, DH. */
380 status
= regcache_raw_read (regcache
,
381 gpnum
- AMD64_NUM_LOWER_BYTE_REGS
,
/* On success, byte 1 of the raw register is the high-byte value.  */
383 if (status
== REG_VALID
)
384 memcpy (buf
, raw_buf
+ 1, 1);
/* Otherwise propagate unavailability to the whole value.  */
386 mark_value_bytes_unavailable (result_value
, 0,
387 TYPE_LENGTH (value_type (result_value
)));
/* Low-byte registers: copy byte 0 of the raw register.  */
391 status
= regcache_raw_read (regcache
, gpnum
, raw_buf
);
392 if (status
== REG_VALID
)
393 memcpy (buf
, raw_buf
, 1);
395 mark_value_bytes_unavailable (result_value
, 0,
396 TYPE_LENGTH (value_type (result_value
)));
399 else if (i386_dword_regnum_p (gdbarch
, regnum
))
401 int gpnum
= regnum
- tdep
->eax_regnum
;
402 /* Extract (always little endian). */
403 status
= regcache_raw_read (regcache
, gpnum
, raw_buf
);
404 if (status
== REG_VALID
)
405 memcpy (buf
, raw_buf
, 4);
407 mark_value_bytes_unavailable (result_value
, 0,
408 TYPE_LENGTH (value_type (result_value
)));
/* All other pseudo-registers: generic i386 path fills the value.  */
411 i386_pseudo_register_read_into_value (gdbarch
, regcache
, regnum
,
/* Write BUF into pseudo-register REGNUM using a read-modify-write of the
   underlying raw register, so the untouched bytes are preserved.
   NOTE(review): fragmentary dump — return type, braces and `else'
   keywords are elided.  Note the dword path writes only the low 4 bytes
   of the raw register via read-modify-write; whether the upper 32 bits
   are zeroed (as hardware does for 32-bit writes) depends on elided
   context — confirm upstream.  */
418 amd64_pseudo_register_write (struct gdbarch
*gdbarch
,
419 struct regcache
*regcache
,
420 int regnum
, const gdb_byte
*buf
)
422 gdb_byte
*raw_buf
= (gdb_byte
*) alloca (register_size (gdbarch
, regnum
));
423 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
425 if (i386_byte_regnum_p (gdbarch
, regnum
))
427 int gpnum
= regnum
- tdep
->al_regnum
;
429 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
431 /* Read ... AH, BH, CH, DH. */
432 regcache_raw_read (regcache
,
433 gpnum
- AMD64_NUM_LOWER_BYTE_REGS
, raw_buf
);
434 /* ... Modify ... (always little endian). */
435 memcpy (raw_buf
+ 1, buf
, 1);
/* ... and write the merged raw register back.  */
437 regcache_raw_write (regcache
,
438 gpnum
- AMD64_NUM_LOWER_BYTE_REGS
, raw_buf
);
/* Low-byte registers: same read/modify/write on byte 0.  */
443 regcache_raw_read (regcache
, gpnum
, raw_buf
);
444 /* ... Modify ... (always little endian). */
445 memcpy (raw_buf
, buf
, 1);
447 regcache_raw_write (regcache
, gpnum
, raw_buf
);
450 else if (i386_dword_regnum_p (gdbarch
, regnum
))
452 int gpnum
= regnum
- tdep
->eax_regnum
;
455 regcache_raw_read (regcache
, gpnum
, raw_buf
);
456 /* ... Modify ... (always little endian). */
457 memcpy (raw_buf
, buf
, 4);
459 regcache_raw_write (regcache
, gpnum
, raw_buf
);
/* All other pseudo-registers: generic i386 path.  */
462 i386_pseudo_register_write (gdbarch
, regcache
, regnum
, buf
);
465 /* Implement the 'ax_pseudo_register_collect' gdbarch method. */
/* For agent expressions: record which raw register backs pseudo-register
   REGNUM by setting the corresponding bit in AX's register mask.
   NOTE(review): fragmentary — return type, braces, and the `return 0;'
   lines following each ax_reg_mask call are elided in this dump.  */
468 amd64_ax_pseudo_register_collect (struct gdbarch
*gdbarch
,
469 struct agent_expr
*ax
, int regnum
)
471 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
473 if (i386_byte_regnum_p (gdbarch
, regnum
))
475 int gpnum
= regnum
- tdep
->al_regnum
;
/* High-byte registers (AH..DH) map back to the first four GP regs.  */
477 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
478 ax_reg_mask (ax
, gpnum
- AMD64_NUM_LOWER_BYTE_REGS
);
480 ax_reg_mask (ax
, gpnum
);
483 else if (i386_dword_regnum_p (gdbarch
, regnum
))
485 int gpnum
= regnum
- tdep
->eax_regnum
;
487 ax_reg_mask (ax
, gpnum
);
/* Anything else: defer to the generic i386 implementation.  */
491 return i386_ax_pseudo_register_collect (gdbarch
, ax
, regnum
);
496 /* Register classes as defined in the psABI. */
510 /* Return the union class of CLASS1 and CLASS2. See the psABI for
/* Implements the class-merge rules (a)-(f) of the System V AMD64 psABI
   parameter-classification algorithm.  NOTE(review): fragmentary — the
   enum definition, braces and several `return' lines are elided.  */
513 static enum amd64_reg_class
514 amd64_merge_classes (enum amd64_reg_class class1
, enum amd64_reg_class class2
)
516 /* Rule (a): If both classes are equal, this is the resulting class. */
517 if (class1
== class2
)
520 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
521 is the other class. */
522 if (class1
== AMD64_NO_CLASS
)
524 if (class2
== AMD64_NO_CLASS
)
527 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
528 if (class1
== AMD64_MEMORY
|| class2
== AMD64_MEMORY
)
531 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
532 if (class1
== AMD64_INTEGER
|| class2
== AMD64_INTEGER
)
533 return AMD64_INTEGER
;
535 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
536 MEMORY is used as class. */
537 if (class1
== AMD64_X87
|| class1
== AMD64_X87UP
538 || class1
== AMD64_COMPLEX_X87
|| class2
== AMD64_X87
539 || class2
== AMD64_X87UP
|| class2
== AMD64_COMPLEX_X87
)
542 /* Rule (f): Otherwise class SSE is used. */
/* Forward declaration: amd64_classify is mutually used by the aggregate
   classifier below.  */
546 static void amd64_classify (struct type
*type
, enum amd64_reg_class theclass
[2]);
548 /* Return non-zero if TYPE is a non-POD structure or union type. */
551 amd64_non_pod_p (struct type
*type
)
553 /* ??? A class with a base class certainly isn't POD, but does this
554 catch all non-POD structure types? */
555 if (TYPE_CODE (type
) == TYPE_CODE_STRUCT
&& TYPE_N_BASECLASSES (type
) > 0)
561 /* Classify TYPE according to the rules for aggregate (structures and
562 arrays) and union types, and store the result in CLASS. */
/* NOTE(review): fragmentary dump — return type, braces, the `int i'
   and `endpos' declarations, and the array-case `return' are elided.
   Follows the psABI aggregate classification: step 1 rejects large or
   non-POD types to MEMORY, steps 2-3 classify eightbyte-by-eightbyte,
   step 4 does the post-merge cleanup.  */
565 amd64_classify_aggregate (struct type
*type
, enum amd64_reg_class theclass
[2])
567 /* 1. If the size of an object is larger than two eightbytes, or in
568 C++, is a non-POD structure or union type, or contains
569 unaligned fields, it has class memory. */
570 if (TYPE_LENGTH (type
) > 16 || amd64_non_pod_p (type
))
572 theclass
[0] = theclass
[1] = AMD64_MEMORY
;
576 /* 2. Both eightbytes get initialized to class NO_CLASS. */
577 theclass
[0] = theclass
[1] = AMD64_NO_CLASS
;
579 /* 3. Each field of an object is classified recursively so that
580 always two fields are considered. The resulting class is
581 calculated according to the classes of the fields in the
584 if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
)
586 struct type
*subtype
= check_typedef (TYPE_TARGET_TYPE (type
));
588 /* All fields in an array have the same type. */
589 amd64_classify (subtype
, theclass
);
/* An array longer than 8 bytes whose element class left the second
   eightbyte unset replicates the first eightbyte's class.  */
590 if (TYPE_LENGTH (type
) > 8 && theclass
[1] == AMD64_NO_CLASS
)
591 theclass
[1] = theclass
[0];
597 /* Structure or union. */
598 gdb_assert (TYPE_CODE (type
) == TYPE_CODE_STRUCT
599 || TYPE_CODE (type
) == TYPE_CODE_UNION
);
601 for (i
= 0; i
< TYPE_NFIELDS (type
); i
++)
603 struct type
*subtype
= check_typedef (TYPE_FIELD_TYPE (type
, i
));
/* Which eightbyte the field starts in (bit position / 64).  */
604 int pos
= TYPE_FIELD_BITPOS (type
, i
) / 64;
605 enum amd64_reg_class subclass
[2];
606 int bitsize
= TYPE_FIELD_BITSIZE (type
, i
);
/* Non-bitfield members: size comes from the field's type.  */
610 bitsize
= TYPE_LENGTH (subtype
) * 8;
611 endpos
= (TYPE_FIELD_BITPOS (type
, i
) + bitsize
- 1) / 64;
613 /* Ignore static fields. */
614 if (field_is_static (&TYPE_FIELD (type
, i
)))
617 gdb_assert (pos
== 0 || pos
== 1);
619 amd64_classify (subtype
, subclass
);
620 theclass
[pos
] = amd64_merge_classes (theclass
[pos
], subclass
[0]);
621 if (bitsize
<= 64 && pos
== 0 && endpos
== 1)
622 /* This is a bit of an odd case: We have a field that would
623 normally fit in one of the two eightbytes, except that
624 it is placed in a way that this field straddles them.
625 This has been seen with a structure containing an array.
627 The ABI is a bit unclear in this case, but we assume that
628 this field's class (stored in subclass[0]) must also be merged
629 into class[1]. In other words, our field has a piece stored
630 in the second eight-byte, and thus its class applies to
631 the second eight-byte as well.
633 In the case where the field length exceeds 8 bytes,
634 it should not be necessary to merge the field class
635 into class[1]. As LEN > 8, subclass[1] is necessarily
636 different from AMD64_NO_CLASS. If subclass[1] is equal
637 to subclass[0], then the normal class[1]/subclass[1]
638 merging will take care of everything. For subclass[1]
639 to be different from subclass[0], I can only see the case
640 where we have a SSE/SSEUP or X87/X87UP pair, which both
641 use up all 16 bytes of the aggregate, and are already
642 handled just fine (because each portion sits on its own
644 theclass
[1] = amd64_merge_classes (theclass
[1], subclass
[0]);
646 theclass
[1] = amd64_merge_classes (theclass
[1], subclass
[1]);
650 /* 4. Then a post merger cleanup is done: */
652 /* Rule (a): If one of the classes is MEMORY, the whole argument is
654 if (theclass
[0] == AMD64_MEMORY
|| theclass
[1] == AMD64_MEMORY
)
655 theclass
[0] = theclass
[1] = AMD64_MEMORY
;
657 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
659 if (theclass
[0] == AMD64_SSEUP
)
660 theclass
[0] = AMD64_SSE
;
661 if (theclass
[1] == AMD64_SSEUP
&& theclass
[0] != AMD64_SSE
)
662 theclass
[1] = AMD64_SSE
;
665 /* Classify TYPE, and store the result in CLASS. */
/* Scalar entry point of the psABI classification: maps TYPE's type code
   and length onto the register classes; aggregates are handed to
   amd64_classify_aggregate.  NOTE(review): fragmentary — return type and
   braces elided.  Unmatched types keep the NO_CLASS default.  */
668 amd64_classify (struct type
*type
, enum amd64_reg_class theclass
[2])
670 enum type_code code
= TYPE_CODE (type
);
671 int len
= TYPE_LENGTH (type
);
673 theclass
[0] = theclass
[1] = AMD64_NO_CLASS
;
675 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
676 long, long long, and pointers are in the INTEGER class. Similarly,
677 range types, used by languages such as Ada, are also in the INTEGER
679 if ((code
== TYPE_CODE_INT
|| code
== TYPE_CODE_ENUM
680 || code
== TYPE_CODE_BOOL
|| code
== TYPE_CODE_RANGE
681 || code
== TYPE_CODE_CHAR
682 || code
== TYPE_CODE_PTR
|| code
== TYPE_CODE_REF
)
683 && (len
== 1 || len
== 2 || len
== 4 || len
== 8))
684 theclass
[0] = AMD64_INTEGER
;
686 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
688 else if ((code
== TYPE_CODE_FLT
|| code
== TYPE_CODE_DECFLOAT
)
689 && (len
== 4 || len
== 8))
691 theclass
[0] = AMD64_SSE
;
693 /* Arguments of types __float128, _Decimal128 and __m128 are split into
694 two halves. The least significant ones belong to class SSE, the most
695 significant one to class SSEUP. */
696 else if (code
== TYPE_CODE_DECFLOAT
&& len
== 16)
697 /* FIXME: __float128, __m128. */
698 theclass
[0] = AMD64_SSE
, theclass
[1] = AMD64_SSEUP
;
700 /* The 64-bit mantissa of arguments of type long double belongs to
701 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
703 else if (code
== TYPE_CODE_FLT
&& len
== 16)
704 /* Class X87 and X87UP. */
705 theclass
[0] = AMD64_X87
, theclass
[1] = AMD64_X87UP
;
707 /* Arguments of complex T where T is one of the types float or
708 double get treated as if they are implemented as:
716 else if (code
== TYPE_CODE_COMPLEX
&& len
== 8)
717 theclass
[0] = AMD64_SSE
;
718 else if (code
== TYPE_CODE_COMPLEX
&& len
== 16)
719 theclass
[0] = theclass
[1] = AMD64_SSE
;
721 /* A variable of type complex long double is classified as type
723 else if (code
== TYPE_CODE_COMPLEX
&& len
== 32)
724 theclass
[0] = AMD64_COMPLEX_X87
;
/* Aggregates (arrays, structs, unions) use the recursive algorithm.  */
727 else if (code
== TYPE_CODE_ARRAY
|| code
== TYPE_CODE_STRUCT
728 || code
== TYPE_CODE_UNION
)
729 amd64_classify_aggregate (type
, theclass
);
/* Implement the gdbarch return_value method for the System V AMD64 ABI:
   classify TYPE and move the return value between REGCACHE and
   READBUF/WRITEBUF accordingly.  NOTE(review): fragmentary — braces,
   several locals (i, regnum, offset, integer_reg, sse_reg), the ULONGEST
   addr declaration, write-side MEMORY handling, and the switch statement
   skeleton around the numbered class cases are elided in this dump.  */
732 static enum return_value_convention
733 amd64_return_value (struct gdbarch
*gdbarch
, struct value
*function
,
734 struct type
*type
, struct regcache
*regcache
,
735 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
737 enum amd64_reg_class theclass
[2];
738 int len
= TYPE_LENGTH (type
);
/* Register sequences used by the INTEGER and SSE return classes.  */
739 static int integer_regnum
[] = { AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
};
740 static int sse_regnum
[] = { AMD64_XMM0_REGNUM
, AMD64_XMM1_REGNUM
};
/* Callers pass a read buffer or a write buffer, never both.  */
745 gdb_assert (!(readbuf
&& writebuf
));
747 /* 1. Classify the return type with the classification algorithm. */
748 amd64_classify (type
, theclass
);
750 /* 2. If the type has class MEMORY, then the caller provides space
751 for the return value and passes the address of this storage in
752 %rdi as if it were the first argument to the function. In effect,
753 this address becomes a hidden first argument.
755 On return %rax will contain the address that has been passed in
756 by the caller in %rdi. */
757 if (theclass
[0] == AMD64_MEMORY
)
759 /* As indicated by the comment above, the ABI guarantees that we
760 can always find the return value just after the function has
/* Fetch the storage address from %rax and read the value there.  */
767 regcache_raw_read_unsigned (regcache
, AMD64_RAX_REGNUM
, &addr
);
768 read_memory (addr
, readbuf
, TYPE_LENGTH (type
));
771 return RETURN_VALUE_ABI_RETURNS_ADDRESS
;
774 /* 8. If the class is COMPLEX_X87, the real part of the value is
775 returned in %st0 and the imaginary part in %st1. */
776 if (theclass
[0] == AMD64_COMPLEX_X87
)
780 regcache_raw_read (regcache
, AMD64_ST0_REGNUM
, readbuf
);
781 regcache_raw_read (regcache
, AMD64_ST1_REGNUM
, readbuf
+ 16);
/* Write side: set up the FPU state, then store both parts.  */
786 i387_return_value (gdbarch
, regcache
);
787 regcache_raw_write (regcache
, AMD64_ST0_REGNUM
, writebuf
);
788 regcache_raw_write (regcache
, AMD64_ST1_REGNUM
, writebuf
+ 16);
790 /* Fix up the tag word such that both %st(0) and %st(1) are
792 regcache_raw_write_unsigned (regcache
, AMD64_FTAG_REGNUM
, 0xfff);
795 return RETURN_VALUE_REGISTER_CONVENTION
;
798 gdb_assert (theclass
[1] != AMD64_MEMORY
);
799 gdb_assert (len
<= 16);
/* Walk the (at most two) eightbytes of the return value.  */
801 for (i
= 0; len
> 0; i
++, len
-= 8)
809 /* 3. If the class is INTEGER, the next available register
810 of the sequence %rax, %rdx is used. */
811 regnum
= integer_regnum
[integer_reg
++];
815 /* 4. If the class is SSE, the next available SSE register
816 of the sequence %xmm0, %xmm1 is used. */
817 regnum
= sse_regnum
[sse_reg
++];
821 /* 5. If the class is SSEUP, the eightbyte is passed in the
822 upper half of the last used SSE register. */
823 gdb_assert (sse_reg
> 0);
824 regnum
= sse_regnum
[sse_reg
- 1];
829 /* 6. If the class is X87, the value is returned on the X87
830 stack in %st0 as 80-bit x87 number. */
831 regnum
= AMD64_ST0_REGNUM
;
833 i387_return_value (gdbarch
, regcache
);
837 /* 7. If the class is X87UP, the value is returned together
838 with the previous X87 value in %st0. */
839 gdb_assert (i
> 0 && theclass
[0] == AMD64_X87
);
840 regnum
= AMD64_ST0_REGNUM
;
849 gdb_assert (!"Unexpected register class.");
852 gdb_assert (regnum
!= -1);
/* Transfer at most 8 bytes of this eightbyte to/from the register.  */
855 regcache_raw_read_part (regcache
, regnum
, offset
, std::min (len
, 8),
858 regcache_raw_write_part (regcache
, regnum
, offset
, std::min (len
, 8),
862 return RETURN_VALUE_REGISTER_CONVENTION
;
/* Marshal NARGS call arguments into registers and onto the stack at SP
   per the System V AMD64 calling convention; returns the adjusted SP.
   NOTE(review): fragmentary — return type (CORE_ADDR), braces, several
   locals (i, j, element, integer_reg, sse_reg, offset, buf), the
   16-byte SP alignment statement, and the switch skeleton around the
   register-class cases are elided in this dump.  */
867 amd64_push_arguments (struct regcache
*regcache
, int nargs
,
868 struct value
**args
, CORE_ADDR sp
, int struct_return
)
/* Integer-argument register sequence %rdi..%r9 (psABI order).  */
870 static int integer_regnum
[] =
872 AMD64_RDI_REGNUM
, /* %rdi */
873 AMD64_RSI_REGNUM
, /* %rsi */
874 AMD64_RDX_REGNUM
, /* %rdx */
875 AMD64_RCX_REGNUM
, /* %rcx */
876 AMD64_R8_REGNUM
, /* %r8 */
877 AMD64_R9_REGNUM
/* %r9 */
879 static int sse_regnum
[] =
881 /* %xmm0 ... %xmm7 */
882 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
883 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
884 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
885 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
/* Arguments that spill to the stack are collected first, then written
   out after total stack space is known.  */
887 struct value
**stack_args
= XALLOCAVEC (struct value
*, nargs
);
888 int num_stack_args
= 0;
889 int num_elements
= 0;
895 /* Reserve a register for the "hidden" argument. */
899 for (i
= 0; i
< nargs
; i
++)
901 struct type
*type
= value_type (args
[i
]);
902 int len
= TYPE_LENGTH (type
);
903 enum amd64_reg_class theclass
[2];
904 int needed_integer_regs
= 0;
905 int needed_sse_regs
= 0;
908 /* Classify argument. */
909 amd64_classify (type
, theclass
);
911 /* Calculate the number of integer and SSE registers needed for
913 for (j
= 0; j
< 2; j
++)
915 if (theclass
[j
] == AMD64_INTEGER
)
916 needed_integer_regs
++;
917 else if (theclass
[j
] == AMD64_SSE
)
921 /* Check whether enough registers are available, and if the
922 argument should be passed in registers at all. */
923 if (integer_reg
+ needed_integer_regs
> ARRAY_SIZE (integer_regnum
)
924 || sse_reg
+ needed_sse_regs
> ARRAY_SIZE (sse_regnum
)
925 || (needed_integer_regs
== 0 && needed_sse_regs
== 0))
927 /* The argument will be passed on the stack. */
928 num_elements
+= ((len
+ 7) / 8);
929 stack_args
[num_stack_args
++] = args
[i
];
933 /* The argument will be passed in registers. */
934 const gdb_byte
*valbuf
= value_contents (args
[i
]);
937 gdb_assert (len
<= 16);
/* Copy each eightbyte of the value into its assigned register.  */
939 for (j
= 0; len
> 0; j
++, len
-= 8)
947 regnum
= integer_regnum
[integer_reg
++];
951 regnum
= sse_regnum
[sse_reg
++];
/* SSEUP: upper half of the last-used SSE register.  */
955 gdb_assert (sse_reg
> 0);
956 regnum
= sse_regnum
[sse_reg
- 1];
961 gdb_assert (!"Unexpected register class.");
964 gdb_assert (regnum
!= -1);
/* Zero-pad partial eightbytes before writing.  */
965 memset (buf
, 0, sizeof buf
);
966 memcpy (buf
, valbuf
+ j
* 8, std::min (len
, 8));
967 regcache_raw_write_part (regcache
, regnum
, offset
, 8, buf
);
972 /* Allocate space for the arguments on the stack. */
973 sp
-= num_elements
* 8;
975 /* The psABI says that "The end of the input argument area shall be
976 aligned on a 16 byte boundary." */
979 /* Write out the arguments to the stack. */
980 for (i
= 0; i
< num_stack_args
; i
++)
982 struct type
*type
= value_type (stack_args
[i
]);
983 const gdb_byte
*valbuf
= value_contents (stack_args
[i
]);
984 int len
= TYPE_LENGTH (type
);
986 write_memory (sp
+ element
* 8, valbuf
, len
);
987 element
+= ((len
+ 7) / 8);
990 /* The psABI says that "For calls that may call functions that use
991 varargs or stdargs (prototype-less calls or calls to functions
992 containing ellipsis (...) in the declaration) %al is used as
993 hidden argument to specify the number of SSE registers used. */
994 regcache_raw_write_unsigned (regcache
, AMD64_RAX_REGNUM
, sse_reg
);
/* Implement the gdbarch push_dummy_call method: set up registers and
   stack for an inferior function call (arguments, hidden struct-return
   pointer, return address, %rsp/%rbp).  NOTE(review): fragmentary —
   return type (CORE_ADDR), braces, the gdb_byte buf declaration, the SP
   16-byte alignment and final `return sp', and the `if (struct_return)'
   guard around the hidden-argument write are elided in this dump.  */
999 amd64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1000 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1001 int nargs
, struct value
**args
, CORE_ADDR sp
,
1002 int struct_return
, CORE_ADDR struct_addr
)
1004 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1007 /* Pass arguments. */
1008 sp
= amd64_push_arguments (regcache
, nargs
, args
, sp
, struct_return
);
1010 /* Pass "hidden" argument". */
/* struct_addr goes in %rdi, the first integer-argument register.  */
1013 store_unsigned_integer (buf
, 8, byte_order
, struct_addr
);
1014 regcache_cooked_write (regcache
, AMD64_RDI_REGNUM
, buf
);
1017 /* Store return address. */
1019 store_unsigned_integer (buf
, 8, byte_order
, bp_addr
);
1020 write_memory (sp
, buf
, 8);
1022 /* Finally, update the stack pointer... */
1023 store_unsigned_integer (buf
, 8, byte_order
, sp
);
1024 regcache_cooked_write (regcache
, AMD64_RSP_REGNUM
, buf
);
1026 /* ...and fake a frame pointer. */
/* Same buffer: %rbp is set equal to the new %rsp.  */
1027 regcache_cooked_write (regcache
, AMD64_RBP_REGNUM
, buf
);
1032 /* Displaced instruction handling. */
1034 /* A partially decoded instruction.
1035 This contains enough details for displaced stepping purposes. */
/* NOTE(review): fragmentary — the `struct amd64_insn' open/close braces
   and the member declarations following each comment are elided.  */
1039 /* The number of opcode bytes. */
1041 /* The offset of the rex prefix or -1 if not present. */
1043 /* The offset to the first opcode byte. */
1045 /* The offset to the modrm byte or -1 if not present. */
1048 /* The raw instruction. */
1052 struct displaced_step_closure
1054 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1059 /* Details of the instruction. */
1060 struct amd64_insn insn_details
;
1062 /* Amount of space allocated to insn_buf. */
1065 /* The possibly modified insn.
1066 This is a variable-length field. */
/* Trailing [1] array — pre-C99 flexible-array-member idiom; the struct
   is over-allocated to hold the full instruction.  */
1067 gdb_byte insn_buf
[1];
1070 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1071 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1072 at which point delete these in favor of libopcodes' versions). */
/* 1 = opcode byte N is followed by a ModRM byte, indexed by opcode.  */
1074 static const unsigned char onebyte_has_modrm
[256] = {
1075 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1076 /* ------------------------------- */
1077 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
1078 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
1079 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
1080 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
1081 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
1082 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
1083 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
1084 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
1085 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
1086 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
1087 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
1088 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
1089 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
1090 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
1091 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
1092 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
1093 /* ------------------------------- */
1094 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* Same table for two-byte (0x0f-prefixed) opcodes.  */
1097 static const unsigned char twobyte_has_modrm
[256] = {
1098 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1099 /* ------------------------------- */
1100 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
1101 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
1102 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
1103 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
1104 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
1105 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
1106 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
1107 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
1108 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
1109 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
1110 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
1111 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
1112 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
1113 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
1114 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
1115 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
1116 /* ------------------------------- */
1117 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* Forward declaration used by the displaced-stepping code below.  */
1120 static int amd64_syscall_p (const struct amd64_insn
*insn
, int *lengthp
);
/* Return non-zero if PFX is a REX prefix byte (0x40-0x4f).
   NOTE(review): return type and braces elided in this dump.  */
1123 rex_prefix_p (gdb_byte pfx
)
1125 return REX_PREFIX_P (pfx
);
1128 /* Skip the legacy instruction prefixes in INSN.
1129 We assume INSN is properly sentineled so we don't have to worry
1130 about falling off the end of the buffer. */
/* NOTE(review): fragmentary — return type, braces, the switch/loop
   skeleton, and the advance/return statements are elided; only the
   prefix-byte case labels survive.  */
1133 amd64_skip_prefixes (gdb_byte
*insn
)
1139 case DATA_PREFIX_OPCODE
:
1140 case ADDR_PREFIX_OPCODE
:
1141 case CS_PREFIX_OPCODE
:
1142 case DS_PREFIX_OPCODE
:
1143 case ES_PREFIX_OPCODE
:
1144 case FS_PREFIX_OPCODE
:
1145 case GS_PREFIX_OPCODE
:
1146 case SS_PREFIX_OPCODE
:
1147 case LOCK_PREFIX_OPCODE
:
1148 case REPE_PREFIX_OPCODE
:
1149 case REPNE_PREFIX_OPCODE
:
1161 /* Return an integer register (other than RSP) that is unused as an input
1163 In order to not require adding a rex prefix if the insn doesn't already
1164 have one, the result is restricted to RAX ... RDI, sans RSP.
1165 The register numbering of the result follows architecture ordering,
/* NOTE(review): fragmentary — return type, braces, the `if (have_sib)'
   guard implied by the SIB handling, and the `return i' inside the final
   loop are elided in this dump.  */
1169 amd64_get_unused_input_int_reg (const struct amd64_insn
*details
)
1171 /* 1 bit for each reg */
1172 int used_regs_mask
= 0;
1174 /* There can be at most 3 int regs used as inputs in an insn, and we have
1175 7 to choose from (RAX ... RDI, sans RSP).
1176 This allows us to take a conservative approach and keep things simple.
1177 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1178 that implicitly specify RAX. */
/* Unconditionally reserve RAX, RDX and RSP.  */
1181 used_regs_mask
|= 1 << EAX_REG_NUM
;
1182 /* Similarily avoid RDX, implicit operand in divides. */
1183 used_regs_mask
|= 1 << EDX_REG_NUM
;
1185 used_regs_mask
|= 1 << ESP_REG_NUM
;
1187 /* If the opcode is one byte long and there's no ModRM byte,
1188 assume the opcode specifies a register. */
1189 if (details
->opcode_len
== 1 && details
->modrm_offset
== -1)
1190 used_regs_mask
|= 1 << (details
->raw_insn
[details
->opcode_offset
] & 7);
1192 /* Mark used regs in the modrm/sib bytes. */
1193 if (details
->modrm_offset
!= -1)
1195 int modrm
= details
->raw_insn
[details
->modrm_offset
];
1196 int mod
= MODRM_MOD_FIELD (modrm
);
1197 int reg
= MODRM_REG_FIELD (modrm
);
1198 int rm
= MODRM_RM_FIELD (modrm
);
/* mod != 3 with rm == 4 means a SIB byte follows the ModRM byte.  */
1199 int have_sib
= mod
!= 3 && rm
== 4;
1201 /* Assume the reg field of the modrm byte specifies a register. */
1202 used_regs_mask
|= 1 << reg
;
/* SIB present: both its base and index fields name registers.  */
1206 int base
= SIB_BASE_FIELD (details
->raw_insn
[details
->modrm_offset
+ 1]);
1207 int idx
= SIB_INDEX_FIELD (details
->raw_insn
[details
->modrm_offset
+ 1]);
1208 used_regs_mask
|= 1 << base
;
1209 used_regs_mask
|= 1 << idx
;
/* No SIB: the rm field itself names a register.  */
1213 used_regs_mask
|= 1 << rm
;
/* At most 8 low bits can be set, and at least one must be clear.  */
1217 gdb_assert (used_regs_mask
< 256);
1218 gdb_assert (used_regs_mask
!= 255);
1220 /* Finally, find a free reg. */
1224 for (i
= 0; i
< 8; ++i
)
1226 if (! (used_regs_mask
& (1 << i
)))
1230 /* We shouldn't get here. */
1231 internal_error (__FILE__
, __LINE__
, _("unable to find free reg"));
1238 amd64_get_insn_details (gdb_byte
*insn
, struct amd64_insn
*details
)
1240 gdb_byte
*start
= insn
;
1243 details
->raw_insn
= insn
;
1245 details
->opcode_len
= -1;
1246 details
->rex_offset
= -1;
1247 details
->opcode_offset
= -1;
1248 details
->modrm_offset
= -1;
1250 /* Skip legacy instruction prefixes. */
1251 insn
= amd64_skip_prefixes (insn
);
1253 /* Skip REX instruction prefix. */
1254 if (rex_prefix_p (*insn
))
1256 details
->rex_offset
= insn
- start
;
1260 details
->opcode_offset
= insn
- start
;
1262 if (*insn
== TWO_BYTE_OPCODE_ESCAPE
)
1264 /* Two or three-byte opcode. */
1266 need_modrm
= twobyte_has_modrm
[*insn
];
1268 /* Check for three-byte opcode. */
1278 details
->opcode_len
= 3;
1281 details
->opcode_len
= 2;
1287 /* One-byte opcode. */
1288 need_modrm
= onebyte_has_modrm
[*insn
];
1289 details
->opcode_len
= 1;
1295 details
->modrm_offset
= insn
- start
;
1299 /* Update %rip-relative addressing in INSN.
1301 %rip-relative addressing only uses a 32-bit displacement.
1302 32 bits is not enough to be guaranteed to cover the distance between where
1303 the real instruction is and where its copy is.
1304 Convert the insn to use base+disp addressing.
1305 We set base = pc + insn_length so we can leave disp unchanged. */
1308 fixup_riprel (struct gdbarch
*gdbarch
, struct displaced_step_closure
*dsc
,
1309 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
)
1311 const struct amd64_insn
*insn_details
= &dsc
->insn_details
;
1312 int modrm_offset
= insn_details
->modrm_offset
;
1313 gdb_byte
*insn
= insn_details
->raw_insn
+ modrm_offset
;
1316 int arch_tmp_regno
, tmp_regno
;
1317 ULONGEST orig_value
;
1319 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1322 /* Compute the rip-relative address. */
1323 insn_length
= gdb_buffered_insn_length (gdbarch
, dsc
->insn_buf
,
1324 dsc
->max_len
, from
);
1325 rip_base
= from
+ insn_length
;
1327 /* We need a register to hold the address.
1328 Pick one not used in the insn.
1329 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1330 arch_tmp_regno
= amd64_get_unused_input_int_reg (insn_details
);
1331 tmp_regno
= amd64_arch_reg_to_regnum (arch_tmp_regno
);
1333 /* REX.B should be unset as we were using rip-relative addressing,
1334 but ensure it's unset anyway, tmp_regno is not r8-r15. */
1335 if (insn_details
->rex_offset
!= -1)
1336 dsc
->insn_buf
[insn_details
->rex_offset
] &= ~REX_B
;
1338 regcache_cooked_read_unsigned (regs
, tmp_regno
, &orig_value
);
1339 dsc
->tmp_regno
= tmp_regno
;
1340 dsc
->tmp_save
= orig_value
;
1343 /* Convert the ModRM field to be base+disp. */
1344 dsc
->insn_buf
[modrm_offset
] &= ~0xc7;
1345 dsc
->insn_buf
[modrm_offset
] |= 0x80 + arch_tmp_regno
;
1347 regcache_cooked_write_unsigned (regs
, tmp_regno
, rip_base
);
1349 if (debug_displaced
)
1350 fprintf_unfiltered (gdb_stdlog
, "displaced: %%rip-relative addressing used.\n"
1351 "displaced: using temp reg %d, old value %s, new value %s\n",
1352 dsc
->tmp_regno
, paddress (gdbarch
, dsc
->tmp_save
),
1353 paddress (gdbarch
, rip_base
));
1357 fixup_displaced_copy (struct gdbarch
*gdbarch
,
1358 struct displaced_step_closure
*dsc
,
1359 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
)
1361 const struct amd64_insn
*details
= &dsc
->insn_details
;
1363 if (details
->modrm_offset
!= -1)
1365 gdb_byte modrm
= details
->raw_insn
[details
->modrm_offset
];
1367 if ((modrm
& 0xc7) == 0x05)
1369 /* The insn uses rip-relative addressing.
1371 fixup_riprel (gdbarch
, dsc
, from
, to
, regs
);
1376 struct displaced_step_closure
*
1377 amd64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
1378 CORE_ADDR from
, CORE_ADDR to
,
1379 struct regcache
*regs
)
1381 int len
= gdbarch_max_insn_length (gdbarch
);
1382 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1383 continually watch for running off the end of the buffer. */
1384 int fixup_sentinel_space
= len
;
1385 struct displaced_step_closure
*dsc
1386 = ((struct displaced_step_closure
*)
1387 xmalloc (sizeof (*dsc
) + len
+ fixup_sentinel_space
));
1388 gdb_byte
*buf
= &dsc
->insn_buf
[0];
1389 struct amd64_insn
*details
= &dsc
->insn_details
;
1392 dsc
->max_len
= len
+ fixup_sentinel_space
;
1394 read_memory (from
, buf
, len
);
1396 /* Set up the sentinel space so we don't have to worry about running
1397 off the end of the buffer. An excessive number of leading prefixes
1398 could otherwise cause this. */
1399 memset (buf
+ len
, 0, fixup_sentinel_space
);
1401 amd64_get_insn_details (buf
, details
);
1403 /* GDB may get control back after the insn after the syscall.
1404 Presumably this is a kernel bug.
1405 If this is a syscall, make sure there's a nop afterwards. */
1409 if (amd64_syscall_p (details
, &syscall_length
))
1410 buf
[details
->opcode_offset
+ syscall_length
] = NOP_OPCODE
;
1413 /* Modify the insn to cope with the address where it will be executed from.
1414 In particular, handle any rip-relative addressing. */
1415 fixup_displaced_copy (gdbarch
, dsc
, from
, to
, regs
);
1417 write_memory (to
, buf
, len
);
1419 if (debug_displaced
)
1421 fprintf_unfiltered (gdb_stdlog
, "displaced: copy %s->%s: ",
1422 paddress (gdbarch
, from
), paddress (gdbarch
, to
));
1423 displaced_step_dump_bytes (gdb_stdlog
, buf
, len
);
1430 amd64_absolute_jmp_p (const struct amd64_insn
*details
)
1432 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1434 if (insn
[0] == 0xff)
1436 /* jump near, absolute indirect (/4) */
1437 if ((insn
[1] & 0x38) == 0x20)
1440 /* jump far, absolute indirect (/5) */
1441 if ((insn
[1] & 0x38) == 0x28)
1448 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1451 amd64_jmp_p (const struct amd64_insn
*details
)
1453 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1455 /* jump short, relative. */
1456 if (insn
[0] == 0xeb)
1459 /* jump near, relative. */
1460 if (insn
[0] == 0xe9)
1463 return amd64_absolute_jmp_p (details
);
1467 amd64_absolute_call_p (const struct amd64_insn
*details
)
1469 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1471 if (insn
[0] == 0xff)
1473 /* Call near, absolute indirect (/2) */
1474 if ((insn
[1] & 0x38) == 0x10)
1477 /* Call far, absolute indirect (/3) */
1478 if ((insn
[1] & 0x38) == 0x18)
1486 amd64_ret_p (const struct amd64_insn
*details
)
1488 /* NOTE: gcc can emit "repz ; ret". */
1489 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1493 case 0xc2: /* ret near, pop N bytes */
1494 case 0xc3: /* ret near */
1495 case 0xca: /* ret far, pop N bytes */
1496 case 0xcb: /* ret far */
1497 case 0xcf: /* iret */
1506 amd64_call_p (const struct amd64_insn
*details
)
1508 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1510 if (amd64_absolute_call_p (details
))
1513 /* call near, relative */
1514 if (insn
[0] == 0xe8)
1520 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1521 length in bytes. Otherwise, return zero. */
1524 amd64_syscall_p (const struct amd64_insn
*details
, int *lengthp
)
1526 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1528 if (insn
[0] == 0x0f && insn
[1] == 0x05)
1537 /* Classify the instruction at ADDR using PRED.
1538 Throw an error if the memory can't be read. */
1541 amd64_classify_insn_at (struct gdbarch
*gdbarch
, CORE_ADDR addr
,
1542 int (*pred
) (const struct amd64_insn
*))
1544 struct amd64_insn details
;
1546 int len
, classification
;
1548 len
= gdbarch_max_insn_length (gdbarch
);
1549 buf
= (gdb_byte
*) alloca (len
);
1551 read_code (addr
, buf
, len
);
1552 amd64_get_insn_details (buf
, &details
);
1554 classification
= pred (&details
);
1556 return classification
;
1559 /* The gdbarch insn_is_call method. */
1562 amd64_insn_is_call (struct gdbarch
*gdbarch
, CORE_ADDR addr
)
1564 return amd64_classify_insn_at (gdbarch
, addr
, amd64_call_p
);
1567 /* The gdbarch insn_is_ret method. */
1570 amd64_insn_is_ret (struct gdbarch
*gdbarch
, CORE_ADDR addr
)
1572 return amd64_classify_insn_at (gdbarch
, addr
, amd64_ret_p
);
1575 /* The gdbarch insn_is_jump method. */
1578 amd64_insn_is_jump (struct gdbarch
*gdbarch
, CORE_ADDR addr
)
1580 return amd64_classify_insn_at (gdbarch
, addr
, amd64_jmp_p
);
1583 /* Fix up the state of registers and memory after having single-stepped
1584 a displaced instruction. */
1587 amd64_displaced_step_fixup (struct gdbarch
*gdbarch
,
1588 struct displaced_step_closure
*dsc
,
1589 CORE_ADDR from
, CORE_ADDR to
,
1590 struct regcache
*regs
)
1592 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1593 /* The offset we applied to the instruction's address. */
1594 ULONGEST insn_offset
= to
- from
;
1595 gdb_byte
*insn
= dsc
->insn_buf
;
1596 const struct amd64_insn
*insn_details
= &dsc
->insn_details
;
1598 if (debug_displaced
)
1599 fprintf_unfiltered (gdb_stdlog
,
1600 "displaced: fixup (%s, %s), "
1601 "insn = 0x%02x 0x%02x ...\n",
1602 paddress (gdbarch
, from
), paddress (gdbarch
, to
),
1605 /* If we used a tmp reg, restore it. */
1609 if (debug_displaced
)
1610 fprintf_unfiltered (gdb_stdlog
, "displaced: restoring reg %d to %s\n",
1611 dsc
->tmp_regno
, paddress (gdbarch
, dsc
->tmp_save
));
1612 regcache_cooked_write_unsigned (regs
, dsc
->tmp_regno
, dsc
->tmp_save
);
1615 /* The list of issues to contend with here is taken from
1616 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1617 Yay for Free Software! */
1619 /* Relocate the %rip back to the program's instruction stream,
1622 /* Except in the case of absolute or indirect jump or call
1623 instructions, or a return instruction, the new rip is relative to
1624 the displaced instruction; make it relative to the original insn.
1625 Well, signal handler returns don't need relocation either, but we use the
1626 value of %rip to recognize those; see below. */
1627 if (! amd64_absolute_jmp_p (insn_details
)
1628 && ! amd64_absolute_call_p (insn_details
)
1629 && ! amd64_ret_p (insn_details
))
1634 regcache_cooked_read_unsigned (regs
, AMD64_RIP_REGNUM
, &orig_rip
);
1636 /* A signal trampoline system call changes the %rip, resuming
1637 execution of the main program after the signal handler has
1638 returned. That makes them like 'return' instructions; we
1639 shouldn't relocate %rip.
1641 But most system calls don't, and we do need to relocate %rip.
1643 Our heuristic for distinguishing these cases: if stepping
1644 over the system call instruction left control directly after
1645 the instruction, the we relocate --- control almost certainly
1646 doesn't belong in the displaced copy. Otherwise, we assume
1647 the instruction has put control where it belongs, and leave
1648 it unrelocated. Goodness help us if there are PC-relative
1650 if (amd64_syscall_p (insn_details
, &insn_len
)
1651 && orig_rip
!= to
+ insn_len
1652 /* GDB can get control back after the insn after the syscall.
1653 Presumably this is a kernel bug.
1654 Fixup ensures its a nop, we add one to the length for it. */
1655 && orig_rip
!= to
+ insn_len
+ 1)
1657 if (debug_displaced
)
1658 fprintf_unfiltered (gdb_stdlog
,
1659 "displaced: syscall changed %%rip; "
1660 "not relocating\n");
1664 ULONGEST rip
= orig_rip
- insn_offset
;
1666 /* If we just stepped over a breakpoint insn, we don't backup
1667 the pc on purpose; this is to match behaviour without
1670 regcache_cooked_write_unsigned (regs
, AMD64_RIP_REGNUM
, rip
);
1672 if (debug_displaced
)
1673 fprintf_unfiltered (gdb_stdlog
,
1675 "relocated %%rip from %s to %s\n",
1676 paddress (gdbarch
, orig_rip
),
1677 paddress (gdbarch
, rip
));
1681 /* If the instruction was PUSHFL, then the TF bit will be set in the
1682 pushed value, and should be cleared. We'll leave this for later,
1683 since GDB already messes up the TF flag when stepping over a
1686 /* If the instruction was a call, the return address now atop the
1687 stack is the address following the copied instruction. We need
1688 to make it the address following the original instruction. */
1689 if (amd64_call_p (insn_details
))
1693 const ULONGEST retaddr_len
= 8;
1695 regcache_cooked_read_unsigned (regs
, AMD64_RSP_REGNUM
, &rsp
);
1696 retaddr
= read_memory_unsigned_integer (rsp
, retaddr_len
, byte_order
);
1697 retaddr
= (retaddr
- insn_offset
) & 0xffffffffffffffffULL
;
1698 write_memory_unsigned_integer (rsp
, retaddr_len
, byte_order
, retaddr
);
1700 if (debug_displaced
)
1701 fprintf_unfiltered (gdb_stdlog
,
1702 "displaced: relocated return addr at %s "
1704 paddress (gdbarch
, rsp
),
1705 paddress (gdbarch
, retaddr
));
1709 /* If the instruction INSN uses RIP-relative addressing, return the
1710 offset into the raw INSN where the displacement to be adjusted is
1711 found. Returns 0 if the instruction doesn't use RIP-relative
1715 rip_relative_offset (struct amd64_insn
*insn
)
1717 if (insn
->modrm_offset
!= -1)
1719 gdb_byte modrm
= insn
->raw_insn
[insn
->modrm_offset
];
1721 if ((modrm
& 0xc7) == 0x05)
1723 /* The displacement is found right after the ModRM byte. */
1724 return insn
->modrm_offset
+ 1;
1732 append_insns (CORE_ADDR
*to
, ULONGEST len
, const gdb_byte
*buf
)
1734 target_write_memory (*to
, buf
, len
);
1739 amd64_relocate_instruction (struct gdbarch
*gdbarch
,
1740 CORE_ADDR
*to
, CORE_ADDR oldloc
)
1742 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1743 int len
= gdbarch_max_insn_length (gdbarch
);
1744 /* Extra space for sentinels. */
1745 int fixup_sentinel_space
= len
;
1746 gdb_byte
*buf
= (gdb_byte
*) xmalloc (len
+ fixup_sentinel_space
);
1747 struct amd64_insn insn_details
;
1749 LONGEST rel32
, newrel
;
1753 read_memory (oldloc
, buf
, len
);
1755 /* Set up the sentinel space so we don't have to worry about running
1756 off the end of the buffer. An excessive number of leading prefixes
1757 could otherwise cause this. */
1758 memset (buf
+ len
, 0, fixup_sentinel_space
);
1761 amd64_get_insn_details (insn
, &insn_details
);
1763 insn_length
= gdb_buffered_insn_length (gdbarch
, insn
, len
, oldloc
);
1765 /* Skip legacy instruction prefixes. */
1766 insn
= amd64_skip_prefixes (insn
);
1768 /* Adjust calls with 32-bit relative addresses as push/jump, with
1769 the address pushed being the location where the original call in
1770 the user program would return to. */
1771 if (insn
[0] == 0xe8)
1773 gdb_byte push_buf
[32];
1777 /* Where "ret" in the original code will return to. */
1778 ret_addr
= oldloc
+ insn_length
;
1780 /* If pushing an address higher than or equal to 0x80000000,
1781 avoid 'pushq', as that sign extends its 32-bit operand, which
1782 would be incorrect. */
1783 if (ret_addr
<= 0x7fffffff)
1785 push_buf
[0] = 0x68; /* pushq $... */
1786 store_unsigned_integer (&push_buf
[1], 4, byte_order
, ret_addr
);
1791 push_buf
[i
++] = 0x48; /* sub $0x8,%rsp */
1792 push_buf
[i
++] = 0x83;
1793 push_buf
[i
++] = 0xec;
1794 push_buf
[i
++] = 0x08;
1796 push_buf
[i
++] = 0xc7; /* movl $imm,(%rsp) */
1797 push_buf
[i
++] = 0x04;
1798 push_buf
[i
++] = 0x24;
1799 store_unsigned_integer (&push_buf
[i
], 4, byte_order
,
1800 ret_addr
& 0xffffffff);
1803 push_buf
[i
++] = 0xc7; /* movl $imm,4(%rsp) */
1804 push_buf
[i
++] = 0x44;
1805 push_buf
[i
++] = 0x24;
1806 push_buf
[i
++] = 0x04;
1807 store_unsigned_integer (&push_buf
[i
], 4, byte_order
,
1811 gdb_assert (i
<= sizeof (push_buf
));
1812 /* Push the push. */
1813 append_insns (to
, i
, push_buf
);
1815 /* Convert the relative call to a relative jump. */
1818 /* Adjust the destination offset. */
1819 rel32
= extract_signed_integer (insn
+ 1, 4, byte_order
);
1820 newrel
= (oldloc
- *to
) + rel32
;
1821 store_signed_integer (insn
+ 1, 4, byte_order
, newrel
);
1823 if (debug_displaced
)
1824 fprintf_unfiltered (gdb_stdlog
,
1825 "Adjusted insn rel32=%s at %s to"
1826 " rel32=%s at %s\n",
1827 hex_string (rel32
), paddress (gdbarch
, oldloc
),
1828 hex_string (newrel
), paddress (gdbarch
, *to
));
1830 /* Write the adjusted jump into its displaced location. */
1831 append_insns (to
, 5, insn
);
1835 offset
= rip_relative_offset (&insn_details
);
1838 /* Adjust jumps with 32-bit relative addresses. Calls are
1839 already handled above. */
1840 if (insn
[0] == 0xe9)
1842 /* Adjust conditional jumps. */
1843 else if (insn
[0] == 0x0f && (insn
[1] & 0xf0) == 0x80)
1849 rel32
= extract_signed_integer (insn
+ offset
, 4, byte_order
);
1850 newrel
= (oldloc
- *to
) + rel32
;
1851 store_signed_integer (insn
+ offset
, 4, byte_order
, newrel
);
1852 if (debug_displaced
)
1853 fprintf_unfiltered (gdb_stdlog
,
1854 "Adjusted insn rel32=%s at %s to"
1855 " rel32=%s at %s\n",
1856 hex_string (rel32
), paddress (gdbarch
, oldloc
),
1857 hex_string (newrel
), paddress (gdbarch
, *to
));
1860 /* Write the adjusted instruction into its displaced location. */
1861 append_insns (to
, insn_length
, buf
);
1865 /* The maximum number of saved registers. This should include %rip. */
1866 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1868 struct amd64_frame_cache
1873 CORE_ADDR sp_offset
;
1876 /* Saved registers. */
1877 CORE_ADDR saved_regs
[AMD64_NUM_SAVED_REGS
];
1881 /* Do we have a frame? */
1885 /* Initialize a frame cache. */
1888 amd64_init_frame_cache (struct amd64_frame_cache
*cache
)
1895 cache
->sp_offset
= -8;
1898 /* Saved registers. We initialize these to -1 since zero is a valid
1899 offset (that's where %rbp is supposed to be stored).
1900 The values start out as being offsets, and are later converted to
1901 addresses (at which point -1 is interpreted as an address, still meaning
1903 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
1904 cache
->saved_regs
[i
] = -1;
1905 cache
->saved_sp
= 0;
1906 cache
->saved_sp_reg
= -1;
1908 /* Frameless until proven otherwise. */
1909 cache
->frameless_p
= 1;
1912 /* Allocate and initialize a frame cache. */
1914 static struct amd64_frame_cache
*
1915 amd64_alloc_frame_cache (void)
1917 struct amd64_frame_cache
*cache
;
1919 cache
= FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache
);
1920 amd64_init_frame_cache (cache
);
1924 /* GCC 4.4 and later, can put code in the prologue to realign the
1925 stack pointer. Check whether PC points to such code, and update
1926 CACHE accordingly. Return the first instruction after the code
1927 sequence or CURRENT_PC, whichever is smaller. If we don't
1928 recognize the code, return PC. */
1931 amd64_analyze_stack_align (CORE_ADDR pc
, CORE_ADDR current_pc
,
1932 struct amd64_frame_cache
*cache
)
1934 /* There are 2 code sequences to re-align stack before the frame
1937 1. Use a caller-saved saved register:
1943 2. Use a callee-saved saved register:
1950 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1952 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1953 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1958 int offset
, offset_and
;
1960 if (target_read_code (pc
, buf
, sizeof buf
))
1963 /* Check caller-saved saved register. The first instruction has
1964 to be "leaq 8(%rsp), %reg". */
1965 if ((buf
[0] & 0xfb) == 0x48
1970 /* MOD must be binary 10 and R/M must be binary 100. */
1971 if ((buf
[2] & 0xc7) != 0x44)
1974 /* REG has register number. */
1975 reg
= (buf
[2] >> 3) & 7;
1977 /* Check the REX.R bit. */
1985 /* Check callee-saved saved register. The first instruction
1986 has to be "pushq %reg". */
1988 if ((buf
[0] & 0xf8) == 0x50)
1990 else if ((buf
[0] & 0xf6) == 0x40
1991 && (buf
[1] & 0xf8) == 0x50)
1993 /* Check the REX.B bit. */
1994 if ((buf
[0] & 1) != 0)
2003 reg
+= buf
[offset
] & 0x7;
2007 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2008 if ((buf
[offset
] & 0xfb) != 0x48
2009 || buf
[offset
+ 1] != 0x8d
2010 || buf
[offset
+ 3] != 0x24
2011 || buf
[offset
+ 4] != 0x10)
2014 /* MOD must be binary 10 and R/M must be binary 100. */
2015 if ((buf
[offset
+ 2] & 0xc7) != 0x44)
2018 /* REG has register number. */
2019 r
= (buf
[offset
+ 2] >> 3) & 7;
2021 /* Check the REX.R bit. */
2022 if (buf
[offset
] == 0x4c)
2025 /* Registers in pushq and leaq have to be the same. */
2032 /* Rigister can't be %rsp nor %rbp. */
2033 if (reg
== 4 || reg
== 5)
2036 /* The next instruction has to be "andq $-XXX, %rsp". */
2037 if (buf
[offset
] != 0x48
2038 || buf
[offset
+ 2] != 0xe4
2039 || (buf
[offset
+ 1] != 0x81 && buf
[offset
+ 1] != 0x83))
2042 offset_and
= offset
;
2043 offset
+= buf
[offset
+ 1] == 0x81 ? 7 : 4;
2045 /* The next instruction has to be "pushq -8(%reg)". */
2047 if (buf
[offset
] == 0xff)
2049 else if ((buf
[offset
] & 0xf6) == 0x40
2050 && buf
[offset
+ 1] == 0xff)
2052 /* Check the REX.B bit. */
2053 if ((buf
[offset
] & 0x1) != 0)
2060 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2062 if (buf
[offset
+ 1] != 0xf8
2063 || (buf
[offset
] & 0xf8) != 0x70)
2066 /* R/M has register. */
2067 r
+= buf
[offset
] & 7;
2069 /* Registers in leaq and pushq have to be the same. */
2073 if (current_pc
> pc
+ offset_and
)
2074 cache
->saved_sp_reg
= amd64_arch_reg_to_regnum (reg
);
2076 return std::min (pc
+ offset
+ 2, current_pc
);
2079 /* Similar to amd64_analyze_stack_align for x32. */
2082 amd64_x32_analyze_stack_align (CORE_ADDR pc
, CORE_ADDR current_pc
,
2083 struct amd64_frame_cache
*cache
)
2085 /* There are 2 code sequences to re-align stack before the frame
2088 1. Use a caller-saved saved register:
2096 [addr32] leal 8(%rsp), %reg
2098 [addr32] pushq -8(%reg)
2100 2. Use a callee-saved saved register:
2110 [addr32] leal 16(%rsp), %reg
2112 [addr32] pushq -8(%reg)
2114 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2116 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2117 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2119 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2121 0x83 0xe4 0xf0 andl $-16, %esp
2122 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2127 int offset
, offset_and
;
2129 if (target_read_memory (pc
, buf
, sizeof buf
))
2132 /* Skip optional addr32 prefix. */
2133 offset
= buf
[0] == 0x67 ? 1 : 0;
2135 /* Check caller-saved saved register. The first instruction has
2136 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2137 if (((buf
[offset
] & 0xfb) == 0x48 || (buf
[offset
] & 0xfb) == 0x40)
2138 && buf
[offset
+ 1] == 0x8d
2139 && buf
[offset
+ 3] == 0x24
2140 && buf
[offset
+ 4] == 0x8)
2142 /* MOD must be binary 10 and R/M must be binary 100. */
2143 if ((buf
[offset
+ 2] & 0xc7) != 0x44)
2146 /* REG has register number. */
2147 reg
= (buf
[offset
+ 2] >> 3) & 7;
2149 /* Check the REX.R bit. */
2150 if ((buf
[offset
] & 0x4) != 0)
2157 /* Check callee-saved saved register. The first instruction
2158 has to be "pushq %reg". */
2160 if ((buf
[offset
] & 0xf6) == 0x40
2161 && (buf
[offset
+ 1] & 0xf8) == 0x50)
2163 /* Check the REX.B bit. */
2164 if ((buf
[offset
] & 1) != 0)
2169 else if ((buf
[offset
] & 0xf8) != 0x50)
2173 reg
+= buf
[offset
] & 0x7;
2177 /* Skip optional addr32 prefix. */
2178 if (buf
[offset
] == 0x67)
2181 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2182 "leal 16(%rsp), %reg". */
2183 if (((buf
[offset
] & 0xfb) != 0x48 && (buf
[offset
] & 0xfb) != 0x40)
2184 || buf
[offset
+ 1] != 0x8d
2185 || buf
[offset
+ 3] != 0x24
2186 || buf
[offset
+ 4] != 0x10)
2189 /* MOD must be binary 10 and R/M must be binary 100. */
2190 if ((buf
[offset
+ 2] & 0xc7) != 0x44)
2193 /* REG has register number. */
2194 r
= (buf
[offset
+ 2] >> 3) & 7;
2196 /* Check the REX.R bit. */
2197 if ((buf
[offset
] & 0x4) != 0)
2200 /* Registers in pushq and leaq have to be the same. */
2207 /* Rigister can't be %rsp nor %rbp. */
2208 if (reg
== 4 || reg
== 5)
2211 /* The next instruction may be "andq $-XXX, %rsp" or
2212 "andl $-XXX, %esp". */
2213 if (buf
[offset
] != 0x48)
2216 if (buf
[offset
+ 2] != 0xe4
2217 || (buf
[offset
+ 1] != 0x81 && buf
[offset
+ 1] != 0x83))
2220 offset_and
= offset
;
2221 offset
+= buf
[offset
+ 1] == 0x81 ? 7 : 4;
2223 /* Skip optional addr32 prefix. */
2224 if (buf
[offset
] == 0x67)
2227 /* The next instruction has to be "pushq -8(%reg)". */
2229 if (buf
[offset
] == 0xff)
2231 else if ((buf
[offset
] & 0xf6) == 0x40
2232 && buf
[offset
+ 1] == 0xff)
2234 /* Check the REX.B bit. */
2235 if ((buf
[offset
] & 0x1) != 0)
2242 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2244 if (buf
[offset
+ 1] != 0xf8
2245 || (buf
[offset
] & 0xf8) != 0x70)
2248 /* R/M has register. */
2249 r
+= buf
[offset
] & 7;
2251 /* Registers in leaq and pushq have to be the same. */
2255 if (current_pc
> pc
+ offset_and
)
2256 cache
->saved_sp_reg
= amd64_arch_reg_to_regnum (reg
);
2258 return std::min (pc
+ offset
+ 2, current_pc
);
2261 /* Do a limited analysis of the prologue at PC and update CACHE
2262 accordingly. Bail out early if CURRENT_PC is reached. Return the
2263 address where the analysis stopped.
2265 We will handle only functions beginning with:
2268 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2270 or (for the X32 ABI):
2273 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2275 Any function that doesn't start with one of these sequences will be
2276 assumed to have no prologue and thus no valid frame pointer in
2280 amd64_analyze_prologue (struct gdbarch
*gdbarch
,
2281 CORE_ADDR pc
, CORE_ADDR current_pc
,
2282 struct amd64_frame_cache
*cache
)
2284 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2285 /* There are two variations of movq %rsp, %rbp. */
2286 static const gdb_byte mov_rsp_rbp_1
[3] = { 0x48, 0x89, 0xe5 };
2287 static const gdb_byte mov_rsp_rbp_2
[3] = { 0x48, 0x8b, 0xec };
2288 /* Ditto for movl %esp, %ebp. */
2289 static const gdb_byte mov_esp_ebp_1
[2] = { 0x89, 0xe5 };
2290 static const gdb_byte mov_esp_ebp_2
[2] = { 0x8b, 0xec };
2295 if (current_pc
<= pc
)
2298 if (gdbarch_ptr_bit (gdbarch
) == 32)
2299 pc
= amd64_x32_analyze_stack_align (pc
, current_pc
, cache
);
2301 pc
= amd64_analyze_stack_align (pc
, current_pc
, cache
);
2303 op
= read_code_unsigned_integer (pc
, 1, byte_order
);
2305 if (op
== 0x55) /* pushq %rbp */
2307 /* Take into account that we've executed the `pushq %rbp' that
2308 starts this instruction sequence. */
2309 cache
->saved_regs
[AMD64_RBP_REGNUM
] = 0;
2310 cache
->sp_offset
+= 8;
2312 /* If that's all, return now. */
2313 if (current_pc
<= pc
+ 1)
2316 read_code (pc
+ 1, buf
, 3);
2318 /* Check for `movq %rsp, %rbp'. */
2319 if (memcmp (buf
, mov_rsp_rbp_1
, 3) == 0
2320 || memcmp (buf
, mov_rsp_rbp_2
, 3) == 0)
2322 /* OK, we actually have a frame. */
2323 cache
->frameless_p
= 0;
2327 /* For X32, also check for `movq %esp, %ebp'. */
2328 if (gdbarch_ptr_bit (gdbarch
) == 32)
2330 if (memcmp (buf
, mov_esp_ebp_1
, 2) == 0
2331 || memcmp (buf
, mov_esp_ebp_2
, 2) == 0)
2333 /* OK, we actually have a frame. */
2334 cache
->frameless_p
= 0;
2345 /* Work around false termination of prologue - GCC PR debug/48827.
2347 START_PC is the first instruction of a function, PC is its minimal already
2348 determined advanced address. Function returns PC if it has nothing to do.
2352 <-- here is 0 lines advance - the false prologue end marker.
2353 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2354 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2355 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2356 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2357 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2358 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2359 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2360 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2364 amd64_skip_xmm_prologue (CORE_ADDR pc
, CORE_ADDR start_pc
)
2366 struct symtab_and_line start_pc_sal
, next_sal
;
2367 gdb_byte buf
[4 + 8 * 7];
2373 start_pc_sal
= find_pc_sect_line (start_pc
, NULL
, 0);
2374 if (start_pc_sal
.symtab
== NULL
2375 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2376 (SYMTAB_COMPUNIT (start_pc_sal
.symtab
))) < 6
2377 || start_pc_sal
.pc
!= start_pc
|| pc
>= start_pc_sal
.end
)
2380 next_sal
= find_pc_sect_line (start_pc_sal
.end
, NULL
, 0);
2381 if (next_sal
.line
!= start_pc_sal
.line
)
2384 /* START_PC can be from overlayed memory, ignored here. */
2385 if (target_read_code (next_sal
.pc
- 4, buf
, sizeof (buf
)) != 0)
2389 if (buf
[0] != 0x84 || buf
[1] != 0xc0)
2396 for (xmmreg
= 0; xmmreg
< 8; xmmreg
++)
2398 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2399 if (buf
[offset
] != 0x0f || buf
[offset
+ 1] != 0x29
2400 || (buf
[offset
+ 2] & 0x3f) != (xmmreg
<< 3 | 0x5))
2404 if ((buf
[offset
+ 2] & 0xc0) == 0x40)
2406 /* 8-bit displacement. */
2410 else if ((buf
[offset
+ 2] & 0xc0) == 0x80)
2412 /* 32-bit displacement. */
2420 if (offset
- 4 != buf
[3])
2423 return next_sal
.end
;
2426 /* Return PC of first real instruction. */
2429 amd64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR start_pc
)
2431 struct amd64_frame_cache cache
;
2433 CORE_ADDR func_addr
;
2435 if (find_pc_partial_function (start_pc
, NULL
, &func_addr
, NULL
))
2437 CORE_ADDR post_prologue_pc
2438 = skip_prologue_using_sal (gdbarch
, func_addr
);
2439 struct compunit_symtab
*cust
= find_pc_compunit_symtab (func_addr
);
2441 /* Clang always emits a line note before the prologue and another
2442 one after. We trust clang to emit usable line notes. */
2443 if (post_prologue_pc
2445 && COMPUNIT_PRODUCER (cust
) != NULL
2446 && startswith (COMPUNIT_PRODUCER (cust
), "clang ")))
2447 return std::max (start_pc
, post_prologue_pc
);
2450 amd64_init_frame_cache (&cache
);
2451 pc
= amd64_analyze_prologue (gdbarch
, start_pc
, 0xffffffffffffffffLL
,
2453 if (cache
.frameless_p
)
2456 return amd64_skip_xmm_prologue (pc
, start_pc
);
2460 /* Normal frames. */
2463 amd64_frame_cache_1 (struct frame_info
*this_frame
,
2464 struct amd64_frame_cache
*cache
)
2466 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2467 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2471 cache
->pc
= get_frame_func (this_frame
);
2473 amd64_analyze_prologue (gdbarch
, cache
->pc
, get_frame_pc (this_frame
),
2476 if (cache
->frameless_p
)
2478 /* We didn't find a valid frame. If we're at the start of a
2479 function, or somewhere half-way its prologue, the function's
2480 frame probably hasn't been fully setup yet. Try to
2481 reconstruct the base address for the stack frame by looking
2482 at the stack pointer. For truly "frameless" functions this
2485 if (cache
->saved_sp_reg
!= -1)
2487 /* Stack pointer has been saved. */
2488 get_frame_register (this_frame
, cache
->saved_sp_reg
, buf
);
2489 cache
->saved_sp
= extract_unsigned_integer (buf
, 8, byte_order
);
2491 /* We're halfway aligning the stack. */
2492 cache
->base
= ((cache
->saved_sp
- 8) & 0xfffffffffffffff0LL
) - 8;
2493 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->saved_sp
- 8;
2495 /* This will be added back below. */
2496 cache
->saved_regs
[AMD64_RIP_REGNUM
] -= cache
->base
;
2500 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2501 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
)
2507 get_frame_register (this_frame
, AMD64_RBP_REGNUM
, buf
);
2508 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
);
2511 /* Now that we have the base address for the stack frame we can
2512 calculate the value of %rsp in the calling frame. */
2513 cache
->saved_sp
= cache
->base
+ 16;
2515 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2516 frame we find it at the same offset from the reconstructed base
2517 address. If we're halfway aligning the stack, %rip is handled
2518 differently (see above). */
2519 if (!cache
->frameless_p
|| cache
->saved_sp_reg
== -1)
2520 cache
->saved_regs
[AMD64_RIP_REGNUM
] = 8;
2522 /* Adjust all the saved registers such that they contain addresses
2523 instead of offsets. */
2524 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
2525 if (cache
->saved_regs
[i
] != -1)
2526 cache
->saved_regs
[i
] += cache
->base
;
2531 static struct amd64_frame_cache
*
2532 amd64_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2534 struct amd64_frame_cache
*cache
;
2537 return (struct amd64_frame_cache
*) *this_cache
;
2539 cache
= amd64_alloc_frame_cache ();
2540 *this_cache
= cache
;
2544 amd64_frame_cache_1 (this_frame
, cache
);
2546 CATCH (ex
, RETURN_MASK_ERROR
)
2548 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2549 throw_exception (ex
);
2556 static enum unwind_stop_reason
2557 amd64_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2560 struct amd64_frame_cache
*cache
=
2561 amd64_frame_cache (this_frame
, this_cache
);
2564 return UNWIND_UNAVAILABLE
;
2566 /* This marks the outermost frame. */
2567 if (cache
->base
== 0)
2568 return UNWIND_OUTERMOST
;
2570 return UNWIND_NO_REASON
;
2574 amd64_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
2575 struct frame_id
*this_id
)
2577 struct amd64_frame_cache
*cache
=
2578 amd64_frame_cache (this_frame
, this_cache
);
2581 (*this_id
) = frame_id_build_unavailable_stack (cache
->pc
);
2582 else if (cache
->base
== 0)
2584 /* This marks the outermost frame. */
2588 (*this_id
) = frame_id_build (cache
->base
+ 16, cache
->pc
);
2591 static struct value
*
2592 amd64_frame_prev_register (struct frame_info
*this_frame
, void **this_cache
,
2595 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2596 struct amd64_frame_cache
*cache
=
2597 amd64_frame_cache (this_frame
, this_cache
);
2599 gdb_assert (regnum
>= 0);
2601 if (regnum
== gdbarch_sp_regnum (gdbarch
) && cache
->saved_sp
)
2602 return frame_unwind_got_constant (this_frame
, regnum
, cache
->saved_sp
);
2604 if (regnum
< AMD64_NUM_SAVED_REGS
&& cache
->saved_regs
[regnum
] != -1)
2605 return frame_unwind_got_memory (this_frame
, regnum
,
2606 cache
->saved_regs
[regnum
]);
2608 return frame_unwind_got_register (this_frame
, regnum
, regnum
);
2611 static const struct frame_unwind amd64_frame_unwind
=
2614 amd64_frame_unwind_stop_reason
,
2615 amd64_frame_this_id
,
2616 amd64_frame_prev_register
,
2618 default_frame_sniffer
2621 /* Generate a bytecode expression to get the value of the saved PC. */
2624 amd64_gen_return_address (struct gdbarch
*gdbarch
,
2625 struct agent_expr
*ax
, struct axs_value
*value
,
2628 /* The following sequence assumes the traditional use of the base
2630 ax_reg (ax
, AMD64_RBP_REGNUM
);
2632 ax_simple (ax
, aop_add
);
2633 value
->type
= register_type (gdbarch
, AMD64_RIP_REGNUM
);
2634 value
->kind
= axs_lvalue_memory
;
2638 /* Signal trampolines. */
2640 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2641 64-bit variants. This would require using identical frame caches
2642 on both platforms. */
2644 static struct amd64_frame_cache
*
2645 amd64_sigtramp_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2647 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2648 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2649 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2650 struct amd64_frame_cache
*cache
;
2656 return (struct amd64_frame_cache
*) *this_cache
;
2658 cache
= amd64_alloc_frame_cache ();
2662 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2663 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
) - 8;
2665 addr
= tdep
->sigcontext_addr (this_frame
);
2666 gdb_assert (tdep
->sc_reg_offset
);
2667 gdb_assert (tdep
->sc_num_regs
<= AMD64_NUM_SAVED_REGS
);
2668 for (i
= 0; i
< tdep
->sc_num_regs
; i
++)
2669 if (tdep
->sc_reg_offset
[i
] != -1)
2670 cache
->saved_regs
[i
] = addr
+ tdep
->sc_reg_offset
[i
];
2674 CATCH (ex
, RETURN_MASK_ERROR
)
2676 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2677 throw_exception (ex
);
2681 *this_cache
= cache
;
2685 static enum unwind_stop_reason
2686 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2689 struct amd64_frame_cache
*cache
=
2690 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
2693 return UNWIND_UNAVAILABLE
;
2695 return UNWIND_NO_REASON
;
2699 amd64_sigtramp_frame_this_id (struct frame_info
*this_frame
,
2700 void **this_cache
, struct frame_id
*this_id
)
2702 struct amd64_frame_cache
*cache
=
2703 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
2706 (*this_id
) = frame_id_build_unavailable_stack (get_frame_pc (this_frame
));
2707 else if (cache
->base
== 0)
2709 /* This marks the outermost frame. */
2713 (*this_id
) = frame_id_build (cache
->base
+ 16, get_frame_pc (this_frame
));
static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
2727 amd64_sigtramp_frame_sniffer (const struct frame_unwind
*self
,
2728 struct frame_info
*this_frame
,
2731 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_frame_arch (this_frame
));
2733 /* We shouldn't even bother if we don't have a sigcontext_addr
2735 if (tdep
->sigcontext_addr
== NULL
)
2738 if (tdep
->sigtramp_p
!= NULL
)
2740 if (tdep
->sigtramp_p (this_frame
))
2744 if (tdep
->sigtramp_start
!= 0)
2746 CORE_ADDR pc
= get_frame_pc (this_frame
);
2748 gdb_assert (tdep
->sigtramp_end
!= 0);
2749 if (pc
>= tdep
->sigtramp_start
&& pc
< tdep
->sigtramp_end
)
2756 static const struct frame_unwind amd64_sigtramp_frame_unwind
=
2759 amd64_sigtramp_frame_unwind_stop_reason
,
2760 amd64_sigtramp_frame_this_id
,
2761 amd64_sigtramp_frame_prev_register
,
2763 amd64_sigtramp_frame_sniffer
2768 amd64_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
2770 struct amd64_frame_cache
*cache
=
2771 amd64_frame_cache (this_frame
, this_cache
);
2776 static const struct frame_base amd64_frame_base
=
2778 &amd64_frame_unwind
,
2779 amd64_frame_base_address
,
2780 amd64_frame_base_address
,
2781 amd64_frame_base_address
2784 /* Normal frames, but in a function epilogue. */
2786 /* Implement the stack_frame_destroyed_p gdbarch method.
2788 The epilogue is defined here as the 'ret' instruction, which will
2789 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2790 the function's stack frame. */
2793 amd64_stack_frame_destroyed_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
2796 struct compunit_symtab
*cust
;
2798 cust
= find_pc_compunit_symtab (pc
);
2799 if (cust
!= NULL
&& COMPUNIT_EPILOGUE_UNWIND_VALID (cust
))
2802 if (target_read_memory (pc
, &insn
, 1))
2803 return 0; /* Can't read memory at pc. */
2805 if (insn
!= 0xc3) /* 'ret' instruction. */
static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) == 0)
    return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					  get_frame_pc (this_frame));
  else
    return 0;
}
2823 static struct amd64_frame_cache
*
2824 amd64_epilogue_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2826 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2827 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2828 struct amd64_frame_cache
*cache
;
2832 return (struct amd64_frame_cache
*) *this_cache
;
2834 cache
= amd64_alloc_frame_cache ();
2835 *this_cache
= cache
;
2839 /* Cache base will be %esp plus cache->sp_offset (-8). */
2840 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2841 cache
->base
= extract_unsigned_integer (buf
, 8,
2842 byte_order
) + cache
->sp_offset
;
2844 /* Cache pc will be the frame func. */
2845 cache
->pc
= get_frame_pc (this_frame
);
2847 /* The saved %esp will be at cache->base plus 16. */
2848 cache
->saved_sp
= cache
->base
+ 16;
2850 /* The saved %eip will be at cache->base plus 8. */
2851 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->base
+ 8;
2855 CATCH (ex
, RETURN_MASK_ERROR
)
2857 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2858 throw_exception (ex
);
2865 static enum unwind_stop_reason
2866 amd64_epilogue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2869 struct amd64_frame_cache
*cache
2870 = amd64_epilogue_frame_cache (this_frame
, this_cache
);
2873 return UNWIND_UNAVAILABLE
;
2875 return UNWIND_NO_REASON
;
2879 amd64_epilogue_frame_this_id (struct frame_info
*this_frame
,
2881 struct frame_id
*this_id
)
2883 struct amd64_frame_cache
*cache
= amd64_epilogue_frame_cache (this_frame
,
2887 (*this_id
) = frame_id_build_unavailable_stack (cache
->pc
);
2889 (*this_id
) = frame_id_build (cache
->base
+ 8, cache
->pc
);
2892 static const struct frame_unwind amd64_epilogue_frame_unwind
=
2895 amd64_epilogue_frame_unwind_stop_reason
,
2896 amd64_epilogue_frame_this_id
,
2897 amd64_frame_prev_register
,
2899 amd64_epilogue_frame_sniffer
2902 static struct frame_id
2903 amd64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2907 fp
= get_frame_register_unsigned (this_frame
, AMD64_RBP_REGNUM
);
2909 return frame_id_build (fp
+ 16, get_frame_pc (this_frame
));
2912 /* 16 byte align the SP per frame requirements. */
2915 amd64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
2917 return sp
& -(CORE_ADDR
)16;
2921 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2922 in the floating-point register set REGSET to register cache
2923 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2926 amd64_supply_fpregset (const struct regset
*regset
, struct regcache
*regcache
,
2927 int regnum
, const void *fpregs
, size_t len
)
2929 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2930 const struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2932 gdb_assert (len
>= tdep
->sizeof_fpregset
);
2933 amd64_supply_fxsave (regcache
, regnum
, fpregs
);
2936 /* Collect register REGNUM from the register cache REGCACHE and store
2937 it in the buffer specified by FPREGS and LEN as described by the
2938 floating-point register set REGSET. If REGNUM is -1, do this for
2939 all registers in REGSET. */
2942 amd64_collect_fpregset (const struct regset
*regset
,
2943 const struct regcache
*regcache
,
2944 int regnum
, void *fpregs
, size_t len
)
2946 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2947 const struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2949 gdb_assert (len
>= tdep
->sizeof_fpregset
);
2950 amd64_collect_fxsave (regcache
, regnum
, fpregs
);
2953 const struct regset amd64_fpregset
=
2955 NULL
, amd64_supply_fpregset
, amd64_collect_fpregset
2959 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2960 %rdi. We expect its value to be a pointer to the jmp_buf structure
2961 from which we extract the address that we will land at. This
2962 address is copied into PC. This routine returns non-zero on
2966 amd64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2970 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2971 int jb_pc_offset
= gdbarch_tdep (gdbarch
)->jb_pc_offset
;
2972 int len
= TYPE_LENGTH (builtin_type (gdbarch
)->builtin_func_ptr
);
2974 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2975 longjmp will land. */
2976 if (jb_pc_offset
== -1)
2979 get_frame_register (frame
, AMD64_RDI_REGNUM
, buf
);
2980 jb_addr
= extract_typed_address
2981 (buf
, builtin_type (gdbarch
)->builtin_data_ptr
);
2982 if (target_read_memory (jb_addr
+ jb_pc_offset
, buf
, len
))
2985 *pc
= extract_typed_address (buf
, builtin_type (gdbarch
)->builtin_func_ptr
);
2990 static const int amd64_record_regmap
[] =
2992 AMD64_RAX_REGNUM
, AMD64_RCX_REGNUM
, AMD64_RDX_REGNUM
, AMD64_RBX_REGNUM
,
2993 AMD64_RSP_REGNUM
, AMD64_RBP_REGNUM
, AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
2994 AMD64_R8_REGNUM
, AMD64_R9_REGNUM
, AMD64_R10_REGNUM
, AMD64_R11_REGNUM
,
2995 AMD64_R12_REGNUM
, AMD64_R13_REGNUM
, AMD64_R14_REGNUM
, AMD64_R15_REGNUM
,
2996 AMD64_RIP_REGNUM
, AMD64_EFLAGS_REGNUM
, AMD64_CS_REGNUM
, AMD64_SS_REGNUM
,
2997 AMD64_DS_REGNUM
, AMD64_ES_REGNUM
, AMD64_FS_REGNUM
, AMD64_GS_REGNUM
3001 amd64_init_abi (struct gdbarch_info info
, struct gdbarch
*gdbarch
)
3003 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3004 const struct target_desc
*tdesc
= info
.target_desc
;
3005 static const char *const stap_integer_prefixes
[] = { "$", NULL
};
3006 static const char *const stap_register_prefixes
[] = { "%", NULL
};
3007 static const char *const stap_register_indirection_prefixes
[] = { "(",
3009 static const char *const stap_register_indirection_suffixes
[] = { ")",
3012 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3013 floating-point registers. */
3014 tdep
->sizeof_fpregset
= I387_SIZEOF_FXSAVE
;
3015 tdep
->fpregset
= &amd64_fpregset
;
3017 if (! tdesc_has_registers (tdesc
))
3018 tdesc
= tdesc_amd64
;
3019 tdep
->tdesc
= tdesc
;
3021 tdep
->num_core_regs
= AMD64_NUM_GREGS
+ I387_NUM_REGS
;
3022 tdep
->register_names
= amd64_register_names
;
3024 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.avx512") != NULL
)
3026 tdep
->zmmh_register_names
= amd64_zmmh_names
;
3027 tdep
->k_register_names
= amd64_k_names
;
3028 tdep
->xmm_avx512_register_names
= amd64_xmm_avx512_names
;
3029 tdep
->ymm16h_register_names
= amd64_ymmh_avx512_names
;
3031 tdep
->num_zmm_regs
= 32;
3032 tdep
->num_xmm_avx512_regs
= 16;
3033 tdep
->num_ymm_avx512_regs
= 16;
3035 tdep
->zmm0h_regnum
= AMD64_ZMM0H_REGNUM
;
3036 tdep
->k0_regnum
= AMD64_K0_REGNUM
;
3037 tdep
->xmm16_regnum
= AMD64_XMM16_REGNUM
;
3038 tdep
->ymm16h_regnum
= AMD64_YMM16H_REGNUM
;
3041 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.avx") != NULL
)
3043 tdep
->ymmh_register_names
= amd64_ymmh_names
;
3044 tdep
->num_ymm_regs
= 16;
3045 tdep
->ymm0h_regnum
= AMD64_YMM0H_REGNUM
;
3048 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.mpx") != NULL
)
3050 tdep
->mpx_register_names
= amd64_mpx_names
;
3051 tdep
->bndcfgu_regnum
= AMD64_BNDCFGU_REGNUM
;
3052 tdep
->bnd0r_regnum
= AMD64_BND0R_REGNUM
;
3055 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.segments") != NULL
)
3057 const struct tdesc_feature
*feature
=
3058 tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.segments");
3059 struct tdesc_arch_data
*tdesc_data_segments
=
3060 (struct tdesc_arch_data
*) info
.tdep_info
;
3062 tdesc_numbered_register (feature
, tdesc_data_segments
,
3063 AMD64_FSBASE_REGNUM
, "fs_base");
3064 tdesc_numbered_register (feature
, tdesc_data_segments
,
3065 AMD64_GSBASE_REGNUM
, "gs_base");
3068 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.pkeys") != NULL
)
3070 tdep
->pkeys_register_names
= amd64_pkeys_names
;
3071 tdep
->pkru_regnum
= AMD64_PKRU_REGNUM
;
3072 tdep
->num_pkeys_regs
= 1;
3075 tdep
->num_byte_regs
= 20;
3076 tdep
->num_word_regs
= 16;
3077 tdep
->num_dword_regs
= 16;
3078 /* Avoid wiring in the MMX registers for now. */
3079 tdep
->num_mmx_regs
= 0;
3081 set_gdbarch_pseudo_register_read_value (gdbarch
,
3082 amd64_pseudo_register_read_value
);
3083 set_gdbarch_pseudo_register_write (gdbarch
,
3084 amd64_pseudo_register_write
);
3085 set_gdbarch_ax_pseudo_register_collect (gdbarch
,
3086 amd64_ax_pseudo_register_collect
);
3088 set_tdesc_pseudo_register_name (gdbarch
, amd64_pseudo_register_name
);
3090 /* AMD64 has an FPU and 16 SSE registers. */
3091 tdep
->st0_regnum
= AMD64_ST0_REGNUM
;
3092 tdep
->num_xmm_regs
= 16;
3094 /* This is what all the fuss is about. */
3095 set_gdbarch_long_bit (gdbarch
, 64);
3096 set_gdbarch_long_long_bit (gdbarch
, 64);
3097 set_gdbarch_ptr_bit (gdbarch
, 64);
3099 /* In contrast to the i386, on AMD64 a `long double' actually takes
3100 up 128 bits, even though it's still based on the i387 extended
3101 floating-point format which has only 80 significant bits. */
3102 set_gdbarch_long_double_bit (gdbarch
, 128);
3104 set_gdbarch_num_regs (gdbarch
, AMD64_NUM_REGS
);
3106 /* Register numbers of various important registers. */
3107 set_gdbarch_sp_regnum (gdbarch
, AMD64_RSP_REGNUM
); /* %rsp */
3108 set_gdbarch_pc_regnum (gdbarch
, AMD64_RIP_REGNUM
); /* %rip */
3109 set_gdbarch_ps_regnum (gdbarch
, AMD64_EFLAGS_REGNUM
); /* %eflags */
3110 set_gdbarch_fp0_regnum (gdbarch
, AMD64_ST0_REGNUM
); /* %st(0) */
3112 /* The "default" register numbering scheme for AMD64 is referred to
3113 as the "DWARF Register Number Mapping" in the System V psABI.
3114 The preferred debugging format for all known AMD64 targets is
3115 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3116 DWARF-1), but we provide the same mapping just in case. This
3117 mapping is also used for stabs, which GCC does support. */
3118 set_gdbarch_stab_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
3119 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
3121 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3122 be in use on any of the supported AMD64 targets. */
3124 /* Call dummy code. */
3125 set_gdbarch_push_dummy_call (gdbarch
, amd64_push_dummy_call
);
3126 set_gdbarch_frame_align (gdbarch
, amd64_frame_align
);
3127 set_gdbarch_frame_red_zone_size (gdbarch
, 128);
3129 set_gdbarch_convert_register_p (gdbarch
, i387_convert_register_p
);
3130 set_gdbarch_register_to_value (gdbarch
, i387_register_to_value
);
3131 set_gdbarch_value_to_register (gdbarch
, i387_value_to_register
);
3133 set_gdbarch_return_value (gdbarch
, amd64_return_value
);
3135 set_gdbarch_skip_prologue (gdbarch
, amd64_skip_prologue
);
3137 tdep
->record_regmap
= amd64_record_regmap
;
3139 set_gdbarch_dummy_id (gdbarch
, amd64_dummy_id
);
3141 /* Hook the function epilogue frame unwinder. This unwinder is
3142 appended to the list first, so that it supercedes the other
3143 unwinders in function epilogues. */
3144 frame_unwind_prepend_unwinder (gdbarch
, &amd64_epilogue_frame_unwind
);
3146 /* Hook the prologue-based frame unwinders. */
3147 frame_unwind_append_unwinder (gdbarch
, &amd64_sigtramp_frame_unwind
);
3148 frame_unwind_append_unwinder (gdbarch
, &amd64_frame_unwind
);
3149 frame_base_set_default (gdbarch
, &amd64_frame_base
);
3151 set_gdbarch_get_longjmp_target (gdbarch
, amd64_get_longjmp_target
);
3153 set_gdbarch_relocate_instruction (gdbarch
, amd64_relocate_instruction
);
3155 set_gdbarch_gen_return_address (gdbarch
, amd64_gen_return_address
);
3157 /* SystemTap variables and functions. */
3158 set_gdbarch_stap_integer_prefixes (gdbarch
, stap_integer_prefixes
);
3159 set_gdbarch_stap_register_prefixes (gdbarch
, stap_register_prefixes
);
3160 set_gdbarch_stap_register_indirection_prefixes (gdbarch
,
3161 stap_register_indirection_prefixes
);
3162 set_gdbarch_stap_register_indirection_suffixes (gdbarch
,
3163 stap_register_indirection_suffixes
);
3164 set_gdbarch_stap_is_single_operand (gdbarch
,
3165 i386_stap_is_single_operand
);
3166 set_gdbarch_stap_parse_special_token (gdbarch
,
3167 i386_stap_parse_special_token
);
3168 set_gdbarch_insn_is_call (gdbarch
, amd64_insn_is_call
);
3169 set_gdbarch_insn_is_ret (gdbarch
, amd64_insn_is_ret
);
3170 set_gdbarch_insn_is_jump (gdbarch
, amd64_insn_is_jump
);
3174 static struct type
*
3175 amd64_x32_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
3177 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3179 switch (regnum
- tdep
->eax_regnum
)
3181 case AMD64_RBP_REGNUM
: /* %ebp */
3182 case AMD64_RSP_REGNUM
: /* %esp */
3183 return builtin_type (gdbarch
)->builtin_data_ptr
;
3184 case AMD64_RIP_REGNUM
: /* %eip */
3185 return builtin_type (gdbarch
)->builtin_func_ptr
;
3188 return i386_pseudo_register_type (gdbarch
, regnum
);
3192 amd64_x32_init_abi (struct gdbarch_info info
, struct gdbarch
*gdbarch
)
3194 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3195 const struct target_desc
*tdesc
= info
.target_desc
;
3197 amd64_init_abi (info
, gdbarch
);
3199 if (! tdesc_has_registers (tdesc
))
3201 tdep
->tdesc
= tdesc
;
3203 tdep
->num_dword_regs
= 17;
3204 set_tdesc_pseudo_register_type (gdbarch
, amd64_x32_pseudo_register_type
);
3206 set_gdbarch_long_bit (gdbarch
, 32);
3207 set_gdbarch_ptr_bit (gdbarch
, 32);
3210 /* Return the target description for a specified XSAVE feature mask. */
3212 const struct target_desc
*
3213 amd64_target_description (uint64_t xcr0
)
3215 switch (xcr0
& X86_XSTATE_ALL_MASK
)
3217 case X86_XSTATE_AVX_MPX_AVX512_PKU_MASK
:
3218 return tdesc_amd64_avx_mpx_avx512_pku
;
3219 case X86_XSTATE_AVX_AVX512_MASK
:
3220 return tdesc_amd64_avx_avx512
;
3221 case X86_XSTATE_MPX_MASK
:
3222 return tdesc_amd64_mpx
;
3223 case X86_XSTATE_AVX_MPX_MASK
:
3224 return tdesc_amd64_avx_mpx
;
3225 case X86_XSTATE_AVX_MASK
:
3226 return tdesc_amd64_avx
;
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
  initialize_tdesc_amd64_avx ();
  initialize_tdesc_amd64_mpx ();
  initialize_tdesc_amd64_avx_mpx ();
  initialize_tdesc_amd64_avx_avx512 ();
  initialize_tdesc_amd64_avx_mpx_avx512_pku ();

  initialize_tdesc_x32 ();
  initialize_tdesc_x32_avx ();
  initialize_tdesc_x32_avx_avx512 ();
}
3251 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3252 sense that the instruction pointer and data pointer are simply
3253 64-bit offsets into the code segment and the data segment instead
3254 of a selector offset pair. The functions below store the upper 32
3255 bits of these pointers (instead of just the 16-bits of the segment
3258 /* Fill register REGNUM in REGCACHE with the appropriate
3259 floating-point or SSE register value from *FXSAVE. If REGNUM is
3260 -1, do this for all registers. This function masks off any of the
3261 reserved bits in *FXSAVE. */
3264 amd64_supply_fxsave (struct regcache
*regcache
, int regnum
,
3267 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3268 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3270 i387_supply_fxsave (regcache
, regnum
, fxsave
);
3273 && gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3275 const gdb_byte
*regs
= (const gdb_byte
*) fxsave
;
3277 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3278 regcache_raw_supply (regcache
, I387_FISEG_REGNUM (tdep
), regs
+ 12);
3279 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3280 regcache_raw_supply (regcache
, I387_FOSEG_REGNUM (tdep
), regs
+ 20);
3284 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3287 amd64_supply_xsave (struct regcache
*regcache
, int regnum
,
3290 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3291 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3293 i387_supply_xsave (regcache
, regnum
, xsave
);
3296 && gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3298 const gdb_byte
*regs
= (const gdb_byte
*) xsave
;
3300 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3301 regcache_raw_supply (regcache
, I387_FISEG_REGNUM (tdep
),
3303 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3304 regcache_raw_supply (regcache
, I387_FOSEG_REGNUM (tdep
),
3309 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3310 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3311 all registers. This function doesn't touch any of the reserved
3315 amd64_collect_fxsave (const struct regcache
*regcache
, int regnum
,
3318 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3319 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3320 gdb_byte
*regs
= (gdb_byte
*) fxsave
;
3322 i387_collect_fxsave (regcache
, regnum
, fxsave
);
3324 if (gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3326 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3327 regcache_raw_collect (regcache
, I387_FISEG_REGNUM (tdep
), regs
+ 12);
3328 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3329 regcache_raw_collect (regcache
, I387_FOSEG_REGNUM (tdep
), regs
+ 20);
3333 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3336 amd64_collect_xsave (const struct regcache
*regcache
, int regnum
,
3337 void *xsave
, int gcore
)
3339 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3340 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3341 gdb_byte
*regs
= (gdb_byte
*) xsave
;
3343 i387_collect_xsave (regcache
, regnum
, xsave
, gcore
);
3345 if (gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3347 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3348 regcache_raw_collect (regcache
, I387_FISEG_REGNUM (tdep
),
3350 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3351 regcache_raw_collect (regcache
, I387_FOSEG_REGNUM (tdep
),