[gdb/testsuite] Add target board cc-with-gnu-debuglink.exp
[deliverable/binutils-gdb.git] / gdb / amd64-tdep.c
CommitLineData
e53bef9f 1/* Target-dependent code for AMD64.
ce0eebec 2
3666a048 3 Copyright (C) 2001-2021 Free Software Foundation, Inc.
5ae96ec1
MK
4
5 Contributed by Jiri Smid, SuSE Labs.
53e95fcf
JS
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
a9762ec7 11 the Free Software Foundation; either version 3 of the License, or
53e95fcf
JS
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
a9762ec7 20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
53e95fcf
JS
21
22#include "defs.h"
4de283e4
TT
23#include "opcode/i386.h"
24#include "dis-asm.h"
c4f35dd8
MK
25#include "arch-utils.h"
26#include "block.h"
27#include "dummy-frame.h"
4de283e4 28#include "frame.h"
c4f35dd8
MK
29#include "frame-base.h"
30#include "frame-unwind.h"
d55e5aa6
TT
31#include "inferior.h"
32#include "infrun.h"
4de283e4
TT
33#include "gdbcmd.h"
34#include "gdbcore.h"
c4f35dd8 35#include "objfiles.h"
53e95fcf 36#include "regcache.h"
2c261fae 37#include "regset.h"
53e95fcf 38#include "symfile.h"
4de283e4
TT
39#include "disasm.h"
40#include "amd64-tdep.h"
41#include "i387-tdep.h"
268a13a5 42#include "gdbsupport/x86-xstate.h"
4de283e4 43#include <algorithm>
22916b07 44#include "target-descriptions.h"
4de283e4
TT
45#include "arch/amd64.h"
46#include "producer.h"
47#include "ax.h"
48#include "ax-gdb.h"
268a13a5 49#include "gdbsupport/byte-vector.h"
4de283e4 50#include "osabi.h"
1d509aa6 51#include "x86-tdep.h"
257e02d8 52#include "amd64-ravenscar-thread.h"
6710bf39 53
e53bef9f
MK
54/* Note that the AMD64 architecture was previously known as x86-64.
55 The latter is (forever) engraved into the canonical system name as
90f90721 56 returned by config.guess, and used as the name for the AMD64 port
e53bef9f
MK
57 of GNU/Linux. The BSD's have renamed their ports to amd64; they
58 don't like to shout. For GDB we prefer the amd64_-prefix over the
59 x86_64_-prefix since it's so much easier to type. */
60
402ecd56 61/* Register information. */
c4f35dd8 62
/* Names of the raw AMD64 registers, indexed by GDB register number.
   The layout fixes the numbering: GPRs at 0-15, %rip/%eflags/segment
   registers at 16-23, x87 state at 24-39, SSE state at 40-56.  */

static const char * const amd64_register_names[] =
{
  /* General-purpose registers, 0-15.  Note that %r8 really is
     register number 8.  */
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",

  /* Program counter, flags and segment registers, 16-23.  */
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* x87 floating-point stack and control state; %st0 is register
     number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* SSE registers and control state; %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
80
/* Names of the AVX %ymm registers (low bank, 0-15).  */

static const char * const amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

/* Names of the AVX-512 extended %ymm registers (high bank, 16-31).  */

static const char * const amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

/* Names of the raw upper-half pseudo-state of %ymm0-%ymm15, as
   supplied by the target description.  */

static const char * const amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

/* Names of the raw upper halves of the AVX-512 %ymm16-%ymm31.  */

static const char * const amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};
112
/* Names of the MPX bound and configuration registers.  */

static const char * const amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

/* Names of the AVX-512 opmask registers.  */

static const char * const amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

/* Names of the raw upper halves of the AVX-512 %zmm registers.  */

static const char * const amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

/* Names of the full AVX-512 %zmm registers.  */

static const char * const amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

/* Names of the AVX-512 extended %xmm registers (high bank, 16-31).  */

static const char * const amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};

/* Name of the PKU protection-keys register.  */

static const char * const amd64_pkeys_names[] = {
  "pkru"
};
158
c4f35dd8
MK
159/* DWARF Register Number Mapping as defined in the System V psABI,
160 section 3.6. */
53e95fcf 161
e53bef9f 162static int amd64_dwarf_regmap[] =
0e04a514 163{
c4f35dd8 164 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
90f90721
MK
165 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
166 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
167 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
c4f35dd8
MK
168
169 /* Frame Pointer Register RBP. */
90f90721 170 AMD64_RBP_REGNUM,
c4f35dd8
MK
171
172 /* Stack Pointer Register RSP. */
90f90721 173 AMD64_RSP_REGNUM,
c4f35dd8
MK
174
175 /* Extended Integer Registers 8 - 15. */
5b856f36
PM
176 AMD64_R8_REGNUM, /* %r8 */
177 AMD64_R9_REGNUM, /* %r9 */
178 AMD64_R10_REGNUM, /* %r10 */
179 AMD64_R11_REGNUM, /* %r11 */
180 AMD64_R12_REGNUM, /* %r12 */
181 AMD64_R13_REGNUM, /* %r13 */
182 AMD64_R14_REGNUM, /* %r14 */
183 AMD64_R15_REGNUM, /* %r15 */
c4f35dd8 184
59207364 185 /* Return Address RA. Mapped to RIP. */
90f90721 186 AMD64_RIP_REGNUM,
c4f35dd8
MK
187
188 /* SSE Registers 0 - 7. */
90f90721
MK
189 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
190 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
191 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
192 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
c4f35dd8
MK
193
194 /* Extended SSE Registers 8 - 15. */
90f90721
MK
195 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
196 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
197 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
198 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
c4f35dd8
MK
199
200 /* Floating Point Registers 0-7. */
90f90721
MK
201 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
202 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
203 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
c6f4c129 204 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
f7ca3fcf
PM
205
206 /* MMX Registers 0 - 7.
207 We have to handle those registers specifically, as their register
208 number within GDB depends on the target (or they may even not be
209 available at all). */
210 -1, -1, -1, -1, -1, -1, -1, -1,
211
c6f4c129
JB
212 /* Control and Status Flags Register. */
213 AMD64_EFLAGS_REGNUM,
214
215 /* Selector Registers. */
216 AMD64_ES_REGNUM,
217 AMD64_CS_REGNUM,
218 AMD64_SS_REGNUM,
219 AMD64_DS_REGNUM,
220 AMD64_FS_REGNUM,
221 AMD64_GS_REGNUM,
222 -1,
223 -1,
224
225 /* Segment Base Address Registers. */
226 -1,
227 -1,
228 -1,
229 -1,
230
231 /* Special Selector Registers. */
232 -1,
233 -1,
234
235 /* Floating Point Control Registers. */
236 AMD64_MXCSR_REGNUM,
237 AMD64_FCTRL_REGNUM,
238 AMD64_FSTAT_REGNUM
c4f35dd8 239};
0e04a514 240
e53bef9f
MK
241static const int amd64_dwarf_regmap_len =
242 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
0e04a514 243
c4f35dd8
MK
244/* Convert DWARF register number REG to the appropriate register
245 number used by GDB. */
26abbdc4 246
c4f35dd8 247static int
d3f73121 248amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
53e95fcf 249{
a055a187
L
250 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
251 int ymm0_regnum = tdep->ymm0_regnum;
c4f35dd8 252 int regnum = -1;
53e95fcf 253
16aff9a6 254 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
e53bef9f 255 regnum = amd64_dwarf_regmap[reg];
53e95fcf 256
0fde2c53 257 if (ymm0_regnum >= 0
a055a187
L
258 && i386_xmm_regnum_p (gdbarch, regnum))
259 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
c4f35dd8
MK
260
261 return regnum;
53e95fcf 262}
d532c08f 263
35669430
DE
264/* Map architectural register numbers to gdb register numbers. */
265
266static const int amd64_arch_regmap[16] =
267{
268 AMD64_RAX_REGNUM, /* %rax */
269 AMD64_RCX_REGNUM, /* %rcx */
270 AMD64_RDX_REGNUM, /* %rdx */
271 AMD64_RBX_REGNUM, /* %rbx */
272 AMD64_RSP_REGNUM, /* %rsp */
273 AMD64_RBP_REGNUM, /* %rbp */
274 AMD64_RSI_REGNUM, /* %rsi */
275 AMD64_RDI_REGNUM, /* %rdi */
276 AMD64_R8_REGNUM, /* %r8 */
277 AMD64_R9_REGNUM, /* %r9 */
278 AMD64_R10_REGNUM, /* %r10 */
279 AMD64_R11_REGNUM, /* %r11 */
280 AMD64_R12_REGNUM, /* %r12 */
281 AMD64_R13_REGNUM, /* %r13 */
282 AMD64_R14_REGNUM, /* %r14 */
283 AMD64_R15_REGNUM /* %r15 */
284};
285
286static const int amd64_arch_regmap_len =
287 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
288
289/* Convert architectural register number REG to the appropriate register
290 number used by GDB. */
291
292static int
293amd64_arch_reg_to_regnum (int reg)
294{
295 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
296
297 return amd64_arch_regmap[reg];
298}
299
/* Register names for byte pseudo-registers.  The first sixteen are
   the low bytes of the GPRs; the last four are the legacy high-byte
   registers of %rax, %rbx, %rcx and %rdx.  */

static const char * const amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  The %sp slot is
   deliberately empty.  */

static const char * const amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers, including the low 32
   bits of the program counter.  */

static const char * const amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
328
329/* Return the name of register REGNUM. */
330
331static const char *
332amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
333{
334 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
335 if (i386_byte_regnum_p (gdbarch, regnum))
336 return amd64_byte_names[regnum - tdep->al_regnum];
01f9f808
MS
337 else if (i386_zmm_regnum_p (gdbarch, regnum))
338 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
a055a187
L
339 else if (i386_ymm_regnum_p (gdbarch, regnum))
340 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
01f9f808
MS
341 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
342 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
1ba53b71
L
343 else if (i386_word_regnum_p (gdbarch, regnum))
344 return amd64_word_names[regnum - tdep->ax_regnum];
345 else if (i386_dword_regnum_p (gdbarch, regnum))
346 return amd64_dword_names[regnum - tdep->eax_regnum];
347 else
348 return i386_pseudo_register_name (gdbarch, regnum);
349}
350
3543a589
TT
351static struct value *
352amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
849d0ba8 353 readable_regcache *regcache,
3543a589 354 int regnum)
1ba53b71 355{
1ba53b71 356 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3543a589 357
925047fe 358 value *result_value = allocate_value (register_type (gdbarch, regnum));
3543a589
TT
359 VALUE_LVAL (result_value) = lval_register;
360 VALUE_REGNUM (result_value) = regnum;
925047fe 361 gdb_byte *buf = value_contents_raw (result_value);
1ba53b71
L
362
363 if (i386_byte_regnum_p (gdbarch, regnum))
364 {
365 int gpnum = regnum - tdep->al_regnum;
366
367 /* Extract (always little endian). */
fe01d668
L
368 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
369 {
925047fe
SM
370 gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
371 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
372
fe01d668 373 /* Special handling for AH, BH, CH, DH. */
925047fe 374 register_status status = regcache->raw_read (gpnum, raw_buf);
05d1431c
PA
375 if (status == REG_VALID)
376 memcpy (buf, raw_buf + 1, 1);
3543a589
TT
377 else
378 mark_value_bytes_unavailable (result_value, 0,
379 TYPE_LENGTH (value_type (result_value)));
fe01d668
L
380 }
381 else
382 {
925047fe
SM
383 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
384 register_status status = regcache->raw_read (gpnum, raw_buf);
05d1431c
PA
385 if (status == REG_VALID)
386 memcpy (buf, raw_buf, 1);
3543a589
TT
387 else
388 mark_value_bytes_unavailable (result_value, 0,
389 TYPE_LENGTH (value_type (result_value)));
fe01d668 390 }
1ba53b71
L
391 }
392 else if (i386_dword_regnum_p (gdbarch, regnum))
393 {
394 int gpnum = regnum - tdep->eax_regnum;
925047fe 395 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
1ba53b71 396 /* Extract (always little endian). */
925047fe 397 register_status status = regcache->raw_read (gpnum, raw_buf);
05d1431c
PA
398 if (status == REG_VALID)
399 memcpy (buf, raw_buf, 4);
3543a589
TT
400 else
401 mark_value_bytes_unavailable (result_value, 0,
402 TYPE_LENGTH (value_type (result_value)));
1ba53b71
L
403 }
404 else
3543a589
TT
405 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
406 result_value);
407
408 return result_value;
1ba53b71
L
409}
410
411static void
412amd64_pseudo_register_write (struct gdbarch *gdbarch,
413 struct regcache *regcache,
414 int regnum, const gdb_byte *buf)
415{
1ba53b71
L
416 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
417
418 if (i386_byte_regnum_p (gdbarch, regnum))
419 {
420 int gpnum = regnum - tdep->al_regnum;
421
fe01d668
L
422 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
423 {
925047fe
SM
424 gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
425 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
426
fe01d668 427 /* Read ... AH, BH, CH, DH. */
925047fe 428 regcache->raw_read (gpnum, raw_buf);
fe01d668
L
429 /* ... Modify ... (always little endian). */
430 memcpy (raw_buf + 1, buf, 1);
431 /* ... Write. */
925047fe 432 regcache->raw_write (gpnum, raw_buf);
fe01d668
L
433 }
434 else
435 {
925047fe
SM
436 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
437
fe01d668 438 /* Read ... */
0b883586 439 regcache->raw_read (gpnum, raw_buf);
fe01d668
L
440 /* ... Modify ... (always little endian). */
441 memcpy (raw_buf, buf, 1);
442 /* ... Write. */
10eaee5f 443 regcache->raw_write (gpnum, raw_buf);
fe01d668 444 }
1ba53b71
L
445 }
446 else if (i386_dword_regnum_p (gdbarch, regnum))
447 {
448 int gpnum = regnum - tdep->eax_regnum;
925047fe 449 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
1ba53b71
L
450
451 /* Read ... */
0b883586 452 regcache->raw_read (gpnum, raw_buf);
1ba53b71
L
453 /* ... Modify ... (always little endian). */
454 memcpy (raw_buf, buf, 4);
455 /* ... Write. */
10eaee5f 456 regcache->raw_write (gpnum, raw_buf);
1ba53b71
L
457 }
458 else
459 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
460}
461
62e5fd57
MK
462/* Implement the 'ax_pseudo_register_collect' gdbarch method. */
463
464static int
465amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
466 struct agent_expr *ax, int regnum)
467{
468 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
469
470 if (i386_byte_regnum_p (gdbarch, regnum))
471 {
472 int gpnum = regnum - tdep->al_regnum;
473
474 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
475 ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
476 else
477 ax_reg_mask (ax, gpnum);
478 return 0;
479 }
480 else if (i386_dword_regnum_p (gdbarch, regnum))
481 {
482 int gpnum = regnum - tdep->eax_regnum;
483
484 ax_reg_mask (ax, gpnum);
485 return 0;
486 }
487 else
488 return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
489}
490
53e95fcf
JS
491\f
492
/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2, following the merge
   rules of the psABI (section 3.2.3).  The rules are applied in
   order; the first one that matches decides the result.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): Equal classes merge to themselves.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): NO_CLASS merges to the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): MEMORY wins over everything.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): INTEGER wins over the remaining classes.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): Any x87-related class forces MEMORY.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
542
fe978cb0 543static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
bf4d6c1c 544
4aa866af 545/* Return true if TYPE is a structure or union with unaligned fields. */
79b1ab3d 546
4aa866af
LS
547static bool
548amd64_has_unaligned_fields (struct type *type)
79b1ab3d 549{
78134374
SM
550 if (type->code () == TYPE_CODE_STRUCT
551 || type->code () == TYPE_CODE_UNION)
4aa866af 552 {
1f704f76 553 for (int i = 0; i < type->num_fields (); i++)
4aa866af 554 {
940da03e 555 struct type *subtype = check_typedef (type->field (i).type ());
4aa866af
LS
556 int bitpos = TYPE_FIELD_BITPOS (type, i);
557 int align = type_align(subtype);
558
a59240a4
TT
559 /* Ignore static fields, empty fields (for example nested
560 empty structures), and bitfields (these are handled by
561 the caller). */
ceacbf6e 562 if (field_is_static (&type->field (i))
4aa866af 563 || (TYPE_FIELD_BITSIZE (type, i) == 0
a59240a4
TT
564 && TYPE_LENGTH (subtype) == 0)
565 || TYPE_FIELD_PACKED (type, i))
4aa866af
LS
566 continue;
567
568 if (bitpos % 8 != 0)
569 return true;
570
571 int bytepos = bitpos / 8;
572 if (bytepos % align != 0)
573 return true;
574
a59240a4 575 if (amd64_has_unaligned_fields (subtype))
4aa866af
LS
576 return true;
577 }
578 }
79b1ab3d 579
4aa866af 580 return false;
79b1ab3d
MK
581}
582
d10eccaa
TV
583/* Classify field I of TYPE starting at BITOFFSET according to the rules for
584 structures and union types, and store the result in THECLASS. */
585
586static void
587amd64_classify_aggregate_field (struct type *type, int i,
588 enum amd64_reg_class theclass[2],
589 unsigned int bitoffset)
590{
940da03e 591 struct type *subtype = check_typedef (type->field (i).type ());
d10eccaa
TV
592 int bitpos = bitoffset + TYPE_FIELD_BITPOS (type, i);
593 int pos = bitpos / 64;
594 enum amd64_reg_class subclass[2];
595 int bitsize = TYPE_FIELD_BITSIZE (type, i);
596 int endpos;
597
598 if (bitsize == 0)
599 bitsize = TYPE_LENGTH (subtype) * 8;
600 endpos = (bitpos + bitsize - 1) / 64;
601
602 /* Ignore static fields, or empty fields, for example nested
603 empty structures.*/
ceacbf6e 604 if (field_is_static (&type->field (i)) || bitsize == 0)
d10eccaa
TV
605 return;
606
78134374
SM
607 if (subtype->code () == TYPE_CODE_STRUCT
608 || subtype->code () == TYPE_CODE_UNION)
d10eccaa
TV
609 {
610 /* Each field of an object is classified recursively. */
611 int j;
1f704f76 612 for (j = 0; j < subtype->num_fields (); j++)
d10eccaa
TV
613 amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
614 return;
615 }
616
617 gdb_assert (pos == 0 || pos == 1);
618
619 amd64_classify (subtype, subclass);
620 theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
621 if (bitsize <= 64 && pos == 0 && endpos == 1)
622 /* This is a bit of an odd case: We have a field that would
623 normally fit in one of the two eightbytes, except that
624 it is placed in a way that this field straddles them.
625 This has been seen with a structure containing an array.
626
627 The ABI is a bit unclear in this case, but we assume that
628 this field's class (stored in subclass[0]) must also be merged
629 into class[1]. In other words, our field has a piece stored
630 in the second eight-byte, and thus its class applies to
631 the second eight-byte as well.
632
633 In the case where the field length exceeds 8 bytes,
634 it should not be necessary to merge the field class
635 into class[1]. As LEN > 8, subclass[1] is necessarily
636 different from AMD64_NO_CLASS. If subclass[1] is equal
637 to subclass[0], then the normal class[1]/subclass[1]
638 merging will take care of everything. For subclass[1]
639 to be different from subclass[0], I can only see the case
640 where we have a SSE/SSEUP or X87/X87UP pair, which both
641 use up all 16 bytes of the aggregate, and are already
642 handled just fine (because each portion sits on its own
643 8-byte). */
644 theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
645 if (pos == 0)
646 theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
647}
648
efb1c01c
MK
649/* Classify TYPE according to the rules for aggregate (structures and
650 arrays) and union types, and store the result in CLASS. */
c4f35dd8
MK
651
652static void
fe978cb0 653amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
53e95fcf 654{
4aa866af 655 /* 1. If the size of an object is larger than two eightbytes, or it has
dda83cd7 656 unaligned fields, it has class memory. */
4aa866af 657 if (TYPE_LENGTH (type) > 16 || amd64_has_unaligned_fields (type))
53e95fcf 658 {
fe978cb0 659 theclass[0] = theclass[1] = AMD64_MEMORY;
efb1c01c 660 return;
53e95fcf 661 }
efb1c01c
MK
662
663 /* 2. Both eightbytes get initialized to class NO_CLASS. */
fe978cb0 664 theclass[0] = theclass[1] = AMD64_NO_CLASS;
efb1c01c
MK
665
666 /* 3. Each field of an object is classified recursively so that
dda83cd7
SM
667 always two fields are considered. The resulting class is
668 calculated according to the classes of the fields in the
669 eightbyte: */
efb1c01c 670
78134374 671 if (type->code () == TYPE_CODE_ARRAY)
8ffd9b1b 672 {
efb1c01c
MK
673 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
674
675 /* All fields in an array have the same type. */
fe978cb0
PA
676 amd64_classify (subtype, theclass);
677 if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
678 theclass[1] = theclass[0];
8ffd9b1b 679 }
53e95fcf
JS
680 else
681 {
efb1c01c 682 int i;
53e95fcf 683
efb1c01c 684 /* Structure or union. */
78134374
SM
685 gdb_assert (type->code () == TYPE_CODE_STRUCT
686 || type->code () == TYPE_CODE_UNION);
efb1c01c 687
1f704f76 688 for (i = 0; i < type->num_fields (); i++)
d10eccaa 689 amd64_classify_aggregate_field (type, i, theclass, 0);
53e95fcf 690 }
efb1c01c
MK
691
692 /* 4. Then a post merger cleanup is done: */
693
694 /* Rule (a): If one of the classes is MEMORY, the whole argument is
695 passed in memory. */
fe978cb0
PA
696 if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
697 theclass[0] = theclass[1] = AMD64_MEMORY;
efb1c01c 698
177b42fe 699 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
efb1c01c 700 SSE. */
fe978cb0
PA
701 if (theclass[0] == AMD64_SSEUP)
702 theclass[0] = AMD64_SSE;
703 if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
704 theclass[1] = AMD64_SSE;
efb1c01c
MK
705}
706
707/* Classify TYPE, and store the result in CLASS. */
708
bf4d6c1c 709static void
fe978cb0 710amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
efb1c01c 711{
78134374 712 enum type_code code = type->code ();
efb1c01c
MK
713 int len = TYPE_LENGTH (type);
714
fe978cb0 715 theclass[0] = theclass[1] = AMD64_NO_CLASS;
efb1c01c
MK
716
717 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
5a7225ed
JB
718 long, long long, and pointers are in the INTEGER class. Similarly,
719 range types, used by languages such as Ada, are also in the INTEGER
720 class. */
efb1c01c 721 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
b929c77f 722 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
9db13498 723 || code == TYPE_CODE_CHAR
aa006118 724 || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
efb1c01c 725 && (len == 1 || len == 2 || len == 4 || len == 8))
fe978cb0 726 theclass[0] = AMD64_INTEGER;
efb1c01c 727
5daa78cc
TJB
728 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
729 are in class SSE. */
730 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
731 && (len == 4 || len == 8))
efb1c01c 732 /* FIXME: __m64 . */
fe978cb0 733 theclass[0] = AMD64_SSE;
efb1c01c 734
5daa78cc
TJB
735 /* Arguments of types __float128, _Decimal128 and __m128 are split into
736 two halves. The least significant ones belong to class SSE, the most
efb1c01c 737 significant one to class SSEUP. */
5daa78cc
TJB
738 else if (code == TYPE_CODE_DECFLOAT && len == 16)
739 /* FIXME: __float128, __m128. */
fe978cb0 740 theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;
efb1c01c
MK
741
742 /* The 64-bit mantissa of arguments of type long double belongs to
743 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
744 class X87UP. */
745 else if (code == TYPE_CODE_FLT && len == 16)
746 /* Class X87 and X87UP. */
fe978cb0 747 theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;
efb1c01c 748
7f7930dd
MK
749 /* Arguments of complex T where T is one of the types float or
750 double get treated as if they are implemented as:
751
752 struct complexT {
753 T real;
754 T imag;
5f52445b
YQ
755 };
756
757 */
7f7930dd 758 else if (code == TYPE_CODE_COMPLEX && len == 8)
fe978cb0 759 theclass[0] = AMD64_SSE;
7f7930dd 760 else if (code == TYPE_CODE_COMPLEX && len == 16)
fe978cb0 761 theclass[0] = theclass[1] = AMD64_SSE;
7f7930dd
MK
762
763 /* A variable of type complex long double is classified as type
764 COMPLEX_X87. */
765 else if (code == TYPE_CODE_COMPLEX && len == 32)
fe978cb0 766 theclass[0] = AMD64_COMPLEX_X87;
7f7930dd 767
efb1c01c
MK
768 /* Aggregates. */
769 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
770 || code == TYPE_CODE_UNION)
fe978cb0 771 amd64_classify_aggregate (type, theclass);
efb1c01c
MK
772}
773
774static enum return_value_convention
6a3a010b 775amd64_return_value (struct gdbarch *gdbarch, struct value *function,
c055b101 776 struct type *type, struct regcache *regcache,
42835c2b 777 gdb_byte *readbuf, const gdb_byte *writebuf)
efb1c01c 778{
fe978cb0 779 enum amd64_reg_class theclass[2];
efb1c01c 780 int len = TYPE_LENGTH (type);
90f90721
MK
781 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
782 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
efb1c01c
MK
783 int integer_reg = 0;
784 int sse_reg = 0;
785 int i;
786
787 gdb_assert (!(readbuf && writebuf));
788
789 /* 1. Classify the return type with the classification algorithm. */
fe978cb0 790 amd64_classify (type, theclass);
efb1c01c
MK
791
792 /* 2. If the type has class MEMORY, then the caller provides space
6fa57a7d 793 for the return value and passes the address of this storage in
0963b4bd 794 %rdi as if it were the first argument to the function. In effect,
6fa57a7d
MK
795 this address becomes a hidden first argument.
796
797 On return %rax will contain the address that has been passed in
798 by the caller in %rdi. */
fe978cb0 799 if (theclass[0] == AMD64_MEMORY)
6fa57a7d
MK
800 {
801 /* As indicated by the comment above, the ABI guarantees that we
dda83cd7
SM
802 can always find the return value just after the function has
803 returned. */
6fa57a7d
MK
804
805 if (readbuf)
806 {
807 ULONGEST addr;
808
809 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
810 read_memory (addr, readbuf, TYPE_LENGTH (type));
811 }
812
813 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
814 }
efb1c01c 815
7f7930dd 816 /* 8. If the class is COMPLEX_X87, the real part of the value is
dda83cd7 817 returned in %st0 and the imaginary part in %st1. */
fe978cb0 818 if (theclass[0] == AMD64_COMPLEX_X87)
7f7930dd
MK
819 {
820 if (readbuf)
821 {
0b883586
SM
822 regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
823 regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
7f7930dd
MK
824 }
825
826 if (writebuf)
827 {
828 i387_return_value (gdbarch, regcache);
10eaee5f
SM
829 regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
830 regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);
7f7930dd
MK
831
832 /* Fix up the tag word such that both %st(0) and %st(1) are
833 marked as valid. */
834 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
835 }
836
837 return RETURN_VALUE_REGISTER_CONVENTION;
838 }
839
fe978cb0 840 gdb_assert (theclass[1] != AMD64_MEMORY);
bad43aa5 841 gdb_assert (len <= 16);
efb1c01c
MK
842
843 for (i = 0; len > 0; i++, len -= 8)
844 {
845 int regnum = -1;
846 int offset = 0;
847
fe978cb0 848 switch (theclass[i])
efb1c01c
MK
849 {
850 case AMD64_INTEGER:
851 /* 3. If the class is INTEGER, the next available register
852 of the sequence %rax, %rdx is used. */
853 regnum = integer_regnum[integer_reg++];
854 break;
855
856 case AMD64_SSE:
857 /* 4. If the class is SSE, the next available SSE register
dda83cd7 858 of the sequence %xmm0, %xmm1 is used. */
efb1c01c
MK
859 regnum = sse_regnum[sse_reg++];
860 break;
861
862 case AMD64_SSEUP:
863 /* 5. If the class is SSEUP, the eightbyte is passed in the
864 upper half of the last used SSE register. */
865 gdb_assert (sse_reg > 0);
866 regnum = sse_regnum[sse_reg - 1];
867 offset = 8;
868 break;
869
870 case AMD64_X87:
871 /* 6. If the class is X87, the value is returned on the X87
dda83cd7 872 stack in %st0 as 80-bit x87 number. */
90f90721 873 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
874 if (writebuf)
875 i387_return_value (gdbarch, regcache);
876 break;
877
878 case AMD64_X87UP:
879 /* 7. If the class is X87UP, the value is returned together
dda83cd7 880 with the previous X87 value in %st0. */
fe978cb0 881 gdb_assert (i > 0 && theclass[0] == AMD64_X87);
90f90721 882 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
883 offset = 8;
884 len = 2;
885 break;
886
887 case AMD64_NO_CLASS:
888 continue;
889
890 default:
891 gdb_assert (!"Unexpected register class.");
892 }
893
894 gdb_assert (regnum != -1);
895
896 if (readbuf)
502fe83e
SM
897 regcache->raw_read_part (regnum, offset, std::min (len, 8),
898 readbuf + i * 8);
efb1c01c 899 if (writebuf)
4f0420fd
SM
900 regcache->raw_write_part (regnum, offset, std::min (len, 8),
901 writebuf + i * 8);
efb1c01c
MK
902 }
903
904 return RETURN_VALUE_REGISTER_CONVENTION;
53e95fcf
JS
905}
906\f
907
720aa428 908static CORE_ADDR
cf84fa6b
AH
909amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
910 CORE_ADDR sp, function_call_return_method return_method)
720aa428 911{
bf4d6c1c
JB
912 static int integer_regnum[] =
913 {
914 AMD64_RDI_REGNUM, /* %rdi */
915 AMD64_RSI_REGNUM, /* %rsi */
916 AMD64_RDX_REGNUM, /* %rdx */
917 AMD64_RCX_REGNUM, /* %rcx */
5b856f36
PM
918 AMD64_R8_REGNUM, /* %r8 */
919 AMD64_R9_REGNUM /* %r9 */
bf4d6c1c 920 };
720aa428
MK
921 static int sse_regnum[] =
922 {
923 /* %xmm0 ... %xmm7 */
90f90721
MK
924 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
925 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
926 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
927 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
720aa428 928 };
224c3ddb 929 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
720aa428
MK
930 int num_stack_args = 0;
931 int num_elements = 0;
932 int element = 0;
933 int integer_reg = 0;
934 int sse_reg = 0;
935 int i;
936
6470d250 937 /* Reserve a register for the "hidden" argument. */
cf84fa6b 938if (return_method == return_method_struct)
6470d250
MK
939 integer_reg++;
940
720aa428
MK
941 for (i = 0; i < nargs; i++)
942 {
4991999e 943 struct type *type = value_type (args[i]);
720aa428 944 int len = TYPE_LENGTH (type);
fe978cb0 945 enum amd64_reg_class theclass[2];
720aa428
MK
946 int needed_integer_regs = 0;
947 int needed_sse_regs = 0;
948 int j;
949
950 /* Classify argument. */
fe978cb0 951 amd64_classify (type, theclass);
720aa428
MK
952
953 /* Calculate the number of integer and SSE registers needed for
dda83cd7 954 this argument. */
720aa428
MK
955 for (j = 0; j < 2; j++)
956 {
fe978cb0 957 if (theclass[j] == AMD64_INTEGER)
720aa428 958 needed_integer_regs++;
fe978cb0 959 else if (theclass[j] == AMD64_SSE)
720aa428
MK
960 needed_sse_regs++;
961 }
962
963 /* Check whether enough registers are available, and if the
dda83cd7 964 argument should be passed in registers at all. */
bf4d6c1c 965 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
720aa428
MK
966 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
967 || (needed_integer_regs == 0 && needed_sse_regs == 0))
968 {
969 /* The argument will be passed on the stack. */
970 num_elements += ((len + 7) / 8);
849e9755 971 stack_args[num_stack_args++] = args[i];
720aa428
MK
972 }
973 else
974 {
975 /* The argument will be passed in registers. */
d8de1ef7
MK
976 const gdb_byte *valbuf = value_contents (args[i]);
977 gdb_byte buf[8];
720aa428
MK
978
979 gdb_assert (len <= 16);
980
981 for (j = 0; len > 0; j++, len -= 8)
982 {
983 int regnum = -1;
984 int offset = 0;
985
fe978cb0 986 switch (theclass[j])
720aa428
MK
987 {
988 case AMD64_INTEGER:
bf4d6c1c 989 regnum = integer_regnum[integer_reg++];
720aa428
MK
990 break;
991
992 case AMD64_SSE:
993 regnum = sse_regnum[sse_reg++];
994 break;
995
996 case AMD64_SSEUP:
997 gdb_assert (sse_reg > 0);
998 regnum = sse_regnum[sse_reg - 1];
999 offset = 8;
1000 break;
1001
745ff14e
TV
1002 case AMD64_NO_CLASS:
1003 continue;
1004
720aa428
MK
1005 default:
1006 gdb_assert (!"Unexpected register class.");
1007 }
1008
1009 gdb_assert (regnum != -1);
1010 memset (buf, 0, sizeof buf);
325fac50 1011 memcpy (buf, valbuf + j * 8, std::min (len, 8));
4f0420fd 1012 regcache->raw_write_part (regnum, offset, 8, buf);
720aa428
MK
1013 }
1014 }
1015 }
1016
1017 /* Allocate space for the arguments on the stack. */
1018 sp -= num_elements * 8;
1019
1020 /* The psABI says that "The end of the input argument area shall be
1021 aligned on a 16 byte boundary." */
1022 sp &= ~0xf;
1023
1024 /* Write out the arguments to the stack. */
1025 for (i = 0; i < num_stack_args; i++)
1026 {
4991999e 1027 struct type *type = value_type (stack_args[i]);
d8de1ef7 1028 const gdb_byte *valbuf = value_contents (stack_args[i]);
849e9755
JB
1029 int len = TYPE_LENGTH (type);
1030
1031 write_memory (sp + element * 8, valbuf, len);
1032 element += ((len + 7) / 8);
720aa428
MK
1033 }
1034
1035 /* The psABI says that "For calls that may call functions that use
1036 varargs or stdargs (prototype-less calls or calls to functions
1037 containing ellipsis (...) in the declaration) %al is used as
1038 hidden argument to specify the number of SSE registers used. */
90f90721 1039 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
720aa428
MK
1040 return sp;
1041}
1042
c4f35dd8 1043static CORE_ADDR
7d9b040b 1044amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
e53bef9f
MK
1045 struct regcache *regcache, CORE_ADDR bp_addr,
1046 int nargs, struct value **args, CORE_ADDR sp,
cf84fa6b
AH
1047 function_call_return_method return_method,
1048 CORE_ADDR struct_addr)
53e95fcf 1049{
e17a4113 1050 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
d8de1ef7 1051 gdb_byte buf[8];
c4f35dd8 1052
4a612d6f
WT
1053 /* BND registers can be in arbitrary values at the moment of the
1054 inferior call. This can cause boundary violations that are not
1055 due to a real bug or even desired by the user. The best to be done
1056 is set the BND registers to allow access to the whole memory, INIT
1057 state, before pushing the inferior call. */
1058 i387_reset_bnd_regs (gdbarch, regcache);
1059
c4f35dd8 1060 /* Pass arguments. */
cf84fa6b 1061 sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);
c4f35dd8
MK
1062
1063 /* Pass "hidden" argument". */
cf84fa6b 1064 if (return_method == return_method_struct)
c4f35dd8 1065 {
e17a4113 1066 store_unsigned_integer (buf, 8, byte_order, struct_addr);
b66f5587 1067 regcache->cooked_write (AMD64_RDI_REGNUM, buf);
c4f35dd8
MK
1068 }
1069
1070 /* Store return address. */
1071 sp -= 8;
e17a4113 1072 store_unsigned_integer (buf, 8, byte_order, bp_addr);
c4f35dd8
MK
1073 write_memory (sp, buf, 8);
1074
1075 /* Finally, update the stack pointer... */
e17a4113 1076 store_unsigned_integer (buf, 8, byte_order, sp);
b66f5587 1077 regcache->cooked_write (AMD64_RSP_REGNUM, buf);
c4f35dd8
MK
1078
1079 /* ...and fake a frame pointer. */
b66f5587 1080 regcache->cooked_write (AMD64_RBP_REGNUM, buf);
c4f35dd8 1081
3e210248 1082 return sp + 16;
53e95fcf 1083}
c4f35dd8 1084\f
35669430
DE
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes:
   the location of any REX/VEX prefix, the opcode bytes and the
   ModRM byte, each as offsets into the raw instruction bytes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the REX/VEX instruction encoding prefix or -1 if
     not present.  */
  int enc_prefix_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  (Not owned; points at the caller's buffer.)  */
  gdb_byte *raw_insn;
};
1105
1152d984
SM
/* Per-step state for displaced stepping on amd64: the (possibly
   modified) copy of the instruction, its decoded details, and the
   scratch register that temporarily replaces %rip for rip-relative
   instructions.  */

struct amd64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  amd64_displaced_step_copy_insn_closure (int insn_buf_len)
  : insn_buf (insn_buf_len, 0)
  {}

  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used = 0;	/* Non-zero if a scratch register is in use.  */
  int tmp_regno;	/* GDB regnum of the scratch register.  */
  ULONGEST tmp_save;	/* Original value to restore after the step.  */

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* The possibly modified insn.  */
  gdb::byte_vector insn_buf;
};
1124
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).

   Each entry is 1 if the opcode with that value is followed by a
   ModRM byte, 0 otherwise.  */

static const unsigned char onebyte_has_modrm[256] = {
  /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
  /* ------------------------------- */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /* ------------------------------- */
  /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};

/* Same, for the second byte of two-byte (0x0f-prefixed) opcodes.  */

static const unsigned char twobyte_has_modrm[256] = {
  /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
  /* ------------------------------- */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /* ------------------------------- */
  /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
1174
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

/* Return non-zero if PFX is a REX instruction-encoding prefix byte
   (0x40 ... 0x4f).  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
1182
50a1fdd5
PA
/* True if PFX is the start of the 2-byte VEX prefix (0xc5).  */

static bool
vex2_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc5;
}

/* True if PFX is the start of the 3-byte VEX prefix (0xc4).  */

static bool
vex3_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc4;
}
1198
35669430
DE
1199/* Skip the legacy instruction prefixes in INSN.
1200 We assume INSN is properly sentineled so we don't have to worry
1201 about falling off the end of the buffer. */
1202
1203static gdb_byte *
1903f0e6 1204amd64_skip_prefixes (gdb_byte *insn)
35669430
DE
1205{
1206 while (1)
1207 {
1208 switch (*insn)
1209 {
1210 case DATA_PREFIX_OPCODE:
1211 case ADDR_PREFIX_OPCODE:
1212 case CS_PREFIX_OPCODE:
1213 case DS_PREFIX_OPCODE:
1214 case ES_PREFIX_OPCODE:
1215 case FS_PREFIX_OPCODE:
1216 case GS_PREFIX_OPCODE:
1217 case SS_PREFIX_OPCODE:
1218 case LOCK_PREFIX_OPCODE:
1219 case REPE_PREFIX_OPCODE:
1220 case REPNE_PREFIX_OPCODE:
1221 ++insn;
1222 continue;
1223 default:
1224 break;
1225 }
1226 break;
1227 }
1228
1229 return insn;
1230}
1231
35669430
DE
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      /* Mod != 3 with R/M == 4 means a SIB byte follows.  */
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* Only the low 8 bits can be set, and at least one must be clear.  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
1305
/* Extract the details of INSN that we need: locate any REX/VEX prefix,
   the opcode, and the ModRM byte, recording their offsets in DETAILS.
   INSN must be sentineled (see amd64_skip_prefixes).  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->enc_prefix_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX/VEX instruction encoding prefixes.  */
  if (rex_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      ++insn;
    }
  else if (vex2_prefix_p (*insn))
    {
      /* Don't record the offset in this case because this prefix has
	 no REX.B equivalent.  */
      insn += 2;
    }
  else if (vex3_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      insn += 3;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  /* The ModRM byte, when present, immediately follows the opcode.  */
  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
1380
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.
   The chosen base register's original value is saved in DSC so the
   fixup phase can restore it after the step.  */

static void
fixup_riprel (struct gdbarch *gdbarch,
	      amd64_displaced_step_copy_insn_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;

  /* REX.B should be unset (VEX.!B set) as we were using rip-relative
     addressing, but ensure it's unset (set for VEX) anyway, tmp_regno
     is not r8-r15.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  /* Save the scratch register's value so it can be restored later.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: Mod = 10 (reg + disp32),
     R/M = the scratch register.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  displaced_debug_printf ("%%rip-relative addressing used.");
  displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
			  dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			  paddress (gdbarch, rip_base));
}
1449
1450static void
1451fixup_displaced_copy (struct gdbarch *gdbarch,
1152d984 1452 amd64_displaced_step_copy_insn_closure *dsc,
35669430
DE
1453 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1454{
1455 const struct amd64_insn *details = &dsc->insn_details;
1456
1457 if (details->modrm_offset != -1)
1458 {
1459 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1460
1461 if ((modrm & 0xc7) == 0x05)
1462 {
1463 /* The insn uses rip-relative addressing.
1464 Deal with it. */
1465 fixup_riprel (gdbarch, dsc, from, to, regs);
1466 }
1467 }
1468}
1469
/* The gdbarch displaced_step_copy_insn method: copy the instruction at
   FROM into the scratch area at TO, adjusting it (rip-relative
   addressing, syscall quirk) so it can execute there.  Ownership of
   the returned closure passes to the caller.  */

displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
    (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);

  write_memory (to, buf, len);

  displaced_debug_printf ("copy %s->%s: %s",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  displaced_step_dump_bytes (buf, len).c_str ());

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
1516
1517static int
1518amd64_absolute_jmp_p (const struct amd64_insn *details)
1519{
1520 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1521
1522 if (insn[0] == 0xff)
1523 {
1524 /* jump near, absolute indirect (/4) */
1525 if ((insn[1] & 0x38) == 0x20)
1526 return 1;
1527
1528 /* jump far, absolute indirect (/5) */
1529 if ((insn[1] & 0x38) == 0x28)
1530 return 1;
1531 }
1532
1533 return 0;
1534}
1535
c2170eef
MM
1536/* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1537
1538static int
1539amd64_jmp_p (const struct amd64_insn *details)
1540{
1541 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1542
1543 /* jump short, relative. */
1544 if (insn[0] == 0xeb)
1545 return 1;
1546
1547 /* jump near, relative. */
1548 if (insn[0] == 0xe9)
1549 return 1;
1550
1551 return amd64_absolute_jmp_p (details);
1552}
1553
35669430
DE
1554static int
1555amd64_absolute_call_p (const struct amd64_insn *details)
1556{
1557 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1558
1559 if (insn[0] == 0xff)
1560 {
1561 /* Call near, absolute indirect (/2) */
1562 if ((insn[1] & 0x38) == 0x10)
1563 return 1;
1564
1565 /* Call far, absolute indirect (/3) */
1566 if ((insn[1] & 0x38) == 0x18)
1567 return 1;
1568 }
1569
1570 return 0;
1571}
1572
1573static int
1574amd64_ret_p (const struct amd64_insn *details)
1575{
1576 /* NOTE: gcc can emit "repz ; ret". */
1577 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1578
1579 switch (insn[0])
1580 {
1581 case 0xc2: /* ret near, pop N bytes */
1582 case 0xc3: /* ret near */
1583 case 0xca: /* ret far, pop N bytes */
1584 case 0xcb: /* ret far */
1585 case 0xcf: /* iret */
1586 return 1;
1587
1588 default:
1589 return 0;
1590 }
1591}
1592
1593static int
1594amd64_call_p (const struct amd64_insn *details)
1595{
1596 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1597
1598 if (amd64_absolute_call_p (details))
1599 return 1;
1600
1601 /* call near, relative */
1602 if (insn[0] == 0xe8)
1603 return 1;
1604
1605 return 0;
1606}
1607
35669430
DE
1608/* Return non-zero if INSN is a system call, and set *LENGTHP to its
1609 length in bytes. Otherwise, return zero. */
1610
1611static int
1612amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1613{
1614 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1615
1616 if (insn[0] == 0x0f && insn[1] == 0x05)
1617 {
1618 *lengthp = 2;
1619 return 1;
1620 }
1621
1622 return 0;
1623}
1624
c2170eef
MM
1625/* Classify the instruction at ADDR using PRED.
1626 Throw an error if the memory can't be read. */
1627
1628static int
1629amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1630 int (*pred) (const struct amd64_insn *))
1631{
1632 struct amd64_insn details;
1633 gdb_byte *buf;
1634 int len, classification;
1635
1636 len = gdbarch_max_insn_length (gdbarch);
224c3ddb 1637 buf = (gdb_byte *) alloca (len);
c2170eef
MM
1638
1639 read_code (addr, buf, len);
1640 amd64_get_insn_details (buf, &details);
1641
1642 classification = pred (&details);
1643
1644 return classification;
1645}
1646
/* The gdbarch insn_is_call method: non-zero if the insn at ADDR is a call.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method: non-zero if the insn at ADDR is a return.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method: non-zero if the insn at ADDR is a jump.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
1670
35669430
DE
/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction: restore the scratch register (if any),
   relocate %rip from the scratch area back to the program's
   instruction stream, and fix the pushed return address of calls.
   DSC_ is the closure produced by amd64_displaced_step_copy_insn;
   FROM/TO are the original and scratch addresses.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_copy_insn_closure *dsc_,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  amd64_displaced_step_copy_insn_closure *dsc
    = (amd64_displaced_step_copy_insn_closure *) dsc_;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf.data ();
  const struct amd64_insn *insn_details = &dsc->insn_details;

  displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      displaced_debug_printf ("restoring reg %d to %s",
			      dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, the we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures its a nop, we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	displaced_debug_printf ("syscall changed %%rip; not relocating");
      else
	{
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  displaced_debug_printf ("relocated %%rip from %s to %s",
				  paddress (gdbarch, orig_rip),
				  paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      /* Mask to 64 bits in case of wrap-around.  */
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      displaced_debug_printf ("relocated return addr at %s to %s",
			      paddress (gdbarch, rsp),
			      paddress (gdbarch, retaddr));
    }
}
dde08ee1
PA
1783
1784/* If the instruction INSN uses RIP-relative addressing, return the
1785 offset into the raw INSN where the displacement to be adjusted is
1786 found. Returns 0 if the instruction doesn't use RIP-relative
1787 addressing. */
1788
1789static int
1790rip_relative_offset (struct amd64_insn *insn)
1791{
1792 if (insn->modrm_offset != -1)
1793 {
1794 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1795
1796 if ((modrm & 0xc7) == 0x05)
1797 {
1798 /* The displacement is found right after the ModRM byte. */
1799 return insn->modrm_offset + 1;
1800 }
1801 }
1802
1803 return 0;
1804}
1805
/* Write the LEN bytes at BUF to target memory at *TO, advancing *TO
   past the bytes written.  */

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1812
60965737 1813static void
dde08ee1
PA
1814amd64_relocate_instruction (struct gdbarch *gdbarch,
1815 CORE_ADDR *to, CORE_ADDR oldloc)
1816{
1817 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1818 int len = gdbarch_max_insn_length (gdbarch);
1819 /* Extra space for sentinels. */
1820 int fixup_sentinel_space = len;
224c3ddb 1821 gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
dde08ee1
PA
1822 struct amd64_insn insn_details;
1823 int offset = 0;
1824 LONGEST rel32, newrel;
1825 gdb_byte *insn;
1826 int insn_length;
1827
1828 read_memory (oldloc, buf, len);
1829
1830 /* Set up the sentinel space so we don't have to worry about running
1831 off the end of the buffer. An excessive number of leading prefixes
1832 could otherwise cause this. */
1833 memset (buf + len, 0, fixup_sentinel_space);
1834
1835 insn = buf;
1836 amd64_get_insn_details (insn, &insn_details);
1837
1838 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1839
1840 /* Skip legacy instruction prefixes. */
1841 insn = amd64_skip_prefixes (insn);
1842
1843 /* Adjust calls with 32-bit relative addresses as push/jump, with
1844 the address pushed being the location where the original call in
1845 the user program would return to. */
1846 if (insn[0] == 0xe8)
1847 {
f077e978
PA
1848 gdb_byte push_buf[32];
1849 CORE_ADDR ret_addr;
1850 int i = 0;
dde08ee1
PA
1851
1852 /* Where "ret" in the original code will return to. */
1853 ret_addr = oldloc + insn_length;
f077e978
PA
1854
1855 /* If pushing an address higher than or equal to 0x80000000,
1856 avoid 'pushq', as that sign extends its 32-bit operand, which
1857 would be incorrect. */
1858 if (ret_addr <= 0x7fffffff)
1859 {
1860 push_buf[0] = 0x68; /* pushq $... */
1861 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1862 i = 5;
1863 }
1864 else
1865 {
1866 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1867 push_buf[i++] = 0x83;
1868 push_buf[i++] = 0xec;
1869 push_buf[i++] = 0x08;
1870
1871 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1872 push_buf[i++] = 0x04;
1873 push_buf[i++] = 0x24;
1874 store_unsigned_integer (&push_buf[i], 4, byte_order,
1875 ret_addr & 0xffffffff);
1876 i += 4;
1877
1878 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1879 push_buf[i++] = 0x44;
1880 push_buf[i++] = 0x24;
1881 push_buf[i++] = 0x04;
1882 store_unsigned_integer (&push_buf[i], 4, byte_order,
1883 ret_addr >> 32);
1884 i += 4;
1885 }
1886 gdb_assert (i <= sizeof (push_buf));
dde08ee1 1887 /* Push the push. */
f077e978 1888 append_insns (to, i, push_buf);
dde08ee1
PA
1889
1890 /* Convert the relative call to a relative jump. */
1891 insn[0] = 0xe9;
1892
1893 /* Adjust the destination offset. */
1894 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1895 newrel = (oldloc - *to) + rel32;
f4a1794a
KY
1896 store_signed_integer (insn + 1, 4, byte_order, newrel);
1897
136821d9
SM
1898 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1899 hex_string (rel32), paddress (gdbarch, oldloc),
1900 hex_string (newrel), paddress (gdbarch, *to));
dde08ee1
PA
1901
1902 /* Write the adjusted jump into its displaced location. */
1903 append_insns (to, 5, insn);
1904 return;
1905 }
1906
1907 offset = rip_relative_offset (&insn_details);
1908 if (!offset)
1909 {
1910 /* Adjust jumps with 32-bit relative addresses. Calls are
1911 already handled above. */
1912 if (insn[0] == 0xe9)
1913 offset = 1;
1914 /* Adjust conditional jumps. */
1915 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1916 offset = 2;
1917 }
1918
1919 if (offset)
1920 {
1921 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1922 newrel = (oldloc - *to) + rel32;
f4a1794a 1923 store_signed_integer (insn + offset, 4, byte_order, newrel);
136821d9
SM
1924 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1925 hex_string (rel32), paddress (gdbarch, oldloc),
1926 hex_string (newrel), paddress (gdbarch, *to));
dde08ee1
PA
1927 }
1928
1929 /* Write the adjusted instruction into its displaced location. */
1930 append_insns (to, insn_length, buf);
1931}
1932
35669430 1933\f
c4f35dd8 1934/* The maximum number of saved registers. This should include %rip. */
90f90721 1935#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
c4f35dd8 1936
struct amd64_frame_cache
{
  /* Base address of the frame (for normal frames, the value %rbp had
     on entry to the function body).  */
  CORE_ADDR base;
  /* Non-zero once BASE has been successfully computed; left zero if
     the registers/memory needed were unavailable (see
     amd64_frame_unwind_stop_reason).  */
  int base_p;
  /* Offset of %rsp relative to the frame base during prologue
     analysis; starts at -8 (return address already pushed).  */
  CORE_ADDR sp_offset;
  /* Start address of the function this frame belongs to.  */
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  /* Register holding the pre-realignment stack pointer when the
     prologue realigns the stack; -1 if not applicable.  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
8dda9770 1953
d2449ee8 1954/* Initialize a frame cache. */
c4f35dd8 1955
d2449ee8
DJ
1956static void
1957amd64_init_frame_cache (struct amd64_frame_cache *cache)
8dda9770 1958{
c4f35dd8
MK
1959 int i;
1960
c4f35dd8
MK
1961 /* Base address. */
1962 cache->base = 0;
8fbca658 1963 cache->base_p = 0;
c4f35dd8
MK
1964 cache->sp_offset = -8;
1965 cache->pc = 0;
1966
1967 /* Saved registers. We initialize these to -1 since zero is a valid
bba66b87
DE
1968 offset (that's where %rbp is supposed to be stored).
1969 The values start out as being offsets, and are later converted to
1970 addresses (at which point -1 is interpreted as an address, still meaning
1971 "invalid"). */
e53bef9f 1972 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
1973 cache->saved_regs[i] = -1;
1974 cache->saved_sp = 0;
e0c62198 1975 cache->saved_sp_reg = -1;
c4f35dd8
MK
1976
1977 /* Frameless until proven otherwise. */
1978 cache->frameless_p = 1;
d2449ee8 1979}
c4f35dd8 1980
d2449ee8
DJ
1981/* Allocate and initialize a frame cache. */
1982
1983static struct amd64_frame_cache *
1984amd64_alloc_frame_cache (void)
1985{
1986 struct amd64_frame_cache *cache;
1987
1988 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1989 amd64_init_frame_cache (cache);
c4f35dd8 1990 return cache;
8dda9770 1991}
53e95fcf 1992
/* GCC 4.4 and later, can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
                           struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

        1. Use a caller-saved saved register:

                leaq  8(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

        2. Use a callee-saved saved register:

                pushq %reg
                leaq  16(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

        0x48 0x83 0xe4 0xf0                     andq $-16, %rsp
        0x48 0x81 0xe4 0x00 0xff 0xff 0xff      andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
        return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
        reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
         has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
        offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
               && (buf[1] & 0xf8) == 0x50)
        {
          /* Check the REX.B bit.  */
          if ((buf[0] & 1) != 0)
            reg = 8;

          offset = 1;
        }
      else
        return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
          || buf[offset + 1] != 0x8d
          || buf[offset + 3] != 0x24
          || buf[offset + 4] != 0x10)
        return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
        return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
        r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
        return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  /* 0x81 form carries a 32-bit immediate (7 bytes total), 0x83 an
     8-bit immediate (4 bytes total).  */
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
           && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
        r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Only record the realignment register if execution has actually
     reached the "andq"; before that the old %rsp is still live.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return std::min (pc + offset + 2, current_pc);
}
2147
/* Similar to amd64_analyze_stack_align for x32.  */

static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
                               struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

        1. Use a caller-saved saved register:

                leaq  8(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

           or

                [addr32] leal  8(%rsp), %reg
                andl  $-XXX, %esp
                [addr32] pushq -8(%reg)

        2. Use a callee-saved saved register:

                pushq %reg
                leaq  16(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

           or

                pushq %reg
                [addr32] leal  16(%rsp), %reg
                andl  $-XXX, %esp
                [addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

        0x48 0x83 0xe4 0xf0                     andq $-16, %rsp
        0x48 0x81 0xe4 0x00 0xff 0xff 0xff      andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:

        0x83 0xe4 0xf0                  andl $-16, %esp
        0x81 0xe4 0x00 0xff 0xff 0xff   andl $-256, %esp
   */

  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d
      && buf[offset + 3] == 0x24
      && buf[offset + 4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
        return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
        reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
         has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
          && (buf[offset + 1] & 0xf8) == 0x50)
        {
          /* Check the REX.B bit.  */
          if ((buf[offset] & 1) != 0)
            reg = 8;

          offset += 1;
        }
      else if ((buf[offset] & 0xf8) != 0x50)
        return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
        offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
         "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
          || buf[offset + 1] != 0x8d
          || buf[offset + 3] != 0x24
          || buf[offset + 4] != 0x10)
        return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
        return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
        r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
        return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  The andl form has no REX byte, so back up
     one so the checks below line up on the opcode.  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  /* 0x81 carries a 32-bit immediate, 0x83 an 8-bit immediate.  */
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
           && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
        r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Only record the realignment register once execution has passed
     the "and"; before that the old stack pointer is still live.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return std::min (pc + offset + 2, current_pc);
}
2329
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)

   or (for the X32 ABI):

      pushq %rbp        0x55
      movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)

   The `endbr64` instruction can be found before these sequences, and will be
   skipped if found.

   Any function that doesn't start with one of these sequences will be
   assumed to have no prologue and thus no valid frame pointer in
   %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
                        CORE_ADDR pc, CORE_ADDR current_pc,
                        struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The `endbr64` instruction.  */
  static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  /* First recognize any stack-realignment sequence (GCC >= 4.4); it
     precedes the conventional push/mov pair.  */
  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_code_unsigned_integer (pc, 1, byte_order);

  /* Check for the `endbr64` instruction, skip it if found.  */
  if (op == endbr64[0])
    {
      read_code (pc + 1, buf, 3);

      if (memcmp (buf, &endbr64[1], 3) == 0)
        pc += 4;

      op = read_code_unsigned_integer (pc, 1, byte_order);
    }

  if (current_pc <= pc)
    return current_pc;

  if (op == 0x55)               /* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      read_code (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
          || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
        {
          /* OK, we actually have a frame.  */
          cache->frameless_p = 0;
          return pc + 4;
        }

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
        {
          if (memcmp (buf, mov_esp_ebp_1, 2) == 0
              || memcmp (buf, mov_esp_ebp_2, 2) == 0)
            {
              /* OK, we actually have a frame.  */
              cache->frameless_p = 0;
              return pc + 3;
            }
        }

      return pc + 1;
    }

  return pc;
}
2432
/* Work around false termination of prologue - GCC PR debug/48827.

   START_PC is the first instruction of a function, PC is its minimal already
   determined advanced address.  Function returns PC if it has nothing to do.

   84 c0                test   %al,%al
   74 23                je     after
   <-- here is 0 lines advance - the false prologue end marker.
   0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
   0f 29 4d 80          movaps %xmm1,-0x80(%rbp)
   0f 29 55 90          movaps %xmm2,-0x70(%rbp)
   0f 29 5d a0          movaps %xmm3,-0x60(%rbp)
   0f 29 65 b0          movaps %xmm4,-0x50(%rbp)
   0f 29 6d c0          movaps %xmm5,-0x40(%rbp)
   0f 29 75 d0          movaps %xmm6,-0x30(%rbp)
   0f 29 7d e0          movaps %xmm7,-0x20(%rbp)
   after:  */

static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  if (pc == start_pc)
    return pc;

  /* Only applies to affected GCC releases (major version >= 6 here,
     per producer_is_gcc_ge_4's return value), and only when PC still
     falls on the function's first line.  */
  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
                               (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlayed memory, ignored here.  */
  if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;
  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
          || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
        return pc;

      /* 0b01??????  */
      if ((buf[offset + 2] & 0xc0) == 0x40)
        {
          /* 8-bit displacement.  */
          offset += 4;
        }
      /* 0b10??????  */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
        {
          /* 32-bit displacement.  */
          offset += 7;
        }
      else
        return pc;
    }

  /* je AFTER: the branch displacement must land exactly past the
     movaps block, otherwise this isn't the pattern.  */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}
/* Return PC of first real instruction.  */

static CORE_ADDR
amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
{
  struct amd64_frame_cache cache;
  CORE_ADDR pc;
  CORE_ADDR func_addr;

  if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);
      struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);

      /* LLVM backend (Clang/Flang) always emits a line note before the
         prologue and another one after.  We trust clang and newer Intel
         compilers to emit usable line notes.  */
      if (post_prologue_pc
          && (cust != NULL
              && COMPUNIT_PRODUCER (cust) != NULL
              && (producer_is_llvm (COMPUNIT_PRODUCER (cust))
                  || producer_is_icc_ge_19 (COMPUNIT_PRODUCER (cust)))))
        return std::max (start_pc, post_prologue_pc);
    }

  /* Otherwise fall back to instruction-level analysis; pass an
     effectively unbounded CURRENT_PC so the whole prologue is
     scanned.  */
  amd64_init_frame_cache (&cache);
  pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
                               &cache);
  if (cache.frameless_p)
    return start_pc;

  return amd64_skip_xmm_prologue (pc, start_pc);
}
c4f35dd8 2548\f
53e95fcf 2549
c4f35dd8
MK
/* Normal frames.  */

/* Fill CACHE for THIS_FRAME by analyzing the function's prologue.
   May throw (e.g. NOT_AVAILABLE_ERROR) if registers or memory cannot
   be read; the caller (amd64_frame_cache) handles that.  */

static void
amd64_frame_cache_1 (struct frame_info *this_frame,
                     struct amd64_frame_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  int i;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
                            cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
         function, or somewhere half-way its prologue, the function's
         frame probably hasn't been fully setup yet.  Try to
         reconstruct the base address for the stack frame by looking
         at the stack pointer.  For truly "frameless" functions this
         might work too.  */

      if (cache->saved_sp_reg != -1)
        {
          /* Stack pointer has been saved.  */
          get_frame_register (this_frame, cache->saved_sp_reg, buf);
          cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);

          /* We're halfway aligning the stack.  */
          cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
          cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

          /* This will be added back below.  */
          cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
        }
      else
        {
          get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
          cache->base = extract_unsigned_integer (buf, 8, byte_order)
                        + cache->sp_offset;
        }
    }
  else
    {
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  cache->base_p = 1;
}
2620
2621static struct amd64_frame_cache *
2622amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2623{
8fbca658
PA
2624 struct amd64_frame_cache *cache;
2625
2626 if (*this_cache)
9a3c8263 2627 return (struct amd64_frame_cache *) *this_cache;
8fbca658
PA
2628
2629 cache = amd64_alloc_frame_cache ();
2630 *this_cache = cache;
2631
a70b8144 2632 try
8fbca658
PA
2633 {
2634 amd64_frame_cache_1 (this_frame, cache);
2635 }
230d2906 2636 catch (const gdb_exception_error &ex)
7556d4a4
PA
2637 {
2638 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 2639 throw;
7556d4a4 2640 }
8fbca658 2641
c4f35dd8 2642 return cache;
6d686a84
ML
2643}
2644
8fbca658
PA
2645static enum unwind_stop_reason
2646amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2647 void **this_cache)
2648{
2649 struct amd64_frame_cache *cache =
2650 amd64_frame_cache (this_frame, this_cache);
2651
2652 if (!cache->base_p)
2653 return UNWIND_UNAVAILABLE;
2654
2655 /* This marks the outermost frame. */
2656 if (cache->base == 0)
2657 return UNWIND_OUTERMOST;
2658
2659 return UNWIND_NO_REASON;
2660}
2661
c4f35dd8 2662static void
10458914 2663amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
e53bef9f 2664 struct frame_id *this_id)
c4f35dd8 2665{
e53bef9f 2666 struct amd64_frame_cache *cache =
10458914 2667 amd64_frame_cache (this_frame, this_cache);
c4f35dd8 2668
8fbca658 2669 if (!cache->base_p)
5ce0145d
PA
2670 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2671 else if (cache->base == 0)
2672 {
2673 /* This marks the outermost frame. */
2674 return;
2675 }
2676 else
2677 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
c4f35dd8 2678}
e76e1718 2679
10458914
DJ
2680static struct value *
2681amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2682 int regnum)
53e95fcf 2683{
10458914 2684 struct gdbarch *gdbarch = get_frame_arch (this_frame);
e53bef9f 2685 struct amd64_frame_cache *cache =
10458914 2686 amd64_frame_cache (this_frame, this_cache);
e76e1718 2687
c4f35dd8 2688 gdb_assert (regnum >= 0);
b1ab997b 2689
2ae02b47 2690 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
10458914 2691 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
e76e1718 2692
e53bef9f 2693 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
10458914
DJ
2694 return frame_unwind_got_memory (this_frame, regnum,
2695 cache->saved_regs[regnum]);
e76e1718 2696
10458914 2697 return frame_unwind_got_register (this_frame, regnum, regnum);
c4f35dd8 2698}
e76e1718 2699
/* Unwinder for normal (prologue-analyzed) frames.  Fields are
   positional: type, stop_reason, this_id, prev_register,
   unwind_data, sniffer.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_unwind_stop_reason,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,                         /* No extra unwind data.  */
  default_frame_sniffer         /* Matches any frame.  */
};
c4f35dd8 2709\f
/* Generate a bytecode expression to get the value of the saved PC.  */

static void
amd64_gen_return_address (struct gdbarch *gdbarch,
                          struct agent_expr *ax, struct axs_value *value,
                          CORE_ADDR scope)
{
  /* The following sequence assumes the traditional use of the base
     register: the return address lives at 8(%rbp), so emit
     "%rbp + 8" and mark the result as a memory lvalue.  */
  ax_reg (ax, AMD64_RBP_REGNUM);
  ax_const_l (ax, 8);
  ax_simple (ax, aop_add);
  value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
  value->kind = axs_lvalue_memory;
}
2725\f
e76e1718 2726
/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();

  try
    {
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

      /* Saved register locations come from the OS-specific
         sigcontext, located via the tdep hook.  */
      addr = tdep->sigcontext_addr (this_frame);
      gdb_assert (tdep->sc_reg_offset);
      gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
      for (i = 0; i < tdep->sc_num_regs; i++)
        if (tdep->sc_reg_offset[i] != -1)
          cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

      cache->base_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      /* Leave base_p == 0 on unavailable registers/memory.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  *this_cache = cache;
  return cache;
}
2772
8fbca658
PA
2773static enum unwind_stop_reason
2774amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2775 void **this_cache)
2776{
2777 struct amd64_frame_cache *cache =
2778 amd64_sigtramp_frame_cache (this_frame, this_cache);
2779
2780 if (!cache->base_p)
2781 return UNWIND_UNAVAILABLE;
2782
2783 return UNWIND_NO_REASON;
2784}
2785
c4f35dd8 2786static void
10458914 2787amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
e53bef9f 2788 void **this_cache, struct frame_id *this_id)
c4f35dd8 2789{
e53bef9f 2790 struct amd64_frame_cache *cache =
10458914 2791 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 2792
8fbca658 2793 if (!cache->base_p)
5ce0145d
PA
2794 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2795 else if (cache->base == 0)
2796 {
2797 /* This marks the outermost frame. */
2798 return;
2799 }
2800 else
2801 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
c4f35dd8
MK
2802}
2803
10458914
DJ
2804static struct value *
2805amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2806 void **this_cache, int regnum)
c4f35dd8
MK
2807{
2808 /* Make sure we've initialized the cache. */
10458914 2809 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 2810
10458914 2811 return amd64_frame_prev_register (this_frame, this_cache, regnum);
c4f35dd8
MK
2812}
2813
10458914
DJ
2814static int
2815amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2816 struct frame_info *this_frame,
2817 void **this_cache)
c4f35dd8 2818{
10458914 2819 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
911bc6ee
MK
2820
2821 /* We shouldn't even bother if we don't have a sigcontext_addr
2822 handler. */
2823 if (tdep->sigcontext_addr == NULL)
10458914 2824 return 0;
911bc6ee
MK
2825
2826 if (tdep->sigtramp_p != NULL)
2827 {
10458914
DJ
2828 if (tdep->sigtramp_p (this_frame))
2829 return 1;
911bc6ee 2830 }
c4f35dd8 2831
911bc6ee 2832 if (tdep->sigtramp_start != 0)
1c3545ae 2833 {
10458914 2834 CORE_ADDR pc = get_frame_pc (this_frame);
1c3545ae 2835
911bc6ee
MK
2836 gdb_assert (tdep->sigtramp_end != 0);
2837 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
10458914 2838 return 1;
1c3545ae 2839 }
c4f35dd8 2840
10458914 2841 return 0;
c4f35dd8 2842}
/* Unwinder for signal-trampoline frames.  Fields are positional:
   type, stop_reason, this_id, prev_register, unwind_data, sniffer.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_unwind_stop_reason,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,                         /* No extra unwind data.  */
  amd64_sigtramp_frame_sniffer
};
c4f35dd8
MK
2853\f
2854
2855static CORE_ADDR
10458914 2856amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
c4f35dd8 2857{
e53bef9f 2858 struct amd64_frame_cache *cache =
10458914 2859 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
2860
2861 return cache->base;
2862}
2863
/* Frame base handler for normal frames: frame, locals and arguments
   all share the same base address.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,     /* this_base */
  amd64_frame_base_address,     /* this_locals */
  amd64_frame_base_address      /* this_args */
};
2871
872761f4
MS
2872/* Normal frames, but in a function epilogue. */
2873
c9cf6e20
MG
2874/* Implement the stack_frame_destroyed_p gdbarch method.
2875
2876 The epilogue is defined here as the 'ret' instruction, which will
872761f4
MS
2877 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2878 the function's stack frame. */
2879
2880static int
c9cf6e20 2881amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
872761f4
MS
2882{
2883 gdb_byte insn;
43f3e411 2884 struct compunit_symtab *cust;
e0d00bc7 2885
43f3e411
DE
2886 cust = find_pc_compunit_symtab (pc);
2887 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
e0d00bc7 2888 return 0;
872761f4
MS
2889
2890 if (target_read_memory (pc, &insn, 1))
2891 return 0; /* Can't read memory at pc. */
2892
2893 if (insn != 0xc3) /* 'ret' instruction. */
2894 return 0;
2895
2896 return 1;
2897}
2898
/* Sniffer for the epilogue unwinder: only applies to the innermost
   frame, and only when PC sits on the 'ret' ending the function.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					get_frame_pc (this_frame));
}
2910
/* Build (or return the cached) frame cache for a frame stopped in a
   function epilogue, i.e. at the 'ret' instruction.  At that point
   the frame has already been destroyed, so the layout is derived
   directly from %rsp.  Unavailable registers/memory leave the cache
   with base_p == 0 rather than raising an error.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  try
    {
      /* Cache base will be %rsp plus cache->sp_offset (-8).  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8,
					      byte_order) + cache->sp_offset;

      /* Cache pc will be the frame func.  */
      cache->pc = get_frame_pc (this_frame);

      /* The saved %rsp will be at cache->base plus 16.  */
      cache->saved_sp = cache->base + 16;

      /* The saved %rip will be at cache->base plus 8.  */
      cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

      cache->base_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      /* Only swallow "not available"; anything else is a real error.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
2951
8fbca658
PA
2952static enum unwind_stop_reason
2953amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2954 void **this_cache)
2955{
2956 struct amd64_frame_cache *cache
2957 = amd64_epilogue_frame_cache (this_frame, this_cache);
2958
2959 if (!cache->base_p)
2960 return UNWIND_UNAVAILABLE;
2961
2962 return UNWIND_NO_REASON;
2963}
2964
872761f4
MS
2965static void
2966amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2967 void **this_cache,
2968 struct frame_id *this_id)
2969{
2970 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2971 this_cache);
2972
8fbca658 2973 if (!cache->base_p)
5ce0145d
PA
2974 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2975 else
2976 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
872761f4
MS
2977}
2978
/* Unwinder for normal frames caught in a function epilogue.  Reuses
   amd64_frame_prev_register since register locations are compatible
   with the normal unwinder's cache layout.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};
2988
166f4c7b 2989static struct frame_id
10458914 2990amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
166f4c7b 2991{
c4f35dd8
MK
2992 CORE_ADDR fp;
2993
10458914 2994 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
c4f35dd8 2995
10458914 2996 return frame_id_build (fp + 16, get_frame_pc (this_frame));
166f4c7b
ML
2997}
2998
8b148df9
AC
2999/* 16 byte align the SP per frame requirements. */
3000
3001static CORE_ADDR
e53bef9f 3002amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
8b148df9
AC
3003{
3004 return sp & -(CORE_ADDR)16;
3005}
473f17b0
MK
3006\f
3007
593adc23
MK
3008/* Supply register REGNUM from the buffer specified by FPREGS and LEN
3009 in the floating-point register set REGSET to register cache
3010 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
473f17b0
MK
3011
3012static void
e53bef9f
MK
3013amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
3014 int regnum, const void *fpregs, size_t len)
473f17b0 3015{
ac7936df 3016 struct gdbarch *gdbarch = regcache->arch ();
09424cff 3017 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
473f17b0 3018
1528345d 3019 gdb_assert (len >= tdep->sizeof_fpregset);
90f90721 3020 amd64_supply_fxsave (regcache, regnum, fpregs);
473f17b0 3021}
8b148df9 3022
593adc23
MK
3023/* Collect register REGNUM from the register cache REGCACHE and store
3024 it in the buffer specified by FPREGS and LEN as described by the
3025 floating-point register set REGSET. If REGNUM is -1, do this for
3026 all registers in REGSET. */
3027
3028static void
3029amd64_collect_fpregset (const struct regset *regset,
3030 const struct regcache *regcache,
3031 int regnum, void *fpregs, size_t len)
3032{
ac7936df 3033 struct gdbarch *gdbarch = regcache->arch ();
09424cff 3034 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
593adc23 3035
1528345d 3036 gdb_assert (len >= tdep->sizeof_fpregset);
593adc23
MK
3037 amd64_collect_fxsave (regcache, regnum, fpregs);
3038}
3039
/* Floating-point register set, wired into tdep->fpregset by
   amd64_init_abi.  No collect-size callback is supplied (NULL).  */

const struct regset amd64_fpregset =
  {
    NULL, amd64_supply_fpregset, amd64_collect_fpregset
  };
c6b33596
MK
3044\f
3045
436675d3
PA
3046/* Figure out where the longjmp will land. Slurp the jmp_buf out of
3047 %rdi. We expect its value to be a pointer to the jmp_buf structure
3048 from which we extract the address that we will land at. This
3049 address is copied into PC. This routine returns non-zero on
3050 success. */
3051
3052static int
3053amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
3054{
3055 gdb_byte buf[8];
3056 CORE_ADDR jb_addr;
3057 struct gdbarch *gdbarch = get_frame_arch (frame);
3058 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
0dfff4cb 3059 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
3060
3061 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3062 longjmp will land. */
3063 if (jb_pc_offset == -1)
3064 return 0;
3065
3066 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
0dfff4cb
UW
3067 jb_addr= extract_typed_address
3068 (buf, builtin_type (gdbarch)->builtin_data_ptr);
436675d3
PA
3069 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
3070 return 0;
3071
0dfff4cb 3072 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
3073
3074 return 1;
3075}
3076
cf648174
HZ
/* Mapping used by process record/replay: index is the record-layer
   register slot, value the corresponding AMD64 register number.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
3086
1d509aa6
MM
/* Implement the "in_indirect_branch_thunk" gdbarch function.
   Delegates to the common x86 helper, passing the amd64 register
   name table and the GPR/PC register-number bounds.  */

static bool
amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  return x86_in_indirect_branch_thunk (pc, amd64_register_names,
				       AMD64_RAX_REGNUM,
				       AMD64_RIP_REGNUM);
}
3096
/* Initialize GDBARCH for the AMD64 (x86-64) ABI.  Fills in the tdep
   structure from the target description TDESC (falling back to
   DEFAULT_TDESC when TDESC carries no registers) and installs all
   amd64-specific gdbarch methods: type sizes, register numbering,
   call dummies, frame unwinders, SystemTap parsing, etc.  Called by
   every amd64 OS-ABI handler.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
		const target_desc *default_tdesc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;
  static const char *const stap_integer_prefixes[] = { "$", NULL };
  static const char *const stap_register_prefixes[] = { "%", NULL };
  static const char *const stap_register_indirection_prefixes[] = { "(",
								    NULL };
  static const char *const stap_register_indirection_suffixes[] = { ")",
								    NULL };

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
  tdep->fpregset = &amd64_fpregset;

  if (! tdesc_has_registers (tdesc))
    tdesc = default_tdesc;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Optional register sets, enabled per target-description feature.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
    {
      tdep->zmmh_register_names = amd64_zmmh_names;
      tdep->k_register_names = amd64_k_names;
      tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
      tdep->ymm16h_register_names = amd64_ymmh_avx512_names;

      tdep->num_zmm_regs = 32;
      tdep->num_xmm_avx512_regs = 16;
      tdep->num_ymm_avx512_regs = 16;

      tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
      tdep->k0_regnum = AMD64_K0_REGNUM;
      tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
      tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
    {
      tdep->mpx_register_names = amd64_mpx_names;
      tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
      tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
    {
      tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
    {
      tdep->pkeys_register_names = amd64_pkeys_names;
      tdep->pkru_regnum = AMD64_PKRU_REGNUM;
      tdep->num_pkeys_regs = 1;
    }

  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
					  amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);
  set_gdbarch_ax_pseudo_register_collect (gdbarch,
					  amd64_ax_pseudo_register_collect);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
  set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
  set_gdbarch_stap_register_indirection_prefixes (gdbarch,
					  stap_register_indirection_prefixes);
  set_gdbarch_stap_register_indirection_suffixes (gdbarch,
					  stap_register_indirection_suffixes);
  set_gdbarch_stap_is_single_operand (gdbarch,
				      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
					i386_stap_parse_special_token);
  set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
  set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
  set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);

  set_gdbarch_in_indirect_branch_thunk (gdbarch,
					amd64_in_indirect_branch_thunk);

  register_amd64_ravenscar_ops (gdbarch);
}
c912f608
SM
3267
3268/* Initialize ARCH for x86-64, no osabi. */
3269
3270static void
3271amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
3272{
de52b960
PA
3273 amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
3274 true));
c912f608 3275}
fff4548b
MK
3276
3277static struct type *
3278amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3279{
3280 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3281
3282 switch (regnum - tdep->eax_regnum)
3283 {
3284 case AMD64_RBP_REGNUM: /* %ebp */
3285 case AMD64_RSP_REGNUM: /* %esp */
3286 return builtin_type (gdbarch)->builtin_data_ptr;
3287 case AMD64_RIP_REGNUM: /* %eip */
3288 return builtin_type (gdbarch)->builtin_func_ptr;
3289 }
3290
3291 return i386_pseudo_register_type (gdbarch, regnum);
3292}
3293
/* Initialize GDBARCH for the x32 ABI: the full amd64 setup, then
   override pointer/long sizes to 32 bits and expose %eip as an extra
   dword pseudo register.  */

void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
		    const target_desc *default_tdesc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  amd64_init_abi (info, gdbarch, default_tdesc);

  /* 17 = 16 GPRs plus %eip.  */
  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}
90884b2b 3308
c912f608
SM
/* Initialize ARCH for x64-32, no osabi.  Uses the default SSE target
   description with the segment-base registers enabled.  */

static void
amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
{
  amd64_x32_init_abi (info, arch,
		      amd64_target_description (X86_XSTATE_SSE_MASK, true));
}
97de3545
JB
/* Return the target description for a specified XSAVE feature mask.
   Descriptions are created lazily and memoized in a static table
   indexed by the AVX/MPX/AVX512/PKRU bits of XCR0 and by SEGMENTS, so
   repeated calls with the same arguments return the same object.  */

const struct target_desc *
amd64_target_description (uint64_t xcr0, bool segments)
{
  static target_desc *amd64_tdescs \
    [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
  target_desc **tdesc;

  tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
    [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
    [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
    [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
    [segments ? 1 : 0];

  if (*tdesc == NULL)
    *tdesc = amd64_create_target_description (xcr0, false, false,
					      segments);

  return *tdesc;
}
3339
/* Module initializer: register the no-osabi handlers for the 64-bit
   and x32 BFD machine types.  */

void _initialize_amd64_tdep ();
void
_initialize_amd64_tdep ()
{
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
			  amd64_none_init_abi);
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
			  amd64_x32_none_init_abi);
}
c4f35dd8
MK
3349\f
3350
41d041d6
MK
/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16-bits of the segment
   selector).  */

/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
		     const void *fxsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave
      && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      const gdb_byte *regs = (const gdb_byte *) fxsave;

      /* Offsets 12 and 20 hold the upper halves of the 64-bit FPU
	 instruction and operand pointers (see the comment above).  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}
3383
a055a187
L
/* Similar to amd64_supply_fxsave, but use XSAVE extended state.  */

void
amd64_supply_xsave (struct regcache *regcache, int regnum,
		    const void *xsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  i387_supply_xsave (regcache, regnum, xsave);

  if (xsave
      && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      const gdb_byte *regs = (const gdb_byte *) xsave;
      ULONGEST clear_bv;

      clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);

      /* If the FISEG and FOSEG registers have not been initialised yet
	 (their CLEAR_BV bit is set) then their default values of zero will
	 have already been setup by I387_SUPPLY_XSAVE.  */
      if (!(clear_bv & X86_XSTATE_X87))
	{
	  if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	    regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
	  if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	    regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
	}
    }
}
3415
3c017e40
MK
/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
		      void *fxsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = (gdb_byte *) fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      /* Store the upper halves of the 64-bit FPU instruction and
	 operand pointers at offsets 12 and 20 respectively.  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}
a055a187 3439
/* Similar to amd64_collect_fxsave, but use XSAVE extended state.  */

void
amd64_collect_xsave (const struct regcache *regcache, int regnum,
		     void *xsave, int gcore)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = (gdb_byte *) xsave;

  i387_collect_xsave (regcache, regnum, xsave, gcore);

  if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      /* Store the upper halves of the 64-bit FPU instruction and
	 operand pointers at offsets 12 and 20 respectively.  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_collect (I387_FISEG_REGNUM (tdep),
			       regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
			       regs + 20);
    }
}
This page took 1.570662 seconds and 4 git commands to generate.