daily update
[deliverable/binutils-gdb.git] / gdb / amd64-tdep.c
CommitLineData
e53bef9f 1/* Target-dependent code for AMD64.
ce0eebec 2
ecd75fc8 3 Copyright (C) 2001-2014 Free Software Foundation, Inc.
5ae96ec1
MK
4
5 Contributed by Jiri Smid, SuSE Labs.
53e95fcf
JS
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
a9762ec7 11 the Free Software Foundation; either version 3 of the License, or
53e95fcf
JS
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
a9762ec7 20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
53e95fcf
JS
21
22#include "defs.h"
35669430
DE
23#include "opcode/i386.h"
24#include "dis-asm.h"
c4f35dd8
MK
25#include "arch-utils.h"
26#include "block.h"
27#include "dummy-frame.h"
28#include "frame.h"
29#include "frame-base.h"
30#include "frame-unwind.h"
53e95fcf 31#include "inferior.h"
45741a9c 32#include "infrun.h"
53e95fcf 33#include "gdbcmd.h"
c4f35dd8
MK
34#include "gdbcore.h"
35#include "objfiles.h"
53e95fcf 36#include "regcache.h"
2c261fae 37#include "regset.h"
53e95fcf 38#include "symfile.h"
eda5a4d7 39#include "disasm.h"
82dbc5f7 40#include "gdb_assert.h"
8fbca658 41#include "exceptions.h"
9c1488cb 42#include "amd64-tdep.h"
c4f35dd8 43#include "i387-tdep.h"
53e95fcf 44
90884b2b 45#include "features/i386/amd64.c"
a055a187 46#include "features/i386/amd64-avx.c"
e43e105e 47#include "features/i386/amd64-mpx.c"
01f9f808
MS
48#include "features/i386/amd64-avx512.c"
49
ac1438b5
L
50#include "features/i386/x32.c"
51#include "features/i386/x32-avx.c"
01f9f808 52#include "features/i386/x32-avx512.c"
90884b2b 53
6710bf39
SS
54#include "ax.h"
55#include "ax-gdb.h"
56
e53bef9f
MK
57/* Note that the AMD64 architecture was previously known as x86-64.
58 The latter is (forever) engraved into the canonical system name as
90f90721 59 returned by config.guess, and used as the name for the AMD64 port
e53bef9f
MK
60 of GNU/Linux. The BSD's have renamed their ports to amd64; they
61 don't like to shout. For GDB we prefer the amd64_-prefix over the
62 x86_64_-prefix since it's so much easier to type. */
63
402ecd56 64/* Register information. */
c4f35dd8 65
6707b003 66static const char *amd64_register_names[] =
de220d0f 67{
6707b003 68 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
c4f35dd8
MK
69
70 /* %r8 is indeed register number 8. */
6707b003
UW
71 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
72 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
c4f35dd8 73
af233647 74 /* %st0 is register number 24. */
6707b003
UW
75 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
76 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
c4f35dd8 77
af233647 78 /* %xmm0 is register number 40. */
6707b003
UW
79 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
80 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
81 "mxcsr",
0e04a514
ML
82};
83
a055a187
L
84static const char *amd64_ymm_names[] =
85{
86 "ymm0", "ymm1", "ymm2", "ymm3",
87 "ymm4", "ymm5", "ymm6", "ymm7",
88 "ymm8", "ymm9", "ymm10", "ymm11",
89 "ymm12", "ymm13", "ymm14", "ymm15"
90};
91
01f9f808
MS
92static const char *amd64_ymm_avx512_names[] =
93{
94 "ymm16", "ymm17", "ymm18", "ymm19",
95 "ymm20", "ymm21", "ymm22", "ymm23",
96 "ymm24", "ymm25", "ymm26", "ymm27",
97 "ymm28", "ymm29", "ymm30", "ymm31"
98};
99
a055a187
L
100static const char *amd64_ymmh_names[] =
101{
102 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
103 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
104 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
105 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
106};
de220d0f 107
01f9f808
MS
108static const char *amd64_ymmh_avx512_names[] =
109{
110 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
111 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
112 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
113 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
114};
115
e43e105e
WT
116static const char *amd64_mpx_names[] =
117{
118 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
119};
120
01f9f808
MS
121static const char *amd64_k_names[] =
122{
123 "k0", "k1", "k2", "k3",
124 "k4", "k5", "k6", "k7"
125};
126
127static const char *amd64_zmmh_names[] =
128{
129 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
130 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
131 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
132 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
133 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
134 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
135 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
136 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
137};
138
139static const char *amd64_zmm_names[] =
140{
141 "zmm0", "zmm1", "zmm2", "zmm3",
142 "zmm4", "zmm5", "zmm6", "zmm7",
143 "zmm8", "zmm9", "zmm10", "zmm11",
144 "zmm12", "zmm13", "zmm14", "zmm15",
145 "zmm16", "zmm17", "zmm18", "zmm19",
146 "zmm20", "zmm21", "zmm22", "zmm23",
147 "zmm24", "zmm25", "zmm26", "zmm27",
148 "zmm28", "zmm29", "zmm30", "zmm31"
149};
150
151static const char *amd64_xmm_avx512_names[] = {
152 "xmm16", "xmm17", "xmm18", "xmm19",
153 "xmm20", "xmm21", "xmm22", "xmm23",
154 "xmm24", "xmm25", "xmm26", "xmm27",
155 "xmm28", "xmm29", "xmm30", "xmm31"
156};
157
c4f35dd8
MK
158/* DWARF Register Number Mapping as defined in the System V psABI,
159 section 3.6. */
53e95fcf 160
e53bef9f 161static int amd64_dwarf_regmap[] =
0e04a514 162{
c4f35dd8 163 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
90f90721
MK
164 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
165 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
166 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
c4f35dd8
MK
167
168 /* Frame Pointer Register RBP. */
90f90721 169 AMD64_RBP_REGNUM,
c4f35dd8
MK
170
171 /* Stack Pointer Register RSP. */
90f90721 172 AMD64_RSP_REGNUM,
c4f35dd8
MK
173
174 /* Extended Integer Registers 8 - 15. */
5b856f36
PM
175 AMD64_R8_REGNUM, /* %r8 */
176 AMD64_R9_REGNUM, /* %r9 */
177 AMD64_R10_REGNUM, /* %r10 */
178 AMD64_R11_REGNUM, /* %r11 */
179 AMD64_R12_REGNUM, /* %r12 */
180 AMD64_R13_REGNUM, /* %r13 */
181 AMD64_R14_REGNUM, /* %r14 */
182 AMD64_R15_REGNUM, /* %r15 */
c4f35dd8 183
59207364 184 /* Return Address RA. Mapped to RIP. */
90f90721 185 AMD64_RIP_REGNUM,
c4f35dd8
MK
186
187 /* SSE Registers 0 - 7. */
90f90721
MK
188 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
189 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
190 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
191 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
c4f35dd8
MK
192
193 /* Extended SSE Registers 8 - 15. */
90f90721
MK
194 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
195 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
196 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
197 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
c4f35dd8
MK
198
199 /* Floating Point Registers 0-7. */
90f90721
MK
200 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
201 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
202 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
c6f4c129
JB
203 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
204
205 /* Control and Status Flags Register. */
206 AMD64_EFLAGS_REGNUM,
207
208 /* Selector Registers. */
209 AMD64_ES_REGNUM,
210 AMD64_CS_REGNUM,
211 AMD64_SS_REGNUM,
212 AMD64_DS_REGNUM,
213 AMD64_FS_REGNUM,
214 AMD64_GS_REGNUM,
215 -1,
216 -1,
217
218 /* Segment Base Address Registers. */
219 -1,
220 -1,
221 -1,
222 -1,
223
224 /* Special Selector Registers. */
225 -1,
226 -1,
227
228 /* Floating Point Control Registers. */
229 AMD64_MXCSR_REGNUM,
230 AMD64_FCTRL_REGNUM,
231 AMD64_FSTAT_REGNUM
c4f35dd8 232};
0e04a514 233
e53bef9f
MK
234static const int amd64_dwarf_regmap_len =
235 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
0e04a514 236
c4f35dd8
MK
237/* Convert DWARF register number REG to the appropriate register
238 number used by GDB. */
26abbdc4 239
c4f35dd8 240static int
d3f73121 241amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
53e95fcf 242{
a055a187
L
243 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
244 int ymm0_regnum = tdep->ymm0_regnum;
c4f35dd8 245 int regnum = -1;
53e95fcf 246
16aff9a6 247 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
e53bef9f 248 regnum = amd64_dwarf_regmap[reg];
53e95fcf 249
c4f35dd8 250 if (regnum == -1)
8a3fe4f8 251 warning (_("Unmapped DWARF Register #%d encountered."), reg);
a055a187
L
252 else if (ymm0_regnum >= 0
253 && i386_xmm_regnum_p (gdbarch, regnum))
254 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
c4f35dd8
MK
255
256 return regnum;
53e95fcf 257}
d532c08f 258
35669430
DE
259/* Map architectural register numbers to gdb register numbers. */
260
261static const int amd64_arch_regmap[16] =
262{
263 AMD64_RAX_REGNUM, /* %rax */
264 AMD64_RCX_REGNUM, /* %rcx */
265 AMD64_RDX_REGNUM, /* %rdx */
266 AMD64_RBX_REGNUM, /* %rbx */
267 AMD64_RSP_REGNUM, /* %rsp */
268 AMD64_RBP_REGNUM, /* %rbp */
269 AMD64_RSI_REGNUM, /* %rsi */
270 AMD64_RDI_REGNUM, /* %rdi */
271 AMD64_R8_REGNUM, /* %r8 */
272 AMD64_R9_REGNUM, /* %r9 */
273 AMD64_R10_REGNUM, /* %r10 */
274 AMD64_R11_REGNUM, /* %r11 */
275 AMD64_R12_REGNUM, /* %r12 */
276 AMD64_R13_REGNUM, /* %r13 */
277 AMD64_R14_REGNUM, /* %r14 */
278 AMD64_R15_REGNUM /* %r15 */
279};
280
281static const int amd64_arch_regmap_len =
282 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
283
284/* Convert architectural register number REG to the appropriate register
285 number used by GDB. */
286
287static int
288amd64_arch_reg_to_regnum (int reg)
289{
290 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
291
292 return amd64_arch_regmap[reg];
293}
294
1ba53b71
L
295/* Register names for byte pseudo-registers. */
296
297static const char *amd64_byte_names[] =
298{
299 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
fe01d668
L
300 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
301 "ah", "bh", "ch", "dh"
1ba53b71
L
302};
303
fe01d668
L
304/* Number of lower byte registers. */
305#define AMD64_NUM_LOWER_BYTE_REGS 16
306
1ba53b71
L
307/* Register names for word pseudo-registers. */
308
309static const char *amd64_word_names[] =
310{
9cad29ac 311 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
1ba53b71
L
312 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
313};
314
315/* Register names for dword pseudo-registers. */
316
317static const char *amd64_dword_names[] =
318{
319 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
fff4548b
MK
320 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
321 "eip"
1ba53b71
L
322};
323
324/* Return the name of register REGNUM. */
325
326static const char *
327amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
328{
329 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
330 if (i386_byte_regnum_p (gdbarch, regnum))
331 return amd64_byte_names[regnum - tdep->al_regnum];
01f9f808
MS
332 else if (i386_zmm_regnum_p (gdbarch, regnum))
333 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
a055a187
L
334 else if (i386_ymm_regnum_p (gdbarch, regnum))
335 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
01f9f808
MS
336 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
337 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
1ba53b71
L
338 else if (i386_word_regnum_p (gdbarch, regnum))
339 return amd64_word_names[regnum - tdep->ax_regnum];
340 else if (i386_dword_regnum_p (gdbarch, regnum))
341 return amd64_dword_names[regnum - tdep->eax_regnum];
342 else
343 return i386_pseudo_register_name (gdbarch, regnum);
344}
345
3543a589
TT
346static struct value *
347amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
348 struct regcache *regcache,
349 int regnum)
1ba53b71
L
350{
351 gdb_byte raw_buf[MAX_REGISTER_SIZE];
352 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
05d1431c 353 enum register_status status;
3543a589
TT
354 struct value *result_value;
355 gdb_byte *buf;
356
357 result_value = allocate_value (register_type (gdbarch, regnum));
358 VALUE_LVAL (result_value) = lval_register;
359 VALUE_REGNUM (result_value) = regnum;
360 buf = value_contents_raw (result_value);
1ba53b71
L
361
362 if (i386_byte_regnum_p (gdbarch, regnum))
363 {
364 int gpnum = regnum - tdep->al_regnum;
365
366 /* Extract (always little endian). */
fe01d668
L
367 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
368 {
369 /* Special handling for AH, BH, CH, DH. */
05d1431c
PA
370 status = regcache_raw_read (regcache,
371 gpnum - AMD64_NUM_LOWER_BYTE_REGS,
372 raw_buf);
373 if (status == REG_VALID)
374 memcpy (buf, raw_buf + 1, 1);
3543a589
TT
375 else
376 mark_value_bytes_unavailable (result_value, 0,
377 TYPE_LENGTH (value_type (result_value)));
fe01d668
L
378 }
379 else
380 {
05d1431c
PA
381 status = regcache_raw_read (regcache, gpnum, raw_buf);
382 if (status == REG_VALID)
383 memcpy (buf, raw_buf, 1);
3543a589
TT
384 else
385 mark_value_bytes_unavailable (result_value, 0,
386 TYPE_LENGTH (value_type (result_value)));
fe01d668 387 }
1ba53b71
L
388 }
389 else if (i386_dword_regnum_p (gdbarch, regnum))
390 {
391 int gpnum = regnum - tdep->eax_regnum;
392 /* Extract (always little endian). */
05d1431c
PA
393 status = regcache_raw_read (regcache, gpnum, raw_buf);
394 if (status == REG_VALID)
395 memcpy (buf, raw_buf, 4);
3543a589
TT
396 else
397 mark_value_bytes_unavailable (result_value, 0,
398 TYPE_LENGTH (value_type (result_value)));
1ba53b71
L
399 }
400 else
3543a589
TT
401 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
402 result_value);
403
404 return result_value;
1ba53b71
L
405}
406
407static void
408amd64_pseudo_register_write (struct gdbarch *gdbarch,
409 struct regcache *regcache,
410 int regnum, const gdb_byte *buf)
411{
412 gdb_byte raw_buf[MAX_REGISTER_SIZE];
413 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
414
415 if (i386_byte_regnum_p (gdbarch, regnum))
416 {
417 int gpnum = regnum - tdep->al_regnum;
418
fe01d668
L
419 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
420 {
421 /* Read ... AH, BH, CH, DH. */
422 regcache_raw_read (regcache,
423 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
424 /* ... Modify ... (always little endian). */
425 memcpy (raw_buf + 1, buf, 1);
426 /* ... Write. */
427 regcache_raw_write (regcache,
428 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
429 }
430 else
431 {
432 /* Read ... */
433 regcache_raw_read (regcache, gpnum, raw_buf);
434 /* ... Modify ... (always little endian). */
435 memcpy (raw_buf, buf, 1);
436 /* ... Write. */
437 regcache_raw_write (regcache, gpnum, raw_buf);
438 }
1ba53b71
L
439 }
440 else if (i386_dword_regnum_p (gdbarch, regnum))
441 {
442 int gpnum = regnum - tdep->eax_regnum;
443
444 /* Read ... */
445 regcache_raw_read (regcache, gpnum, raw_buf);
446 /* ... Modify ... (always little endian). */
447 memcpy (raw_buf, buf, 4);
448 /* ... Write. */
449 regcache_raw_write (regcache, gpnum, raw_buf);
450 }
451 else
452 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
453}
454
53e95fcf
JS
455\f
456
bf4d6c1c
JB
457/* Register classes as defined in the psABI. */
458
459enum amd64_reg_class
460{
461 AMD64_INTEGER,
462 AMD64_SSE,
463 AMD64_SSEUP,
464 AMD64_X87,
465 AMD64_X87UP,
466 AMD64_COMPLEX_X87,
467 AMD64_NO_CLASS,
468 AMD64_MEMORY
469};
470
efb1c01c
MK
471/* Return the union class of CLASS1 and CLASS2. See the psABI for
472 details. */
473
474static enum amd64_reg_class
475amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
476{
477 /* Rule (a): If both classes are equal, this is the resulting class. */
478 if (class1 == class2)
479 return class1;
480
481 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
482 is the other class. */
483 if (class1 == AMD64_NO_CLASS)
484 return class2;
485 if (class2 == AMD64_NO_CLASS)
486 return class1;
487
488 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
489 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
490 return AMD64_MEMORY;
491
492 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
493 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
494 return AMD64_INTEGER;
495
496 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
497 MEMORY is used as class. */
498 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
499 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
500 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
501 return AMD64_MEMORY;
502
503 /* Rule (f): Otherwise class SSE is used. */
504 return AMD64_SSE;
505}
506
bf4d6c1c
JB
507static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
508
79b1ab3d
MK
509/* Return non-zero if TYPE is a non-POD structure or union type. */
510
511static int
512amd64_non_pod_p (struct type *type)
513{
514 /* ??? A class with a base class certainly isn't POD, but does this
515 catch all non-POD structure types? */
516 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
517 return 1;
518
519 return 0;
520}
521
efb1c01c
MK
522/* Classify TYPE according to the rules for aggregate (structures and
523 arrays) and union types, and store the result in CLASS. */
c4f35dd8
MK
524
525static void
efb1c01c 526amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
53e95fcf 527{
efb1c01c
MK
528 /* 1. If the size of an object is larger than two eightbytes, or in
529 C++, is a non-POD structure or union type, or contains
530 unaligned fields, it has class memory. */
744a8059 531 if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
53e95fcf 532 {
efb1c01c
MK
533 class[0] = class[1] = AMD64_MEMORY;
534 return;
53e95fcf 535 }
efb1c01c
MK
536
537 /* 2. Both eightbytes get initialized to class NO_CLASS. */
538 class[0] = class[1] = AMD64_NO_CLASS;
539
540 /* 3. Each field of an object is classified recursively so that
541 always two fields are considered. The resulting class is
542 calculated according to the classes of the fields in the
543 eightbyte: */
544
545 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
8ffd9b1b 546 {
efb1c01c
MK
547 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
548
549 /* All fields in an array have the same type. */
550 amd64_classify (subtype, class);
744a8059 551 if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS)
efb1c01c 552 class[1] = class[0];
8ffd9b1b 553 }
53e95fcf
JS
554 else
555 {
efb1c01c 556 int i;
53e95fcf 557
efb1c01c
MK
558 /* Structure or union. */
559 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
560 || TYPE_CODE (type) == TYPE_CODE_UNION);
561
562 for (i = 0; i < TYPE_NFIELDS (type); i++)
53e95fcf 563 {
efb1c01c
MK
564 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
565 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
566 enum amd64_reg_class subclass[2];
e4e2711a
JB
567 int bitsize = TYPE_FIELD_BITSIZE (type, i);
568 int endpos;
569
570 if (bitsize == 0)
571 bitsize = TYPE_LENGTH (subtype) * 8;
572 endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
efb1c01c 573
562c50c2 574 /* Ignore static fields. */
d6a843b5 575 if (field_is_static (&TYPE_FIELD (type, i)))
562c50c2
MK
576 continue;
577
efb1c01c
MK
578 gdb_assert (pos == 0 || pos == 1);
579
580 amd64_classify (subtype, subclass);
581 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
e4e2711a
JB
582 if (bitsize <= 64 && pos == 0 && endpos == 1)
583 /* This is a bit of an odd case: We have a field that would
584 normally fit in one of the two eightbytes, except that
585 it is placed in a way that this field straddles them.
586 This has been seen with a structure containing an array.
587
588 The ABI is a bit unclear in this case, but we assume that
589 this field's class (stored in subclass[0]) must also be merged
590 into class[1]. In other words, our field has a piece stored
591 in the second eight-byte, and thus its class applies to
592 the second eight-byte as well.
593
594 In the case where the field length exceeds 8 bytes,
595 it should not be necessary to merge the field class
596 into class[1]. As LEN > 8, subclass[1] is necessarily
597 different from AMD64_NO_CLASS. If subclass[1] is equal
598 to subclass[0], then the normal class[1]/subclass[1]
599 merging will take care of everything. For subclass[1]
600 to be different from subclass[0], I can only see the case
601 where we have a SSE/SSEUP or X87/X87UP pair, which both
602 use up all 16 bytes of the aggregate, and are already
603 handled just fine (because each portion sits on its own
604 8-byte). */
605 class[1] = amd64_merge_classes (class[1], subclass[0]);
efb1c01c
MK
606 if (pos == 0)
607 class[1] = amd64_merge_classes (class[1], subclass[1]);
53e95fcf 608 }
53e95fcf 609 }
efb1c01c
MK
610
611 /* 4. Then a post merger cleanup is done: */
612
613 /* Rule (a): If one of the classes is MEMORY, the whole argument is
614 passed in memory. */
615 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
616 class[0] = class[1] = AMD64_MEMORY;
617
177b42fe 618 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
efb1c01c
MK
619 SSE. */
620 if (class[0] == AMD64_SSEUP)
621 class[0] = AMD64_SSE;
622 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
623 class[1] = AMD64_SSE;
624}
625
626/* Classify TYPE, and store the result in CLASS. */
627
bf4d6c1c 628static void
efb1c01c
MK
629amd64_classify (struct type *type, enum amd64_reg_class class[2])
630{
631 enum type_code code = TYPE_CODE (type);
632 int len = TYPE_LENGTH (type);
633
634 class[0] = class[1] = AMD64_NO_CLASS;
635
636 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
5a7225ed
JB
637 long, long long, and pointers are in the INTEGER class. Similarly,
638 range types, used by languages such as Ada, are also in the INTEGER
639 class. */
efb1c01c 640 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
b929c77f 641 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
9db13498 642 || code == TYPE_CODE_CHAR
efb1c01c
MK
643 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
644 && (len == 1 || len == 2 || len == 4 || len == 8))
645 class[0] = AMD64_INTEGER;
646
5daa78cc
TJB
647 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
648 are in class SSE. */
649 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
650 && (len == 4 || len == 8))
efb1c01c
MK
651 /* FIXME: __m64 . */
652 class[0] = AMD64_SSE;
653
5daa78cc
TJB
654 /* Arguments of types __float128, _Decimal128 and __m128 are split into
655 two halves. The least significant ones belong to class SSE, the most
efb1c01c 656 significant one to class SSEUP. */
5daa78cc
TJB
657 else if (code == TYPE_CODE_DECFLOAT && len == 16)
658 /* FIXME: __float128, __m128. */
659 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
efb1c01c
MK
660
661 /* The 64-bit mantissa of arguments of type long double belongs to
662 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
663 class X87UP. */
664 else if (code == TYPE_CODE_FLT && len == 16)
665 /* Class X87 and X87UP. */
666 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
667
7f7930dd
MK
668 /* Arguments of complex T where T is one of the types float or
669 double get treated as if they are implemented as:
670
671 struct complexT {
672 T real;
673 T imag;
674 }; */
675 else if (code == TYPE_CODE_COMPLEX && len == 8)
676 class[0] = AMD64_SSE;
677 else if (code == TYPE_CODE_COMPLEX && len == 16)
678 class[0] = class[1] = AMD64_SSE;
679
680 /* A variable of type complex long double is classified as type
681 COMPLEX_X87. */
682 else if (code == TYPE_CODE_COMPLEX && len == 32)
683 class[0] = AMD64_COMPLEX_X87;
684
efb1c01c
MK
685 /* Aggregates. */
686 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
687 || code == TYPE_CODE_UNION)
688 amd64_classify_aggregate (type, class);
689}
690
691static enum return_value_convention
6a3a010b 692amd64_return_value (struct gdbarch *gdbarch, struct value *function,
c055b101 693 struct type *type, struct regcache *regcache,
42835c2b 694 gdb_byte *readbuf, const gdb_byte *writebuf)
efb1c01c
MK
695{
696 enum amd64_reg_class class[2];
697 int len = TYPE_LENGTH (type);
90f90721
MK
698 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
699 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
efb1c01c
MK
700 int integer_reg = 0;
701 int sse_reg = 0;
702 int i;
703
704 gdb_assert (!(readbuf && writebuf));
705
706 /* 1. Classify the return type with the classification algorithm. */
bf4d6c1c 707 amd64_classify (type, class);
efb1c01c
MK
708
709 /* 2. If the type has class MEMORY, then the caller provides space
6fa57a7d 710 for the return value and passes the address of this storage in
0963b4bd 711 %rdi as if it were the first argument to the function. In effect,
6fa57a7d
MK
712 this address becomes a hidden first argument.
713
714 On return %rax will contain the address that has been passed in
715 by the caller in %rdi. */
efb1c01c 716 if (class[0] == AMD64_MEMORY)
6fa57a7d
MK
717 {
718 /* As indicated by the comment above, the ABI guarantees that we
719 can always find the return value just after the function has
720 returned. */
721
722 if (readbuf)
723 {
724 ULONGEST addr;
725
726 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
727 read_memory (addr, readbuf, TYPE_LENGTH (type));
728 }
729
730 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
731 }
efb1c01c 732
7f7930dd
MK
733 /* 8. If the class is COMPLEX_X87, the real part of the value is
734 returned in %st0 and the imaginary part in %st1. */
735 if (class[0] == AMD64_COMPLEX_X87)
736 {
737 if (readbuf)
738 {
739 regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
740 regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
741 }
742
743 if (writebuf)
744 {
745 i387_return_value (gdbarch, regcache);
746 regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
747 regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);
748
749 /* Fix up the tag word such that both %st(0) and %st(1) are
750 marked as valid. */
751 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
752 }
753
754 return RETURN_VALUE_REGISTER_CONVENTION;
755 }
756
efb1c01c 757 gdb_assert (class[1] != AMD64_MEMORY);
bad43aa5 758 gdb_assert (len <= 16);
efb1c01c
MK
759
760 for (i = 0; len > 0; i++, len -= 8)
761 {
762 int regnum = -1;
763 int offset = 0;
764
765 switch (class[i])
766 {
767 case AMD64_INTEGER:
768 /* 3. If the class is INTEGER, the next available register
769 of the sequence %rax, %rdx is used. */
770 regnum = integer_regnum[integer_reg++];
771 break;
772
773 case AMD64_SSE:
774 /* 4. If the class is SSE, the next available SSE register
775 of the sequence %xmm0, %xmm1 is used. */
776 regnum = sse_regnum[sse_reg++];
777 break;
778
779 case AMD64_SSEUP:
780 /* 5. If the class is SSEUP, the eightbyte is passed in the
781 upper half of the last used SSE register. */
782 gdb_assert (sse_reg > 0);
783 regnum = sse_regnum[sse_reg - 1];
784 offset = 8;
785 break;
786
787 case AMD64_X87:
788 /* 6. If the class is X87, the value is returned on the X87
789 stack in %st0 as 80-bit x87 number. */
90f90721 790 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
791 if (writebuf)
792 i387_return_value (gdbarch, regcache);
793 break;
794
795 case AMD64_X87UP:
796 /* 7. If the class is X87UP, the value is returned together
797 with the previous X87 value in %st0. */
798 gdb_assert (i > 0 && class[0] == AMD64_X87);
90f90721 799 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
800 offset = 8;
801 len = 2;
802 break;
803
804 case AMD64_NO_CLASS:
805 continue;
806
807 default:
808 gdb_assert (!"Unexpected register class.");
809 }
810
811 gdb_assert (regnum != -1);
812
813 if (readbuf)
814 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
42835c2b 815 readbuf + i * 8);
efb1c01c
MK
816 if (writebuf)
817 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
42835c2b 818 writebuf + i * 8);
efb1c01c
MK
819 }
820
821 return RETURN_VALUE_REGISTER_CONVENTION;
53e95fcf
JS
822}
823\f
824
720aa428
MK
825static CORE_ADDR
826amd64_push_arguments (struct regcache *regcache, int nargs,
6470d250 827 struct value **args, CORE_ADDR sp, int struct_return)
720aa428 828{
bf4d6c1c
JB
829 static int integer_regnum[] =
830 {
831 AMD64_RDI_REGNUM, /* %rdi */
832 AMD64_RSI_REGNUM, /* %rsi */
833 AMD64_RDX_REGNUM, /* %rdx */
834 AMD64_RCX_REGNUM, /* %rcx */
5b856f36
PM
835 AMD64_R8_REGNUM, /* %r8 */
836 AMD64_R9_REGNUM /* %r9 */
bf4d6c1c 837 };
720aa428
MK
838 static int sse_regnum[] =
839 {
840 /* %xmm0 ... %xmm7 */
90f90721
MK
841 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
842 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
843 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
844 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
720aa428
MK
845 };
846 struct value **stack_args = alloca (nargs * sizeof (struct value *));
847 int num_stack_args = 0;
848 int num_elements = 0;
849 int element = 0;
850 int integer_reg = 0;
851 int sse_reg = 0;
852 int i;
853
6470d250
MK
854 /* Reserve a register for the "hidden" argument. */
855 if (struct_return)
856 integer_reg++;
857
720aa428
MK
858 for (i = 0; i < nargs; i++)
859 {
4991999e 860 struct type *type = value_type (args[i]);
720aa428
MK
861 int len = TYPE_LENGTH (type);
862 enum amd64_reg_class class[2];
863 int needed_integer_regs = 0;
864 int needed_sse_regs = 0;
865 int j;
866
867 /* Classify argument. */
bf4d6c1c 868 amd64_classify (type, class);
720aa428
MK
869
870 /* Calculate the number of integer and SSE registers needed for
871 this argument. */
872 for (j = 0; j < 2; j++)
873 {
874 if (class[j] == AMD64_INTEGER)
875 needed_integer_regs++;
876 else if (class[j] == AMD64_SSE)
877 needed_sse_regs++;
878 }
879
880 /* Check whether enough registers are available, and if the
881 argument should be passed in registers at all. */
bf4d6c1c 882 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
720aa428
MK
883 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
884 || (needed_integer_regs == 0 && needed_sse_regs == 0))
885 {
886 /* The argument will be passed on the stack. */
887 num_elements += ((len + 7) / 8);
849e9755 888 stack_args[num_stack_args++] = args[i];
720aa428
MK
889 }
890 else
891 {
892 /* The argument will be passed in registers. */
d8de1ef7
MK
893 const gdb_byte *valbuf = value_contents (args[i]);
894 gdb_byte buf[8];
720aa428
MK
895
896 gdb_assert (len <= 16);
897
898 for (j = 0; len > 0; j++, len -= 8)
899 {
900 int regnum = -1;
901 int offset = 0;
902
903 switch (class[j])
904 {
905 case AMD64_INTEGER:
bf4d6c1c 906 regnum = integer_regnum[integer_reg++];
720aa428
MK
907 break;
908
909 case AMD64_SSE:
910 regnum = sse_regnum[sse_reg++];
911 break;
912
913 case AMD64_SSEUP:
914 gdb_assert (sse_reg > 0);
915 regnum = sse_regnum[sse_reg - 1];
916 offset = 8;
917 break;
918
919 default:
920 gdb_assert (!"Unexpected register class.");
921 }
922
923 gdb_assert (regnum != -1);
924 memset (buf, 0, sizeof buf);
925 memcpy (buf, valbuf + j * 8, min (len, 8));
926 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
927 }
928 }
929 }
930
931 /* Allocate space for the arguments on the stack. */
932 sp -= num_elements * 8;
933
934 /* The psABI says that "The end of the input argument area shall be
935 aligned on a 16 byte boundary." */
936 sp &= ~0xf;
937
938 /* Write out the arguments to the stack. */
939 for (i = 0; i < num_stack_args; i++)
940 {
4991999e 941 struct type *type = value_type (stack_args[i]);
d8de1ef7 942 const gdb_byte *valbuf = value_contents (stack_args[i]);
849e9755
JB
943 int len = TYPE_LENGTH (type);
944
945 write_memory (sp + element * 8, valbuf, len);
946 element += ((len + 7) / 8);
720aa428
MK
947 }
948
949 /* The psABI says that "For calls that may call functions that use
950 varargs or stdargs (prototype-less calls or calls to functions
951 containing ellipsis (...) in the declaration) %al is used as
952 hidden argument to specify the number of SSE registers used. */
90f90721 953 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
720aa428
MK
954 return sp;
955}
956
c4f35dd8 957static CORE_ADDR
7d9b040b 958amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
e53bef9f
MK
959 struct regcache *regcache, CORE_ADDR bp_addr,
960 int nargs, struct value **args, CORE_ADDR sp,
961 int struct_return, CORE_ADDR struct_addr)
53e95fcf 962{
e17a4113 963 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
d8de1ef7 964 gdb_byte buf[8];
c4f35dd8
MK
965
966 /* Pass arguments. */
6470d250 967 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
c4f35dd8
MK
968
969 /* Pass "hidden" argument". */
970 if (struct_return)
971 {
e17a4113 972 store_unsigned_integer (buf, 8, byte_order, struct_addr);
bf4d6c1c 973 regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
c4f35dd8
MK
974 }
975
976 /* Store return address. */
977 sp -= 8;
e17a4113 978 store_unsigned_integer (buf, 8, byte_order, bp_addr);
c4f35dd8
MK
979 write_memory (sp, buf, 8);
980
981 /* Finally, update the stack pointer... */
e17a4113 982 store_unsigned_integer (buf, 8, byte_order, sp);
90f90721 983 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
c4f35dd8
MK
984
985 /* ...and fake a frame pointer. */
90f90721 986 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
c4f35dd8 987
3e210248 988 return sp + 16;
53e95fcf 989}
c4f35dd8 990\f
35669430
DE
991/* Displaced instruction handling. */
992
993/* A partially decoded instruction.
994 This contains enough details for displaced stepping purposes. */
995
996struct amd64_insn
997{
998 /* The number of opcode bytes. */
999 int opcode_len;
1000 /* The offset of the rex prefix or -1 if not present. */
1001 int rex_offset;
1002 /* The offset to the first opcode byte. */
1003 int opcode_offset;
1004 /* The offset to the modrm byte or -1 if not present. */
1005 int modrm_offset;
1006
1007 /* The raw instruction. */
1008 gdb_byte *raw_insn;
1009};
1010
1011struct displaced_step_closure
1012{
1013 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1014 int tmp_used;
1015 int tmp_regno;
1016 ULONGEST tmp_save;
1017
1018 /* Details of the instruction. */
1019 struct amd64_insn insn_details;
1020
1021 /* Amount of space allocated to insn_buf. */
1022 int max_len;
1023
1024 /* The possibly modified insn.
1025 This is a variable-length field. */
1026 gdb_byte insn_buf[1];
1027};
1028
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

/* Indexed by the primary opcode byte: non-zero iff a ModRM byte follows.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
1055
/* Indexed by the second opcode byte after the 0x0f escape: non-zero
   iff a ModRM byte follows.  */

static const unsigned char twobyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
1078
1079static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1080
1081static int
1082rex_prefix_p (gdb_byte pfx)
1083{
1084 return REX_PREFIX_P (pfx);
1085}
1086
1087/* Skip the legacy instruction prefixes in INSN.
1088 We assume INSN is properly sentineled so we don't have to worry
1089 about falling off the end of the buffer. */
1090
1091static gdb_byte *
1903f0e6 1092amd64_skip_prefixes (gdb_byte *insn)
35669430
DE
1093{
1094 while (1)
1095 {
1096 switch (*insn)
1097 {
1098 case DATA_PREFIX_OPCODE:
1099 case ADDR_PREFIX_OPCODE:
1100 case CS_PREFIX_OPCODE:
1101 case DS_PREFIX_OPCODE:
1102 case ES_PREFIX_OPCODE:
1103 case FS_PREFIX_OPCODE:
1104 case GS_PREFIX_OPCODE:
1105 case SS_PREFIX_OPCODE:
1106 case LOCK_PREFIX_OPCODE:
1107 case REPE_PREFIX_OPCODE:
1108 case REPNE_PREFIX_OPCODE:
1109 ++insn;
1110 continue;
1111 default:
1112 break;
1113 }
1114 break;
1115 }
1116
1117 return insn;
1118}
1119
35669430
DE
1120/* Return an integer register (other than RSP) that is unused as an input
1121 operand in INSN.
1122 In order to not require adding a rex prefix if the insn doesn't already
1123 have one, the result is restricted to RAX ... RDI, sans RSP.
1124 The register numbering of the result follows architecture ordering,
1125 e.g. RDI = 7. */
1126
1127static int
1128amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1129{
1130 /* 1 bit for each reg */
1131 int used_regs_mask = 0;
1132
1133 /* There can be at most 3 int regs used as inputs in an insn, and we have
1134 7 to choose from (RAX ... RDI, sans RSP).
1135 This allows us to take a conservative approach and keep things simple.
1136 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1137 that implicitly specify RAX. */
1138
1139 /* Avoid RAX. */
1140 used_regs_mask |= 1 << EAX_REG_NUM;
1141 /* Similarily avoid RDX, implicit operand in divides. */
1142 used_regs_mask |= 1 << EDX_REG_NUM;
1143 /* Avoid RSP. */
1144 used_regs_mask |= 1 << ESP_REG_NUM;
1145
1146 /* If the opcode is one byte long and there's no ModRM byte,
1147 assume the opcode specifies a register. */
1148 if (details->opcode_len == 1 && details->modrm_offset == -1)
1149 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1150
1151 /* Mark used regs in the modrm/sib bytes. */
1152 if (details->modrm_offset != -1)
1153 {
1154 int modrm = details->raw_insn[details->modrm_offset];
1155 int mod = MODRM_MOD_FIELD (modrm);
1156 int reg = MODRM_REG_FIELD (modrm);
1157 int rm = MODRM_RM_FIELD (modrm);
1158 int have_sib = mod != 3 && rm == 4;
1159
1160 /* Assume the reg field of the modrm byte specifies a register. */
1161 used_regs_mask |= 1 << reg;
1162
1163 if (have_sib)
1164 {
1165 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
d48ebb5b 1166 int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
35669430 1167 used_regs_mask |= 1 << base;
d48ebb5b 1168 used_regs_mask |= 1 << idx;
35669430
DE
1169 }
1170 else
1171 {
1172 used_regs_mask |= 1 << rm;
1173 }
1174 }
1175
1176 gdb_assert (used_regs_mask < 256);
1177 gdb_assert (used_regs_mask != 255);
1178
1179 /* Finally, find a free reg. */
1180 {
1181 int i;
1182
1183 for (i = 0; i < 8; ++i)
1184 {
1185 if (! (used_regs_mask & (1 << i)))
1186 return i;
1187 }
1188
1189 /* We shouldn't get here. */
1190 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1191 }
1192}
1193
1194/* Extract the details of INSN that we need. */
1195
1196static void
1197amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1198{
1199 gdb_byte *start = insn;
1200 int need_modrm;
1201
1202 details->raw_insn = insn;
1203
1204 details->opcode_len = -1;
1205 details->rex_offset = -1;
1206 details->opcode_offset = -1;
1207 details->modrm_offset = -1;
1208
1209 /* Skip legacy instruction prefixes. */
1903f0e6 1210 insn = amd64_skip_prefixes (insn);
35669430
DE
1211
1212 /* Skip REX instruction prefix. */
1213 if (rex_prefix_p (*insn))
1214 {
1215 details->rex_offset = insn - start;
1216 ++insn;
1217 }
1218
1219 details->opcode_offset = insn - start;
1220
1221 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1222 {
1223 /* Two or three-byte opcode. */
1224 ++insn;
1225 need_modrm = twobyte_has_modrm[*insn];
1226
1227 /* Check for three-byte opcode. */
1903f0e6 1228 switch (*insn)
35669430 1229 {
1903f0e6
DE
1230 case 0x24:
1231 case 0x25:
1232 case 0x38:
1233 case 0x3a:
1234 case 0x7a:
1235 case 0x7b:
35669430
DE
1236 ++insn;
1237 details->opcode_len = 3;
1903f0e6
DE
1238 break;
1239 default:
1240 details->opcode_len = 2;
1241 break;
35669430 1242 }
35669430
DE
1243 }
1244 else
1245 {
1246 /* One-byte opcode. */
1247 need_modrm = onebyte_has_modrm[*insn];
1248 details->opcode_len = 1;
1249 }
1250
1251 if (need_modrm)
1252 {
1253 ++insn;
1254 details->modrm_offset = insn - start;
1255 }
1256}
1257
1258/* Update %rip-relative addressing in INSN.
1259
1260 %rip-relative addressing only uses a 32-bit displacement.
1261 32 bits is not enough to be guaranteed to cover the distance between where
1262 the real instruction is and where its copy is.
1263 Convert the insn to use base+disp addressing.
1264 We set base = pc + insn_length so we can leave disp unchanged. */
c4f35dd8 1265
35669430
DE
1266static void
1267fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1268 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1269{
e17a4113 1270 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
35669430
DE
1271 const struct amd64_insn *insn_details = &dsc->insn_details;
1272 int modrm_offset = insn_details->modrm_offset;
1273 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1274 CORE_ADDR rip_base;
1275 int32_t disp;
1276 int insn_length;
1277 int arch_tmp_regno, tmp_regno;
1278 ULONGEST orig_value;
1279
1280 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1281 ++insn;
1282
1283 /* Compute the rip-relative address. */
e17a4113 1284 disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
eda5a4d7
PA
1285 insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
1286 dsc->max_len, from);
35669430
DE
1287 rip_base = from + insn_length;
1288
1289 /* We need a register to hold the address.
1290 Pick one not used in the insn.
1291 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1292 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1293 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1294
1295 /* REX.B should be unset as we were using rip-relative addressing,
1296 but ensure it's unset anyway, tmp_regno is not r8-r15. */
1297 if (insn_details->rex_offset != -1)
1298 dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1299
1300 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1301 dsc->tmp_regno = tmp_regno;
1302 dsc->tmp_save = orig_value;
1303 dsc->tmp_used = 1;
1304
1305 /* Convert the ModRM field to be base+disp. */
1306 dsc->insn_buf[modrm_offset] &= ~0xc7;
1307 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1308
1309 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1310
1311 if (debug_displaced)
1312 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
5af949e3
UW
1313 "displaced: using temp reg %d, old value %s, new value %s\n",
1314 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1315 paddress (gdbarch, rip_base));
35669430
DE
1316}
1317
1318static void
1319fixup_displaced_copy (struct gdbarch *gdbarch,
1320 struct displaced_step_closure *dsc,
1321 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1322{
1323 const struct amd64_insn *details = &dsc->insn_details;
1324
1325 if (details->modrm_offset != -1)
1326 {
1327 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1328
1329 if ((modrm & 0xc7) == 0x05)
1330 {
1331 /* The insn uses rip-relative addressing.
1332 Deal with it. */
1333 fixup_riprel (gdbarch, dsc, from, to, regs);
1334 }
1335 }
1336}
1337
1338struct displaced_step_closure *
1339amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1340 CORE_ADDR from, CORE_ADDR to,
1341 struct regcache *regs)
1342{
1343 int len = gdbarch_max_insn_length (gdbarch);
741e63d7 1344 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
35669430
DE
1345 continually watch for running off the end of the buffer. */
1346 int fixup_sentinel_space = len;
1347 struct displaced_step_closure *dsc =
1348 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1349 gdb_byte *buf = &dsc->insn_buf[0];
1350 struct amd64_insn *details = &dsc->insn_details;
1351
1352 dsc->tmp_used = 0;
1353 dsc->max_len = len + fixup_sentinel_space;
1354
1355 read_memory (from, buf, len);
1356
1357 /* Set up the sentinel space so we don't have to worry about running
1358 off the end of the buffer. An excessive number of leading prefixes
1359 could otherwise cause this. */
1360 memset (buf + len, 0, fixup_sentinel_space);
1361
1362 amd64_get_insn_details (buf, details);
1363
1364 /* GDB may get control back after the insn after the syscall.
1365 Presumably this is a kernel bug.
1366 If this is a syscall, make sure there's a nop afterwards. */
1367 {
1368 int syscall_length;
1369
1370 if (amd64_syscall_p (details, &syscall_length))
1371 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1372 }
1373
1374 /* Modify the insn to cope with the address where it will be executed from.
1375 In particular, handle any rip-relative addressing. */
1376 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1377
1378 write_memory (to, buf, len);
1379
1380 if (debug_displaced)
1381 {
5af949e3
UW
1382 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1383 paddress (gdbarch, from), paddress (gdbarch, to));
35669430
DE
1384 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1385 }
1386
1387 return dsc;
1388}
1389
1390static int
1391amd64_absolute_jmp_p (const struct amd64_insn *details)
1392{
1393 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1394
1395 if (insn[0] == 0xff)
1396 {
1397 /* jump near, absolute indirect (/4) */
1398 if ((insn[1] & 0x38) == 0x20)
1399 return 1;
1400
1401 /* jump far, absolute indirect (/5) */
1402 if ((insn[1] & 0x38) == 0x28)
1403 return 1;
1404 }
1405
1406 return 0;
1407}
1408
c2170eef
MM
1409/* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1410
1411static int
1412amd64_jmp_p (const struct amd64_insn *details)
1413{
1414 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1415
1416 /* jump short, relative. */
1417 if (insn[0] == 0xeb)
1418 return 1;
1419
1420 /* jump near, relative. */
1421 if (insn[0] == 0xe9)
1422 return 1;
1423
1424 return amd64_absolute_jmp_p (details);
1425}
1426
35669430
DE
1427static int
1428amd64_absolute_call_p (const struct amd64_insn *details)
1429{
1430 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1431
1432 if (insn[0] == 0xff)
1433 {
1434 /* Call near, absolute indirect (/2) */
1435 if ((insn[1] & 0x38) == 0x10)
1436 return 1;
1437
1438 /* Call far, absolute indirect (/3) */
1439 if ((insn[1] & 0x38) == 0x18)
1440 return 1;
1441 }
1442
1443 return 0;
1444}
1445
1446static int
1447amd64_ret_p (const struct amd64_insn *details)
1448{
1449 /* NOTE: gcc can emit "repz ; ret". */
1450 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1451
1452 switch (insn[0])
1453 {
1454 case 0xc2: /* ret near, pop N bytes */
1455 case 0xc3: /* ret near */
1456 case 0xca: /* ret far, pop N bytes */
1457 case 0xcb: /* ret far */
1458 case 0xcf: /* iret */
1459 return 1;
1460
1461 default:
1462 return 0;
1463 }
1464}
1465
1466static int
1467amd64_call_p (const struct amd64_insn *details)
1468{
1469 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1470
1471 if (amd64_absolute_call_p (details))
1472 return 1;
1473
1474 /* call near, relative */
1475 if (insn[0] == 0xe8)
1476 return 1;
1477
1478 return 0;
1479}
1480
35669430
DE
1481/* Return non-zero if INSN is a system call, and set *LENGTHP to its
1482 length in bytes. Otherwise, return zero. */
1483
1484static int
1485amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1486{
1487 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1488
1489 if (insn[0] == 0x0f && insn[1] == 0x05)
1490 {
1491 *lengthp = 2;
1492 return 1;
1493 }
1494
1495 return 0;
1496}
1497
c2170eef
MM
1498/* Classify the instruction at ADDR using PRED.
1499 Throw an error if the memory can't be read. */
1500
1501static int
1502amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1503 int (*pred) (const struct amd64_insn *))
1504{
1505 struct amd64_insn details;
1506 gdb_byte *buf;
1507 int len, classification;
1508
1509 len = gdbarch_max_insn_length (gdbarch);
1510 buf = alloca (len);
1511
1512 read_code (addr, buf, len);
1513 amd64_get_insn_details (buf, &details);
1514
1515 classification = pred (&details);
1516
1517 return classification;
1518}
1519
1520/* The gdbarch insn_is_call method. */
1521
1522static int
1523amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
1524{
1525 return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
1526}
1527
1528/* The gdbarch insn_is_ret method. */
1529
1530static int
1531amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
1532{
1533 return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
1534}
1535
1536/* The gdbarch insn_is_jump method. */
1537
1538static int
1539amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
1540{
1541 return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
1542}
1543
35669430
DE
1544/* Fix up the state of registers and memory after having single-stepped
1545 a displaced instruction. */
1546
1547void
1548amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1549 struct displaced_step_closure *dsc,
1550 CORE_ADDR from, CORE_ADDR to,
1551 struct regcache *regs)
1552{
e17a4113 1553 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
35669430
DE
1554 /* The offset we applied to the instruction's address. */
1555 ULONGEST insn_offset = to - from;
1556 gdb_byte *insn = dsc->insn_buf;
1557 const struct amd64_insn *insn_details = &dsc->insn_details;
1558
1559 if (debug_displaced)
1560 fprintf_unfiltered (gdb_stdlog,
5af949e3 1561 "displaced: fixup (%s, %s), "
35669430 1562 "insn = 0x%02x 0x%02x ...\n",
5af949e3
UW
1563 paddress (gdbarch, from), paddress (gdbarch, to),
1564 insn[0], insn[1]);
35669430
DE
1565
1566 /* If we used a tmp reg, restore it. */
1567
1568 if (dsc->tmp_used)
1569 {
1570 if (debug_displaced)
5af949e3
UW
1571 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1572 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
35669430
DE
1573 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1574 }
1575
1576 /* The list of issues to contend with here is taken from
1577 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1578 Yay for Free Software! */
1579
1580 /* Relocate the %rip back to the program's instruction stream,
1581 if necessary. */
1582
1583 /* Except in the case of absolute or indirect jump or call
1584 instructions, or a return instruction, the new rip is relative to
1585 the displaced instruction; make it relative to the original insn.
1586 Well, signal handler returns don't need relocation either, but we use the
1587 value of %rip to recognize those; see below. */
1588 if (! amd64_absolute_jmp_p (insn_details)
1589 && ! amd64_absolute_call_p (insn_details)
1590 && ! amd64_ret_p (insn_details))
1591 {
1592 ULONGEST orig_rip;
1593 int insn_len;
1594
1595 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1596
1597 /* A signal trampoline system call changes the %rip, resuming
1598 execution of the main program after the signal handler has
1599 returned. That makes them like 'return' instructions; we
1600 shouldn't relocate %rip.
1601
1602 But most system calls don't, and we do need to relocate %rip.
1603
1604 Our heuristic for distinguishing these cases: if stepping
1605 over the system call instruction left control directly after
1606 the instruction, the we relocate --- control almost certainly
1607 doesn't belong in the displaced copy. Otherwise, we assume
1608 the instruction has put control where it belongs, and leave
1609 it unrelocated. Goodness help us if there are PC-relative
1610 system calls. */
1611 if (amd64_syscall_p (insn_details, &insn_len)
1612 && orig_rip != to + insn_len
1613 /* GDB can get control back after the insn after the syscall.
1614 Presumably this is a kernel bug.
1615 Fixup ensures its a nop, we add one to the length for it. */
1616 && orig_rip != to + insn_len + 1)
1617 {
1618 if (debug_displaced)
1619 fprintf_unfiltered (gdb_stdlog,
1620 "displaced: syscall changed %%rip; "
1621 "not relocating\n");
1622 }
1623 else
1624 {
1625 ULONGEST rip = orig_rip - insn_offset;
1626
1903f0e6
DE
1627 /* If we just stepped over a breakpoint insn, we don't backup
1628 the pc on purpose; this is to match behaviour without
1629 stepping. */
35669430
DE
1630
1631 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1632
1633 if (debug_displaced)
1634 fprintf_unfiltered (gdb_stdlog,
1635 "displaced: "
5af949e3
UW
1636 "relocated %%rip from %s to %s\n",
1637 paddress (gdbarch, orig_rip),
1638 paddress (gdbarch, rip));
35669430
DE
1639 }
1640 }
1641
1642 /* If the instruction was PUSHFL, then the TF bit will be set in the
1643 pushed value, and should be cleared. We'll leave this for later,
1644 since GDB already messes up the TF flag when stepping over a
1645 pushfl. */
1646
1647 /* If the instruction was a call, the return address now atop the
1648 stack is the address following the copied instruction. We need
1649 to make it the address following the original instruction. */
1650 if (amd64_call_p (insn_details))
1651 {
1652 ULONGEST rsp;
1653 ULONGEST retaddr;
1654 const ULONGEST retaddr_len = 8;
1655
1656 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
e17a4113 1657 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
35669430 1658 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
e17a4113 1659 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
35669430
DE
1660
1661 if (debug_displaced)
1662 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
1663 "displaced: relocated return addr at %s "
1664 "to %s\n",
1665 paddress (gdbarch, rsp),
1666 paddress (gdbarch, retaddr));
35669430
DE
1667 }
1668}
dde08ee1
PA
1669
1670/* If the instruction INSN uses RIP-relative addressing, return the
1671 offset into the raw INSN where the displacement to be adjusted is
1672 found. Returns 0 if the instruction doesn't use RIP-relative
1673 addressing. */
1674
1675static int
1676rip_relative_offset (struct amd64_insn *insn)
1677{
1678 if (insn->modrm_offset != -1)
1679 {
1680 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1681
1682 if ((modrm & 0xc7) == 0x05)
1683 {
1684 /* The displacement is found right after the ModRM byte. */
1685 return insn->modrm_offset + 1;
1686 }
1687 }
1688
1689 return 0;
1690}
1691
1692static void
1693append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1694{
1695 target_write_memory (*to, buf, len);
1696 *to += len;
1697}
1698
60965737 1699static void
dde08ee1
PA
1700amd64_relocate_instruction (struct gdbarch *gdbarch,
1701 CORE_ADDR *to, CORE_ADDR oldloc)
1702{
1703 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1704 int len = gdbarch_max_insn_length (gdbarch);
1705 /* Extra space for sentinels. */
1706 int fixup_sentinel_space = len;
1707 gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
1708 struct amd64_insn insn_details;
1709 int offset = 0;
1710 LONGEST rel32, newrel;
1711 gdb_byte *insn;
1712 int insn_length;
1713
1714 read_memory (oldloc, buf, len);
1715
1716 /* Set up the sentinel space so we don't have to worry about running
1717 off the end of the buffer. An excessive number of leading prefixes
1718 could otherwise cause this. */
1719 memset (buf + len, 0, fixup_sentinel_space);
1720
1721 insn = buf;
1722 amd64_get_insn_details (insn, &insn_details);
1723
1724 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1725
1726 /* Skip legacy instruction prefixes. */
1727 insn = amd64_skip_prefixes (insn);
1728
1729 /* Adjust calls with 32-bit relative addresses as push/jump, with
1730 the address pushed being the location where the original call in
1731 the user program would return to. */
1732 if (insn[0] == 0xe8)
1733 {
1734 gdb_byte push_buf[16];
1735 unsigned int ret_addr;
1736
1737 /* Where "ret" in the original code will return to. */
1738 ret_addr = oldloc + insn_length;
0963b4bd 1739 push_buf[0] = 0x68; /* pushq $... */
144db827 1740 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
dde08ee1
PA
1741 /* Push the push. */
1742 append_insns (to, 5, push_buf);
1743
1744 /* Convert the relative call to a relative jump. */
1745 insn[0] = 0xe9;
1746
1747 /* Adjust the destination offset. */
1748 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1749 newrel = (oldloc - *to) + rel32;
f4a1794a
KY
1750 store_signed_integer (insn + 1, 4, byte_order, newrel);
1751
1752 if (debug_displaced)
1753 fprintf_unfiltered (gdb_stdlog,
1754 "Adjusted insn rel32=%s at %s to"
1755 " rel32=%s at %s\n",
1756 hex_string (rel32), paddress (gdbarch, oldloc),
1757 hex_string (newrel), paddress (gdbarch, *to));
dde08ee1
PA
1758
1759 /* Write the adjusted jump into its displaced location. */
1760 append_insns (to, 5, insn);
1761 return;
1762 }
1763
1764 offset = rip_relative_offset (&insn_details);
1765 if (!offset)
1766 {
1767 /* Adjust jumps with 32-bit relative addresses. Calls are
1768 already handled above. */
1769 if (insn[0] == 0xe9)
1770 offset = 1;
1771 /* Adjust conditional jumps. */
1772 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1773 offset = 2;
1774 }
1775
1776 if (offset)
1777 {
1778 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1779 newrel = (oldloc - *to) + rel32;
f4a1794a 1780 store_signed_integer (insn + offset, 4, byte_order, newrel);
dde08ee1
PA
1781 if (debug_displaced)
1782 fprintf_unfiltered (gdb_stdlog,
f4a1794a
KY
1783 "Adjusted insn rel32=%s at %s to"
1784 " rel32=%s at %s\n",
dde08ee1
PA
1785 hex_string (rel32), paddress (gdbarch, oldloc),
1786 hex_string (newrel), paddress (gdbarch, *to));
1787 }
1788
1789 /* Write the adjusted instruction into its displaced location. */
1790 append_insns (to, insn_length, buf);
1791}
1792
35669430 1793\f
c4f35dd8 1794/* The maximum number of saved registers. This should include %rip. */
90f90721 1795#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
c4f35dd8 1796
e53bef9f 1797struct amd64_frame_cache
c4f35dd8
MK
1798{
1799 /* Base address. */
1800 CORE_ADDR base;
8fbca658 1801 int base_p;
c4f35dd8
MK
1802 CORE_ADDR sp_offset;
1803 CORE_ADDR pc;
1804
1805 /* Saved registers. */
e53bef9f 1806 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
c4f35dd8 1807 CORE_ADDR saved_sp;
e0c62198 1808 int saved_sp_reg;
c4f35dd8
MK
1809
1810 /* Do we have a frame? */
1811 int frameless_p;
1812};
8dda9770 1813
d2449ee8 1814/* Initialize a frame cache. */
c4f35dd8 1815
d2449ee8
DJ
1816static void
1817amd64_init_frame_cache (struct amd64_frame_cache *cache)
8dda9770 1818{
c4f35dd8
MK
1819 int i;
1820
c4f35dd8
MK
1821 /* Base address. */
1822 cache->base = 0;
8fbca658 1823 cache->base_p = 0;
c4f35dd8
MK
1824 cache->sp_offset = -8;
1825 cache->pc = 0;
1826
1827 /* Saved registers. We initialize these to -1 since zero is a valid
bba66b87
DE
1828 offset (that's where %rbp is supposed to be stored).
1829 The values start out as being offsets, and are later converted to
1830 addresses (at which point -1 is interpreted as an address, still meaning
1831 "invalid"). */
e53bef9f 1832 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
1833 cache->saved_regs[i] = -1;
1834 cache->saved_sp = 0;
e0c62198 1835 cache->saved_sp_reg = -1;
c4f35dd8
MK
1836
1837 /* Frameless until proven otherwise. */
1838 cache->frameless_p = 1;
d2449ee8 1839}
c4f35dd8 1840
d2449ee8
DJ
1841/* Allocate and initialize a frame cache. */
1842
1843static struct amd64_frame_cache *
1844amd64_alloc_frame_cache (void)
1845{
1846 struct amd64_frame_cache *cache;
1847
1848 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1849 amd64_init_frame_cache (cache);
c4f35dd8 1850 return cache;
8dda9770 1851}
53e95fcf 1852
e0c62198
L
1853/* GCC 4.4 and later, can put code in the prologue to realign the
1854 stack pointer. Check whether PC points to such code, and update
1855 CACHE accordingly. Return the first instruction after the code
1856 sequence or CURRENT_PC, whichever is smaller. If we don't
1857 recognize the code, return PC. */
1858
1859static CORE_ADDR
1860amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1861 struct amd64_frame_cache *cache)
1862{
1863 /* There are 2 code sequences to re-align stack before the frame
1864 gets set up:
1865
1866 1. Use a caller-saved saved register:
1867
1868 leaq 8(%rsp), %reg
1869 andq $-XXX, %rsp
1870 pushq -8(%reg)
1871
1872 2. Use a callee-saved saved register:
1873
1874 pushq %reg
1875 leaq 16(%rsp), %reg
1876 andq $-XXX, %rsp
1877 pushq -8(%reg)
1878
1879 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1880
1881 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1882 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1883 */
1884
1885 gdb_byte buf[18];
1886 int reg, r;
1887 int offset, offset_and;
e0c62198 1888
bae8a07a 1889 if (target_read_code (pc, buf, sizeof buf))
e0c62198
L
1890 return pc;
1891
1892 /* Check caller-saved saved register. The first instruction has
1893 to be "leaq 8(%rsp), %reg". */
1894 if ((buf[0] & 0xfb) == 0x48
1895 && buf[1] == 0x8d
1896 && buf[3] == 0x24
1897 && buf[4] == 0x8)
1898 {
1899 /* MOD must be binary 10 and R/M must be binary 100. */
1900 if ((buf[2] & 0xc7) != 0x44)
1901 return pc;
1902
1903 /* REG has register number. */
1904 reg = (buf[2] >> 3) & 7;
1905
1906 /* Check the REX.R bit. */
1907 if (buf[0] == 0x4c)
1908 reg += 8;
1909
1910 offset = 5;
1911 }
1912 else
1913 {
1914 /* Check callee-saved saved register. The first instruction
1915 has to be "pushq %reg". */
1916 reg = 0;
1917 if ((buf[0] & 0xf8) == 0x50)
1918 offset = 0;
1919 else if ((buf[0] & 0xf6) == 0x40
1920 && (buf[1] & 0xf8) == 0x50)
1921 {
1922 /* Check the REX.B bit. */
1923 if ((buf[0] & 1) != 0)
1924 reg = 8;
1925
1926 offset = 1;
1927 }
1928 else
1929 return pc;
1930
1931 /* Get register. */
1932 reg += buf[offset] & 0x7;
1933
1934 offset++;
1935
1936 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1937 if ((buf[offset] & 0xfb) != 0x48
1938 || buf[offset + 1] != 0x8d
1939 || buf[offset + 3] != 0x24
1940 || buf[offset + 4] != 0x10)
1941 return pc;
1942
1943 /* MOD must be binary 10 and R/M must be binary 100. */
1944 if ((buf[offset + 2] & 0xc7) != 0x44)
1945 return pc;
1946
1947 /* REG has register number. */
1948 r = (buf[offset + 2] >> 3) & 7;
1949
1950 /* Check the REX.R bit. */
1951 if (buf[offset] == 0x4c)
1952 r += 8;
1953
1954 /* Registers in pushq and leaq have to be the same. */
1955 if (reg != r)
1956 return pc;
1957
1958 offset += 5;
1959 }
1960
1961 /* Rigister can't be %rsp nor %rbp. */
1962 if (reg == 4 || reg == 5)
1963 return pc;
1964
1965 /* The next instruction has to be "andq $-XXX, %rsp". */
1966 if (buf[offset] != 0x48
1967 || buf[offset + 2] != 0xe4
1968 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1969 return pc;
1970
1971 offset_and = offset;
1972 offset += buf[offset + 1] == 0x81 ? 7 : 4;
1973
1974 /* The next instruction has to be "pushq -8(%reg)". */
1975 r = 0;
1976 if (buf[offset] == 0xff)
1977 offset++;
1978 else if ((buf[offset] & 0xf6) == 0x40
1979 && buf[offset + 1] == 0xff)
1980 {
1981 /* Check the REX.B bit. */
1982 if ((buf[offset] & 0x1) != 0)
1983 r = 8;
1984 offset += 2;
1985 }
1986 else
1987 return pc;
1988
1989 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
1990 01. */
1991 if (buf[offset + 1] != 0xf8
1992 || (buf[offset] & 0xf8) != 0x70)
1993 return pc;
1994
1995 /* R/M has register. */
1996 r += buf[offset] & 7;
1997
1998 /* Registers in leaq and pushq have to be the same. */
1999 if (reg != r)
2000 return pc;
2001
2002 if (current_pc > pc + offset_and)
35669430 2003 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
e0c62198
L
2004
2005 return min (pc + offset + 2, current_pc);
2006}
2007
ac142d96
L
2008/* Similar to amd64_analyze_stack_align for x32. */
2009
2010static CORE_ADDR
2011amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2012 struct amd64_frame_cache *cache)
2013{
2014 /* There are 2 code sequences to re-align stack before the frame
2015 gets set up:
2016
2017 1. Use a caller-saved saved register:
2018
2019 leaq 8(%rsp), %reg
2020 andq $-XXX, %rsp
2021 pushq -8(%reg)
2022
2023 or
2024
2025 [addr32] leal 8(%rsp), %reg
2026 andl $-XXX, %esp
2027 [addr32] pushq -8(%reg)
2028
2029 2. Use a callee-saved saved register:
2030
2031 pushq %reg
2032 leaq 16(%rsp), %reg
2033 andq $-XXX, %rsp
2034 pushq -8(%reg)
2035
2036 or
2037
2038 pushq %reg
2039 [addr32] leal 16(%rsp), %reg
2040 andl $-XXX, %esp
2041 [addr32] pushq -8(%reg)
2042
2043 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2044
2045 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2046 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2047
2048 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2049
2050 0x83 0xe4 0xf0 andl $-16, %esp
2051 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2052 */
2053
2054 gdb_byte buf[19];
2055 int reg, r;
2056 int offset, offset_and;
2057
2058 if (target_read_memory (pc, buf, sizeof buf))
2059 return pc;
2060
2061 /* Skip optional addr32 prefix. */
2062 offset = buf[0] == 0x67 ? 1 : 0;
2063
2064 /* Check caller-saved saved register. The first instruction has
2065 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2066 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2067 && buf[offset + 1] == 0x8d
2068 && buf[offset + 3] == 0x24
2069 && buf[offset + 4] == 0x8)
2070 {
2071 /* MOD must be binary 10 and R/M must be binary 100. */
2072 if ((buf[offset + 2] & 0xc7) != 0x44)
2073 return pc;
2074
2075 /* REG has register number. */
2076 reg = (buf[offset + 2] >> 3) & 7;
2077
2078 /* Check the REX.R bit. */
2079 if ((buf[offset] & 0x4) != 0)
2080 reg += 8;
2081
2082 offset += 5;
2083 }
2084 else
2085 {
2086 /* Check callee-saved saved register. The first instruction
2087 has to be "pushq %reg". */
2088 reg = 0;
2089 if ((buf[offset] & 0xf6) == 0x40
2090 && (buf[offset + 1] & 0xf8) == 0x50)
2091 {
2092 /* Check the REX.B bit. */
2093 if ((buf[offset] & 1) != 0)
2094 reg = 8;
2095
2096 offset += 1;
2097 }
2098 else if ((buf[offset] & 0xf8) != 0x50)
2099 return pc;
2100
2101 /* Get register. */
2102 reg += buf[offset] & 0x7;
2103
2104 offset++;
2105
2106 /* Skip optional addr32 prefix. */
2107 if (buf[offset] == 0x67)
2108 offset++;
2109
2110 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2111 "leal 16(%rsp), %reg". */
2112 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2113 || buf[offset + 1] != 0x8d
2114 || buf[offset + 3] != 0x24
2115 || buf[offset + 4] != 0x10)
2116 return pc;
2117
2118 /* MOD must be binary 10 and R/M must be binary 100. */
2119 if ((buf[offset + 2] & 0xc7) != 0x44)
2120 return pc;
2121
2122 /* REG has register number. */
2123 r = (buf[offset + 2] >> 3) & 7;
2124
2125 /* Check the REX.R bit. */
2126 if ((buf[offset] & 0x4) != 0)
2127 r += 8;
2128
2129 /* Registers in pushq and leaq have to be the same. */
2130 if (reg != r)
2131 return pc;
2132
2133 offset += 5;
2134 }
2135
2136 /* Rigister can't be %rsp nor %rbp. */
2137 if (reg == 4 || reg == 5)
2138 return pc;
2139
2140 /* The next instruction may be "andq $-XXX, %rsp" or
2141 "andl $-XXX, %esp". */
2142 if (buf[offset] != 0x48)
2143 offset--;
2144
2145 if (buf[offset + 2] != 0xe4
2146 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2147 return pc;
2148
2149 offset_and = offset;
2150 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2151
2152 /* Skip optional addr32 prefix. */
2153 if (buf[offset] == 0x67)
2154 offset++;
2155
2156 /* The next instruction has to be "pushq -8(%reg)". */
2157 r = 0;
2158 if (buf[offset] == 0xff)
2159 offset++;
2160 else if ((buf[offset] & 0xf6) == 0x40
2161 && buf[offset + 1] == 0xff)
2162 {
2163 /* Check the REX.B bit. */
2164 if ((buf[offset] & 0x1) != 0)
2165 r = 8;
2166 offset += 2;
2167 }
2168 else
2169 return pc;
2170
2171 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2172 01. */
2173 if (buf[offset + 1] != 0xf8
2174 || (buf[offset] & 0xf8) != 0x70)
2175 return pc;
2176
2177 /* R/M has register. */
2178 r += buf[offset] & 7;
2179
2180 /* Registers in leaq and pushq have to be the same. */
2181 if (reg != r)
2182 return pc;
2183
2184 if (current_pc > pc + offset_and)
2185 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2186
2187 return min (pc + offset + 2, current_pc);
2188}
2189
c4f35dd8
MK
2190/* Do a limited analysis of the prologue at PC and update CACHE
2191 accordingly. Bail out early if CURRENT_PC is reached. Return the
2192 address where the analysis stopped.
2193
2194 We will handle only functions beginning with:
2195
2196 pushq %rbp 0x55
50f1ae7b 2197 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
c4f35dd8 2198
649e6d92
MK
2199 or (for the X32 ABI):
2200
2201 pushq %rbp 0x55
2202 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2203
2204 Any function that doesn't start with one of these sequences will be
2205 assumed to have no prologue and thus no valid frame pointer in
2206 %rbp. */
c4f35dd8
MK
2207
2208static CORE_ADDR
e17a4113
UW
2209amd64_analyze_prologue (struct gdbarch *gdbarch,
2210 CORE_ADDR pc, CORE_ADDR current_pc,
e53bef9f 2211 struct amd64_frame_cache *cache)
53e95fcf 2212{
e17a4113 2213 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
50f1ae7b
DE
2214 /* There are two variations of movq %rsp, %rbp. */
2215 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2216 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
649e6d92
MK
2217 /* Ditto for movl %esp, %ebp. */
2218 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2219 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2220
d8de1ef7
MK
2221 gdb_byte buf[3];
2222 gdb_byte op;
c4f35dd8
MK
2223
2224 if (current_pc <= pc)
2225 return current_pc;
2226
ac142d96
L
2227 if (gdbarch_ptr_bit (gdbarch) == 32)
2228 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2229 else
2230 pc = amd64_analyze_stack_align (pc, current_pc, cache);
e0c62198 2231
bae8a07a 2232 op = read_code_unsigned_integer (pc, 1, byte_order);
c4f35dd8
MK
2233
2234 if (op == 0x55) /* pushq %rbp */
2235 {
2236 /* Take into account that we've executed the `pushq %rbp' that
2237 starts this instruction sequence. */
90f90721 2238 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
c4f35dd8
MK
2239 cache->sp_offset += 8;
2240
2241 /* If that's all, return now. */
2242 if (current_pc <= pc + 1)
2243 return current_pc;
2244
bae8a07a 2245 read_code (pc + 1, buf, 3);
c4f35dd8 2246
649e6d92
MK
2247 /* Check for `movq %rsp, %rbp'. */
2248 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2249 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2250 {
2251 /* OK, we actually have a frame. */
2252 cache->frameless_p = 0;
2253 return pc + 4;
2254 }
2255
2256 /* For X32, also check for `movq %esp, %ebp'. */
2257 if (gdbarch_ptr_bit (gdbarch) == 32)
2258 {
2259 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2260 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2261 {
2262 /* OK, we actually have a frame. */
2263 cache->frameless_p = 0;
2264 return pc + 3;
2265 }
2266 }
2267
2268 return pc + 1;
c4f35dd8
MK
2269 }
2270
2271 return pc;
53e95fcf
JS
2272}
2273
df15bd07
JK
2274/* Work around false termination of prologue - GCC PR debug/48827.
2275
2276 START_PC is the first instruction of a function, PC is its minimal already
2277 determined advanced address. Function returns PC if it has nothing to do.
2278
2279 84 c0 test %al,%al
2280 74 23 je after
2281 <-- here is 0 lines advance - the false prologue end marker.
2282 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2283 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2284 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2285 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2286 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2287 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2288 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2289 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2290 after: */
c4f35dd8
MK
2291
2292static CORE_ADDR
df15bd07 2293amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
53e95fcf 2294{
08711b9a
JK
2295 struct symtab_and_line start_pc_sal, next_sal;
2296 gdb_byte buf[4 + 8 * 7];
2297 int offset, xmmreg;
c4f35dd8 2298
08711b9a
JK
2299 if (pc == start_pc)
2300 return pc;
2301
2302 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2303 if (start_pc_sal.symtab == NULL
df15bd07 2304 || producer_is_gcc_ge_4 (start_pc_sal.symtab->producer) < 6
08711b9a
JK
2305 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2306 return pc;
2307
2308 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2309 if (next_sal.line != start_pc_sal.line)
2310 return pc;
2311
2312 /* START_PC can be from overlayed memory, ignored here. */
bae8a07a 2313 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
08711b9a
JK
2314 return pc;
2315
2316 /* test %al,%al */
2317 if (buf[0] != 0x84 || buf[1] != 0xc0)
2318 return pc;
2319 /* je AFTER */
2320 if (buf[2] != 0x74)
2321 return pc;
2322
2323 offset = 4;
2324 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2325 {
bede5f5f 2326 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
08711b9a 2327 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
bede5f5f 2328 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
08711b9a
JK
2329 return pc;
2330
bede5f5f
JK
2331 /* 0b01?????? */
2332 if ((buf[offset + 2] & 0xc0) == 0x40)
08711b9a
JK
2333 {
2334 /* 8-bit displacement. */
2335 offset += 4;
2336 }
bede5f5f
JK
2337 /* 0b10?????? */
2338 else if ((buf[offset + 2] & 0xc0) == 0x80)
08711b9a
JK
2339 {
2340 /* 32-bit displacement. */
2341 offset += 7;
2342 }
2343 else
2344 return pc;
2345 }
2346
2347 /* je AFTER */
2348 if (offset - 4 != buf[3])
2349 return pc;
2350
2351 return next_sal.end;
53e95fcf 2352}
df15bd07
JK
2353
2354/* Return PC of first real instruction. */
2355
2356static CORE_ADDR
2357amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2358{
2359 struct amd64_frame_cache cache;
2360 CORE_ADDR pc;
56bf0743
KB
2361 CORE_ADDR func_addr;
2362
2363 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2364 {
2365 CORE_ADDR post_prologue_pc
2366 = skip_prologue_using_sal (gdbarch, func_addr);
2367 struct symtab *s = find_pc_symtab (func_addr);
2368
2369 /* Clang always emits a line note before the prologue and another
2370 one after. We trust clang to emit usable line notes. */
2371 if (post_prologue_pc
2372 && (s != NULL
2373 && s->producer != NULL
2374 && strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
2375 return max (start_pc, post_prologue_pc);
2376 }
df15bd07
JK
2377
2378 amd64_init_frame_cache (&cache);
2379 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2380 &cache);
2381 if (cache.frameless_p)
2382 return start_pc;
2383
2384 return amd64_skip_xmm_prologue (pc, start_pc);
2385}
c4f35dd8 2386\f
53e95fcf 2387
c4f35dd8
MK
2388/* Normal frames. */
2389
8fbca658
PA
2390static void
2391amd64_frame_cache_1 (struct frame_info *this_frame,
2392 struct amd64_frame_cache *cache)
6d686a84 2393{
e17a4113
UW
2394 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2395 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
d8de1ef7 2396 gdb_byte buf[8];
6d686a84 2397 int i;
6d686a84 2398
10458914 2399 cache->pc = get_frame_func (this_frame);
c4f35dd8 2400 if (cache->pc != 0)
e17a4113
UW
2401 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2402 cache);
c4f35dd8
MK
2403
2404 if (cache->frameless_p)
2405 {
4a28816e
MK
2406 /* We didn't find a valid frame. If we're at the start of a
2407 function, or somewhere half-way its prologue, the function's
2408 frame probably hasn't been fully setup yet. Try to
2409 reconstruct the base address for the stack frame by looking
2410 at the stack pointer. For truly "frameless" functions this
2411 might work too. */
c4f35dd8 2412
e0c62198
L
2413 if (cache->saved_sp_reg != -1)
2414 {
8fbca658
PA
2415 /* Stack pointer has been saved. */
2416 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2417 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2418
e0c62198
L
2419 /* We're halfway aligning the stack. */
2420 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2421 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2422
2423 /* This will be added back below. */
2424 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2425 }
2426 else
2427 {
2428 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
e17a4113
UW
2429 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2430 + cache->sp_offset;
e0c62198 2431 }
c4f35dd8 2432 }
35883a3f
MK
2433 else
2434 {
10458914 2435 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
e17a4113 2436 cache->base = extract_unsigned_integer (buf, 8, byte_order);
35883a3f 2437 }
c4f35dd8
MK
2438
2439 /* Now that we have the base address for the stack frame we can
2440 calculate the value of %rsp in the calling frame. */
2441 cache->saved_sp = cache->base + 16;
2442
35883a3f
MK
2443 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2444 frame we find it at the same offset from the reconstructed base
e0c62198
L
2445 address. If we're halfway aligning the stack, %rip is handled
2446 differently (see above). */
2447 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2448 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
35883a3f 2449
c4f35dd8
MK
2450 /* Adjust all the saved registers such that they contain addresses
2451 instead of offsets. */
e53bef9f 2452 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
2453 if (cache->saved_regs[i] != -1)
2454 cache->saved_regs[i] += cache->base;
2455
8fbca658
PA
2456 cache->base_p = 1;
2457}
2458
2459static struct amd64_frame_cache *
2460amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2461{
2462 volatile struct gdb_exception ex;
2463 struct amd64_frame_cache *cache;
2464
2465 if (*this_cache)
2466 return *this_cache;
2467
2468 cache = amd64_alloc_frame_cache ();
2469 *this_cache = cache;
2470
2471 TRY_CATCH (ex, RETURN_MASK_ERROR)
2472 {
2473 amd64_frame_cache_1 (this_frame, cache);
2474 }
2475 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2476 throw_exception (ex);
2477
c4f35dd8 2478 return cache;
6d686a84
ML
2479}
2480
8fbca658
PA
2481static enum unwind_stop_reason
2482amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2483 void **this_cache)
2484{
2485 struct amd64_frame_cache *cache =
2486 amd64_frame_cache (this_frame, this_cache);
2487
2488 if (!cache->base_p)
2489 return UNWIND_UNAVAILABLE;
2490
2491 /* This marks the outermost frame. */
2492 if (cache->base == 0)
2493 return UNWIND_OUTERMOST;
2494
2495 return UNWIND_NO_REASON;
2496}
2497
c4f35dd8 2498static void
10458914 2499amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
e53bef9f 2500 struct frame_id *this_id)
c4f35dd8 2501{
e53bef9f 2502 struct amd64_frame_cache *cache =
10458914 2503 amd64_frame_cache (this_frame, this_cache);
c4f35dd8 2504
8fbca658 2505 if (!cache->base_p)
5ce0145d
PA
2506 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2507 else if (cache->base == 0)
2508 {
2509 /* This marks the outermost frame. */
2510 return;
2511 }
2512 else
2513 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
c4f35dd8 2514}
e76e1718 2515
10458914
DJ
2516static struct value *
2517amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2518 int regnum)
53e95fcf 2519{
10458914 2520 struct gdbarch *gdbarch = get_frame_arch (this_frame);
e53bef9f 2521 struct amd64_frame_cache *cache =
10458914 2522 amd64_frame_cache (this_frame, this_cache);
e76e1718 2523
c4f35dd8 2524 gdb_assert (regnum >= 0);
b1ab997b 2525
2ae02b47 2526 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
10458914 2527 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
e76e1718 2528
e53bef9f 2529 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
10458914
DJ
2530 return frame_unwind_got_memory (this_frame, regnum,
2531 cache->saved_regs[regnum]);
e76e1718 2532
10458914 2533 return frame_unwind_got_register (this_frame, regnum, regnum);
c4f35dd8 2534}
e76e1718 2535
e53bef9f 2536static const struct frame_unwind amd64_frame_unwind =
c4f35dd8
MK
2537{
2538 NORMAL_FRAME,
8fbca658 2539 amd64_frame_unwind_stop_reason,
e53bef9f 2540 amd64_frame_this_id,
10458914
DJ
2541 amd64_frame_prev_register,
2542 NULL,
2543 default_frame_sniffer
c4f35dd8 2544};
c4f35dd8 2545\f
6710bf39
SS
2546/* Generate a bytecode expression to get the value of the saved PC. */
2547
2548static void
2549amd64_gen_return_address (struct gdbarch *gdbarch,
2550 struct agent_expr *ax, struct axs_value *value,
2551 CORE_ADDR scope)
2552{
2553 /* The following sequence assumes the traditional use of the base
2554 register. */
2555 ax_reg (ax, AMD64_RBP_REGNUM);
2556 ax_const_l (ax, 8);
2557 ax_simple (ax, aop_add);
2558 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2559 value->kind = axs_lvalue_memory;
2560}
2561\f
e76e1718 2562
c4f35dd8
MK
2563/* Signal trampolines. */
2564
2565/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2566 64-bit variants. This would require using identical frame caches
2567 on both platforms. */
2568
e53bef9f 2569static struct amd64_frame_cache *
10458914 2570amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
c4f35dd8 2571{
e17a4113
UW
2572 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2573 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2574 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8fbca658 2575 volatile struct gdb_exception ex;
e53bef9f 2576 struct amd64_frame_cache *cache;
c4f35dd8 2577 CORE_ADDR addr;
d8de1ef7 2578 gdb_byte buf[8];
2b5e0749 2579 int i;
c4f35dd8
MK
2580
2581 if (*this_cache)
2582 return *this_cache;
2583
e53bef9f 2584 cache = amd64_alloc_frame_cache ();
c4f35dd8 2585
8fbca658
PA
2586 TRY_CATCH (ex, RETURN_MASK_ERROR)
2587 {
2588 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2589 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2590
2591 addr = tdep->sigcontext_addr (this_frame);
2592 gdb_assert (tdep->sc_reg_offset);
2593 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2594 for (i = 0; i < tdep->sc_num_regs; i++)
2595 if (tdep->sc_reg_offset[i] != -1)
2596 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
c4f35dd8 2597
8fbca658
PA
2598 cache->base_p = 1;
2599 }
2600 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2601 throw_exception (ex);
c4f35dd8
MK
2602
2603 *this_cache = cache;
2604 return cache;
53e95fcf
JS
2605}
2606
8fbca658
PA
2607static enum unwind_stop_reason
2608amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2609 void **this_cache)
2610{
2611 struct amd64_frame_cache *cache =
2612 amd64_sigtramp_frame_cache (this_frame, this_cache);
2613
2614 if (!cache->base_p)
2615 return UNWIND_UNAVAILABLE;
2616
2617 return UNWIND_NO_REASON;
2618}
2619
c4f35dd8 2620static void
10458914 2621amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
e53bef9f 2622 void **this_cache, struct frame_id *this_id)
c4f35dd8 2623{
e53bef9f 2624 struct amd64_frame_cache *cache =
10458914 2625 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 2626
8fbca658 2627 if (!cache->base_p)
5ce0145d
PA
2628 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2629 else if (cache->base == 0)
2630 {
2631 /* This marks the outermost frame. */
2632 return;
2633 }
2634 else
2635 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
c4f35dd8
MK
2636}
2637
10458914
DJ
/* Implement the "prev_register" frame_unwind method for sigtramp
   frames.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  /* The saved-register lookup is shared with normal frames.  */
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
2647
10458914
DJ
2648static int
2649amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2650 struct frame_info *this_frame,
2651 void **this_cache)
c4f35dd8 2652{
10458914 2653 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
911bc6ee
MK
2654
2655 /* We shouldn't even bother if we don't have a sigcontext_addr
2656 handler. */
2657 if (tdep->sigcontext_addr == NULL)
10458914 2658 return 0;
911bc6ee
MK
2659
2660 if (tdep->sigtramp_p != NULL)
2661 {
10458914
DJ
2662 if (tdep->sigtramp_p (this_frame))
2663 return 1;
911bc6ee 2664 }
c4f35dd8 2665
911bc6ee 2666 if (tdep->sigtramp_start != 0)
1c3545ae 2667 {
10458914 2668 CORE_ADDR pc = get_frame_pc (this_frame);
1c3545ae 2669
911bc6ee
MK
2670 gdb_assert (tdep->sigtramp_end != 0);
2671 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
10458914 2672 return 1;
1c3545ae 2673 }
c4f35dd8 2674
10458914 2675 return 0;
c4f35dd8 2676}
10458914
DJ
2677
2678static const struct frame_unwind amd64_sigtramp_frame_unwind =
2679{
2680 SIGTRAMP_FRAME,
8fbca658 2681 amd64_sigtramp_frame_unwind_stop_reason,
10458914
DJ
2682 amd64_sigtramp_frame_this_id,
2683 amd64_sigtramp_frame_prev_register,
2684 NULL,
2685 amd64_sigtramp_frame_sniffer
2686};
c4f35dd8
MK
2687\f
2688
2689static CORE_ADDR
10458914 2690amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
c4f35dd8 2691{
e53bef9f 2692 struct amd64_frame_cache *cache =
10458914 2693 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
2694
2695 return cache->base;
2696}
2697
e53bef9f 2698static const struct frame_base amd64_frame_base =
c4f35dd8 2699{
e53bef9f
MK
2700 &amd64_frame_unwind,
2701 amd64_frame_base_address,
2702 amd64_frame_base_address,
2703 amd64_frame_base_address
c4f35dd8
MK
2704};
2705
872761f4
MS
2706/* Normal frames, but in a function epilogue. */
2707
2708/* The epilogue is defined here as the 'ret' instruction, which will
2709 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2710 the function's stack frame. */
2711
2712static int
2713amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2714{
2715 gdb_byte insn;
e0d00bc7
JK
2716 struct symtab *symtab;
2717
2718 symtab = find_pc_symtab (pc);
2719 if (symtab && symtab->epilogue_unwind_valid)
2720 return 0;
872761f4
MS
2721
2722 if (target_read_memory (pc, &insn, 1))
2723 return 0; /* Can't read memory at pc. */
2724
2725 if (insn != 0xc3) /* 'ret' instruction. */
2726 return 0;
2727
2728 return 1;
2729}
2730
/* Sniffer for epilogue frames; only considered for the innermost
   (sentinel-relative level 0) frame.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
				       get_frame_pc (this_frame));
}
2742
2743static struct amd64_frame_cache *
2744amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2745{
2746 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2747 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8fbca658 2748 volatile struct gdb_exception ex;
872761f4 2749 struct amd64_frame_cache *cache;
6c10c06b 2750 gdb_byte buf[8];
872761f4
MS
2751
2752 if (*this_cache)
2753 return *this_cache;
2754
2755 cache = amd64_alloc_frame_cache ();
2756 *this_cache = cache;
2757
8fbca658
PA
2758 TRY_CATCH (ex, RETURN_MASK_ERROR)
2759 {
2760 /* Cache base will be %esp plus cache->sp_offset (-8). */
2761 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2762 cache->base = extract_unsigned_integer (buf, 8,
2763 byte_order) + cache->sp_offset;
2764
2765 /* Cache pc will be the frame func. */
2766 cache->pc = get_frame_pc (this_frame);
872761f4 2767
8fbca658
PA
2768 /* The saved %esp will be at cache->base plus 16. */
2769 cache->saved_sp = cache->base + 16;
872761f4 2770
8fbca658
PA
2771 /* The saved %eip will be at cache->base plus 8. */
2772 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
872761f4 2773
8fbca658
PA
2774 cache->base_p = 1;
2775 }
2776 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2777 throw_exception (ex);
872761f4
MS
2778
2779 return cache;
2780}
2781
8fbca658
PA
2782static enum unwind_stop_reason
2783amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2784 void **this_cache)
2785{
2786 struct amd64_frame_cache *cache
2787 = amd64_epilogue_frame_cache (this_frame, this_cache);
2788
2789 if (!cache->base_p)
2790 return UNWIND_UNAVAILABLE;
2791
2792 return UNWIND_NO_REASON;
2793}
2794
872761f4
MS
2795static void
2796amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2797 void **this_cache,
2798 struct frame_id *this_id)
2799{
2800 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2801 this_cache);
2802
8fbca658 2803 if (!cache->base_p)
5ce0145d
PA
2804 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2805 else
2806 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
872761f4
MS
2807}
2808
2809static const struct frame_unwind amd64_epilogue_frame_unwind =
2810{
2811 NORMAL_FRAME,
8fbca658 2812 amd64_epilogue_frame_unwind_stop_reason,
872761f4
MS
2813 amd64_epilogue_frame_this_id,
2814 amd64_frame_prev_register,
2815 NULL,
2816 amd64_epilogue_frame_sniffer
2817};
2818
166f4c7b 2819static struct frame_id
10458914 2820amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
166f4c7b 2821{
c4f35dd8
MK
2822 CORE_ADDR fp;
2823
10458914 2824 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
c4f35dd8 2825
10458914 2826 return frame_id_build (fp + 16, get_frame_pc (this_frame));
166f4c7b
ML
2827}
2828
8b148df9
AC
2829/* 16 byte align the SP per frame requirements. */
2830
2831static CORE_ADDR
e53bef9f 2832amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
8b148df9
AC
2833{
2834 return sp & -(CORE_ADDR)16;
2835}
473f17b0
MK
2836\f
2837
593adc23
MK
2838/* Supply register REGNUM from the buffer specified by FPREGS and LEN
2839 in the floating-point register set REGSET to register cache
2840 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
473f17b0
MK
2841
2842static void
e53bef9f
MK
2843amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2844 int regnum, const void *fpregs, size_t len)
473f17b0 2845{
09424cff
AA
2846 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2847 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
473f17b0
MK
2848
2849 gdb_assert (len == tdep->sizeof_fpregset);
90f90721 2850 amd64_supply_fxsave (regcache, regnum, fpregs);
473f17b0 2851}
8b148df9 2852
593adc23
MK
2853/* Collect register REGNUM from the register cache REGCACHE and store
2854 it in the buffer specified by FPREGS and LEN as described by the
2855 floating-point register set REGSET. If REGNUM is -1, do this for
2856 all registers in REGSET. */
2857
2858static void
2859amd64_collect_fpregset (const struct regset *regset,
2860 const struct regcache *regcache,
2861 int regnum, void *fpregs, size_t len)
2862{
09424cff
AA
2863 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2864 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
593adc23
MK
2865
2866 gdb_assert (len == tdep->sizeof_fpregset);
2867 amd64_collect_fxsave (regcache, regnum, fpregs);
2868}
2869
a055a187
L
2870/* Similar to amd64_supply_fpregset, but use XSAVE extended state. */
2871
2872static void
2873amd64_supply_xstateregset (const struct regset *regset,
2874 struct regcache *regcache, int regnum,
2875 const void *xstateregs, size_t len)
2876{
a055a187
L
2877 amd64_supply_xsave (regcache, regnum, xstateregs);
2878}
2879
2880/* Similar to amd64_collect_fpregset, but use XSAVE extended state. */
2881
2882static void
2883amd64_collect_xstateregset (const struct regset *regset,
2884 const struct regcache *regcache,
2885 int regnum, void *xstateregs, size_t len)
2886{
a055a187
L
2887 amd64_collect_xsave (regcache, regnum, xstateregs, 1);
2888}
2889
ecc37a5a
AA
2890static const struct regset amd64_fpregset =
2891 {
2892 NULL, amd64_supply_fpregset, amd64_collect_fpregset
2893 };
2894
2895static const struct regset amd64_xstateregset =
2896 {
2897 NULL, amd64_supply_xstateregset, amd64_collect_xstateregset
2898 };
2899
c6b33596
MK
2900/* Return the appropriate register set for the core section identified
2901 by SECT_NAME and SECT_SIZE. */
2902
2903static const struct regset *
e53bef9f
MK
2904amd64_regset_from_core_section (struct gdbarch *gdbarch,
2905 const char *sect_name, size_t sect_size)
c6b33596
MK
2906{
2907 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2908
2909 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
ecc37a5a 2910 return &amd64_fpregset;
c6b33596 2911
a055a187 2912 if (strcmp (sect_name, ".reg-xstate") == 0)
ecc37a5a 2913 return &amd64_xstateregset;
a055a187 2914
c6b33596
MK
2915 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2916}
2917\f
2918
436675d3
PA
2919/* Figure out where the longjmp will land. Slurp the jmp_buf out of
2920 %rdi. We expect its value to be a pointer to the jmp_buf structure
2921 from which we extract the address that we will land at. This
2922 address is copied into PC. This routine returns non-zero on
2923 success. */
2924
2925static int
2926amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2927{
2928 gdb_byte buf[8];
2929 CORE_ADDR jb_addr;
2930 struct gdbarch *gdbarch = get_frame_arch (frame);
2931 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
0dfff4cb 2932 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2933
2934 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2935 longjmp will land. */
2936 if (jb_pc_offset == -1)
2937 return 0;
2938
2939 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
0dfff4cb
UW
2940 jb_addr= extract_typed_address
2941 (buf, builtin_type (gdbarch)->builtin_data_ptr);
436675d3
PA
2942 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2943 return 0;
2944
0dfff4cb 2945 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2946
2947 return 1;
2948}
2949
cf648174
HZ
2950static const int amd64_record_regmap[] =
2951{
2952 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2953 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2954 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2955 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2956 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2957 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2958};
2959
/* Common AMD64 gdbarch initialization, used directly for the LP64 ABI
   and reused by amd64_x32_init_abi for the x32 ILP32 variant.  Fills
   in TDEP from the target description in INFO and installs the AMD64
   register layout, fundamental type sizes, calling-convention and
   frame-unwinding hooks on GDBARCH.  Statement order matters here:
   tdep fields must be set before the gdbarch hooks that consume
   them.  */
void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;
  static const char *const stap_integer_prefixes[] = { "$", NULL };
  static const char *const stap_register_prefixes[] = { "%", NULL };
  static const char *const stap_register_indirection_prefixes[] = { "(",
								    NULL };
  static const char *const stap_register_indirection_suffixes[] = { ")",
								    NULL };

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* Fall back to the built-in AMD64 description when the target
     didn't provide one with register information.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Wire up the AVX-512 registers (ZMM, K, the extra XMM16-31/YMM16-31
     banks) if the target description advertises the feature.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
    {
      tdep->zmmh_register_names = amd64_zmmh_names;
      tdep->k_register_names = amd64_k_names;
      tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
      tdep->ymm16h_register_names = amd64_ymmh_avx512_names;

      tdep->num_zmm_regs = 32;
      tdep->num_xmm_avx512_regs = 16;
      tdep->num_ymm_avx512_regs = 16;

      tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
      tdep->k0_regnum = AMD64_K0_REGNUM;
      tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
      tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
    }

  /* AVX: upper halves of the YMM registers.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  /* MPX bound registers.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
    {
      tdep->mpx_register_names = amd64_mpx_names;
      tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
      tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
    }

  /* Pseudo-register counts (byte/word/dword views of the GPRs).  */
  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
					  amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  /* Values that live in i387 registers need format conversion on the
     way in and out of the regcache.  */
  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
					  amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
  set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
  set_gdbarch_stap_register_indirection_prefixes (gdbarch,
					  stap_register_indirection_prefixes);
  set_gdbarch_stap_register_indirection_suffixes (gdbarch,
					  stap_register_indirection_suffixes);
  set_gdbarch_stap_is_single_operand (gdbarch,
				      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
					i386_stap_parse_special_token);
  set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
  set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
  set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
}
fff4548b
MK
3114\f
3115
3116static struct type *
3117amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3118{
3119 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3120
3121 switch (regnum - tdep->eax_regnum)
3122 {
3123 case AMD64_RBP_REGNUM: /* %ebp */
3124 case AMD64_RSP_REGNUM: /* %esp */
3125 return builtin_type (gdbarch)->builtin_data_ptr;
3126 case AMD64_RIP_REGNUM: /* %eip */
3127 return builtin_type (gdbarch)->builtin_func_ptr;
3128 }
3129
3130 return i386_pseudo_register_type (gdbarch, regnum);
3131}
3132
/* Initialize GDBARCH for the x32 ABI: the AMD64 instruction set with
   32-bit `long' and pointers.  Performs the common AMD64 setup first,
   then overrides only the properties that differ.  */
void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  /* Start from the full AMD64 configuration.  */
  amd64_init_abi (info, gdbarch);

  /* Fall back to the built-in x32 description when the target didn't
     provide one with register information.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_x32;
  tdep->tdesc = tdesc;

  /* 17 dword pseudo registers instead of amd64's 16; the extra one is
     handled by amd64_x32_pseudo_register_type (%eip).  */
  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  /* The ILP32 data model: `long' and pointers are 32 bits wide.  */
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}
90884b2b
L
3151
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

/* Module initializer: register the built-in AMD64 and x32 target
   descriptions (generated from the features/i386/*.xml files included
   at the top of this file), covering the base, AVX, MPX and AVX-512
   variants.  */
void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
  initialize_tdesc_amd64_avx ();
  initialize_tdesc_amd64_mpx ();
  initialize_tdesc_amd64_avx512 ();

  initialize_tdesc_x32 ();
  initialize_tdesc_x32_avx ();
  initialize_tdesc_x32_avx512 ();
}
c4f35dd8
MK
3167\f
3168
41d041d6
MK
3169/* The 64-bit FXSAVE format differs from the 32-bit format in the
3170 sense that the instruction pointer and data pointer are simply
3171 64-bit offsets into the code segment and the data segment instead
3172 of a selector offset pair. The functions below store the upper 32
3173 bits of these pointers (instead of just the 16-bits of the segment
3174 selector). */
3175
3176/* Fill register REGNUM in REGCACHE with the appropriate
0485f6ad
MK
3177 floating-point or SSE register value from *FXSAVE. If REGNUM is
3178 -1, do this for all registers. This function masks off any of the
3179 reserved bits in *FXSAVE. */
c4f35dd8
MK
3180
3181void
90f90721 3182amd64_supply_fxsave (struct regcache *regcache, int regnum,
20a6ec49 3183 const void *fxsave)
c4f35dd8 3184{
20a6ec49
MD
3185 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3186 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3187
41d041d6 3188 i387_supply_fxsave (regcache, regnum, fxsave);
c4f35dd8 3189
233dfcf0
L
3190 if (fxsave
3191 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
c4f35dd8 3192 {
d8de1ef7 3193 const gdb_byte *regs = fxsave;
41d041d6 3194
20a6ec49
MD
3195 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3196 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3197 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3198 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
c4f35dd8 3199 }
0c1a73d6
MK
3200}
3201
a055a187
L
3202/* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3203
3204void
3205amd64_supply_xsave (struct regcache *regcache, int regnum,
3206 const void *xsave)
3207{
3208 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3209 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3210
3211 i387_supply_xsave (regcache, regnum, xsave);
3212
233dfcf0
L
3213 if (xsave
3214 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
a055a187
L
3215 {
3216 const gdb_byte *regs = xsave;
3217
3218 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3219 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3220 regs + 12);
3221 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3222 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3223 regs + 20);
3224 }
3225}
3226
3c017e40
MK
3227/* Fill register REGNUM (if it is a floating-point or SSE register) in
3228 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3229 all registers. This function doesn't touch any of the reserved
3230 bits in *FXSAVE. */
3231
3232void
3233amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3234 void *fxsave)
3235{
20a6ec49
MD
3236 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3237 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
d8de1ef7 3238 gdb_byte *regs = fxsave;
3c017e40
MK
3239
3240 i387_collect_fxsave (regcache, regnum, fxsave);
3241
233dfcf0 3242 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
f0ef85a5 3243 {
20a6ec49
MD
3244 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3245 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3246 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3247 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
f0ef85a5 3248 }
3c017e40 3249}
a055a187 3250
7a9dd1b2 3251/* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
a055a187
L
3252
3253void
3254amd64_collect_xsave (const struct regcache *regcache, int regnum,
3255 void *xsave, int gcore)
3256{
3257 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3258 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3259 gdb_byte *regs = xsave;
3260
3261 i387_collect_xsave (regcache, regnum, xsave, gcore);
3262
233dfcf0 3263 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
a055a187
L
3264 {
3265 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3266 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3267 regs + 12);
3268 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3269 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3270 regs + 20);
3271 }
3272}
This page took 1.333363 seconds and 4 git commands to generate.