1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001-2014 Free Software Foundation, Inc.
4
5 Contributed by Jiri Smid, SuSE Labs.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "opcode/i386.h"
24 #include "dis-asm.h"
25 #include "arch-utils.h"
26 #include "block.h"
27 #include "dummy-frame.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "inferior.h"
32 #include "infrun.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39 #include "disasm.h"
40 #include "gdb_assert.h"
41 #include "exceptions.h"
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
44
45 #include "features/i386/amd64.c"
46 #include "features/i386/amd64-avx.c"
47 #include "features/i386/amd64-mpx.c"
48 #include "features/i386/amd64-avx512.c"
49
50 #include "features/i386/x32.c"
51 #include "features/i386/x32-avx.c"
52 #include "features/i386/x32-avx512.c"
53
54 #include "ax.h"
55 #include "ax-gdb.h"
56
57 /* Note that the AMD64 architecture was previously known as x86-64.
58 The latter is (forever) engraved into the canonical system name as
59 returned by config.guess, and used as the name for the AMD64 port
60 of GNU/Linux. The BSDs have renamed their ports to amd64; they
61 don't like to shout. For GDB we prefer the amd64_-prefix over the
62 x86_64_-prefix since it's so much easier to type. */
63
64 /* Register information. */
65
66 static const char *amd64_register_names[] =
67 {
68 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
69
70 /* %r8 is indeed register number 8. */
71 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
72 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
73
74 /* %st0 is register number 24. */
75 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
76 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
77
78 /* %xmm0 is register number 40. */
79 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
80 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
81 "mxcsr",
82 };
83
84 static const char *amd64_ymm_names[] =
85 {
86 "ymm0", "ymm1", "ymm2", "ymm3",
87 "ymm4", "ymm5", "ymm6", "ymm7",
88 "ymm8", "ymm9", "ymm10", "ymm11",
89 "ymm12", "ymm13", "ymm14", "ymm15"
90 };
91
92 static const char *amd64_ymm_avx512_names[] =
93 {
94 "ymm16", "ymm17", "ymm18", "ymm19",
95 "ymm20", "ymm21", "ymm22", "ymm23",
96 "ymm24", "ymm25", "ymm26", "ymm27",
97 "ymm28", "ymm29", "ymm30", "ymm31"
98 };
99
100 static const char *amd64_ymmh_names[] =
101 {
102 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
103 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
104 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
105 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
106 };
107
108 static const char *amd64_ymmh_avx512_names[] =
109 {
110 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
111 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
112 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
113 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
114 };
115
116 static const char *amd64_mpx_names[] =
117 {
118 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
119 };
120
121 static const char *amd64_k_names[] =
122 {
123 "k0", "k1", "k2", "k3",
124 "k4", "k5", "k6", "k7"
125 };
126
127 static const char *amd64_zmmh_names[] =
128 {
129 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
130 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
131 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
132 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
133 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
134 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
135 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
136 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
137 };
138
139 static const char *amd64_zmm_names[] =
140 {
141 "zmm0", "zmm1", "zmm2", "zmm3",
142 "zmm4", "zmm5", "zmm6", "zmm7",
143 "zmm8", "zmm9", "zmm10", "zmm11",
144 "zmm12", "zmm13", "zmm14", "zmm15",
145 "zmm16", "zmm17", "zmm18", "zmm19",
146 "zmm20", "zmm21", "zmm22", "zmm23",
147 "zmm24", "zmm25", "zmm26", "zmm27",
148 "zmm28", "zmm29", "zmm30", "zmm31"
149 };
150
151 static const char *amd64_xmm_avx512_names[] = {
152 "xmm16", "xmm17", "xmm18", "xmm19",
153 "xmm20", "xmm21", "xmm22", "xmm23",
154 "xmm24", "xmm25", "xmm26", "xmm27",
155 "xmm28", "xmm29", "xmm30", "xmm31"
156 };
157
158 /* DWARF Register Number Mapping as defined in the System V psABI,
159 section 3.6. */
160
161 static int amd64_dwarf_regmap[] =
162 {
163 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
164 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
165 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
166 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
167
168 /* Frame Pointer Register RBP. */
169 AMD64_RBP_REGNUM,
170
171 /* Stack Pointer Register RSP. */
172 AMD64_RSP_REGNUM,
173
174 /* Extended Integer Registers 8 - 15. */
175 AMD64_R8_REGNUM, /* %r8 */
176 AMD64_R9_REGNUM, /* %r9 */
177 AMD64_R10_REGNUM, /* %r10 */
178 AMD64_R11_REGNUM, /* %r11 */
179 AMD64_R12_REGNUM, /* %r12 */
180 AMD64_R13_REGNUM, /* %r13 */
181 AMD64_R14_REGNUM, /* %r14 */
182 AMD64_R15_REGNUM, /* %r15 */
183
184 /* Return Address RA. Mapped to RIP. */
185 AMD64_RIP_REGNUM,
186
187 /* SSE Registers 0 - 7. */
188 AMD64_XMM0_REGNUM + 0, AMD64_XMM0_REGNUM + 1,
189 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
190 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
191 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
192
193 /* Extended SSE Registers 8 - 15. */
194 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
195 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
196 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
197 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
198
199 /* Floating Point Registers 0-7. */
200 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
201 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
202 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
203 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
204
205 /* Control and Status Flags Register. */
206 AMD64_EFLAGS_REGNUM,
207
208 /* Selector Registers. */
209 AMD64_ES_REGNUM,
210 AMD64_CS_REGNUM,
211 AMD64_SS_REGNUM,
212 AMD64_DS_REGNUM,
213 AMD64_FS_REGNUM,
214 AMD64_GS_REGNUM,
215 -1,
216 -1,
217
218 /* Segment Base Address Registers. */
219 -1,
220 -1,
221 -1,
222 -1,
223
224 /* Special Selector Registers. */
225 -1,
226 -1,
227
228 /* Floating Point Control Registers. */
229 AMD64_MXCSR_REGNUM,
230 AMD64_FCTRL_REGNUM,
231 AMD64_FSTAT_REGNUM
232 };
233
234 static const int amd64_dwarf_regmap_len =
235 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
236
237 /* Convert DWARF register number REG to the appropriate register
238 number used by GDB. */
239
240 static int
241 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
242 {
243 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
244 int ymm0_regnum = tdep->ymm0_regnum;
245 int regnum = -1;
246
247 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
248 regnum = amd64_dwarf_regmap[reg];
249
250 if (regnum == -1)
251 warning (_("Unmapped DWARF Register #%d encountered."), reg);
252 else if (ymm0_regnum >= 0
253 && i386_xmm_regnum_p (gdbarch, regnum))
254 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
255
256 return regnum;
257 }
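/* Illustrative sketch (not part of the original file): the DWARF
   numbering follows psABI order (0 = %rax, 1 = %rdx, 2 = %rcx, ...),
   which differs from GDB's internal order, and column 16 is the
   return address, which GDB maps to %rip.  Assumes a valid GDBARCH.  */
#if 0
  gdb_assert (amd64_dwarf_reg_to_regnum (gdbarch, 0) == AMD64_RAX_REGNUM);
  gdb_assert (amd64_dwarf_reg_to_regnum (gdbarch, 1) == AMD64_RDX_REGNUM);
  gdb_assert (amd64_dwarf_reg_to_regnum (gdbarch, 16) == AMD64_RIP_REGNUM);
#endif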
258
259 /* Map architectural register numbers to gdb register numbers. */
260
261 static const int amd64_arch_regmap[16] =
262 {
263 AMD64_RAX_REGNUM, /* %rax */
264 AMD64_RCX_REGNUM, /* %rcx */
265 AMD64_RDX_REGNUM, /* %rdx */
266 AMD64_RBX_REGNUM, /* %rbx */
267 AMD64_RSP_REGNUM, /* %rsp */
268 AMD64_RBP_REGNUM, /* %rbp */
269 AMD64_RSI_REGNUM, /* %rsi */
270 AMD64_RDI_REGNUM, /* %rdi */
271 AMD64_R8_REGNUM, /* %r8 */
272 AMD64_R9_REGNUM, /* %r9 */
273 AMD64_R10_REGNUM, /* %r10 */
274 AMD64_R11_REGNUM, /* %r11 */
275 AMD64_R12_REGNUM, /* %r12 */
276 AMD64_R13_REGNUM, /* %r13 */
277 AMD64_R14_REGNUM, /* %r14 */
278 AMD64_R15_REGNUM /* %r15 */
279 };
280
281 static const int amd64_arch_regmap_len =
282 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
283
284 /* Convert architectural register number REG to the appropriate register
285 number used by GDB. */
286
287 static int
288 amd64_arch_reg_to_regnum (int reg)
289 {
290 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
291
292 return amd64_arch_regmap[reg];
293 }
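/* Illustrative sketch (not part of the original file): architectural
   numbers follow the instruction encoding (0 = %rax, 1 = %rcx,
   2 = %rdx, 3 = %rbx, 4 = %rsp, ...), which differs from GDB's
   numbering above, where %rbx is 1 and %rcx is 2.  */
#if 0
  gdb_assert (amd64_arch_reg_to_regnum (1) == AMD64_RCX_REGNUM);
  gdb_assert (amd64_arch_reg_to_regnum (3) == AMD64_RBX_REGNUM);
#endif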
294
295 /* Register names for byte pseudo-registers. */
296
297 static const char *amd64_byte_names[] =
298 {
299 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
300 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
301 "ah", "bh", "ch", "dh"
302 };
303
304 /* Number of lower byte registers. */
305 #define AMD64_NUM_LOWER_BYTE_REGS 16
306
307 /* Register names for word pseudo-registers. */
308
309 static const char *amd64_word_names[] =
310 {
311 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
312 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
313 };
314
315 /* Register names for dword pseudo-registers. */
316
317 static const char *amd64_dword_names[] =
318 {
319 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
320 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
321 "eip"
322 };
323
324 /* Return the name of register REGNUM. */
325
326 static const char *
327 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
328 {
329 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
330 if (i386_byte_regnum_p (gdbarch, regnum))
331 return amd64_byte_names[regnum - tdep->al_regnum];
332 else if (i386_zmm_regnum_p (gdbarch, regnum))
333 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
334 else if (i386_ymm_regnum_p (gdbarch, regnum))
335 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
336 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
337 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
338 else if (i386_word_regnum_p (gdbarch, regnum))
339 return amd64_word_names[regnum - tdep->ax_regnum];
340 else if (i386_dword_regnum_p (gdbarch, regnum))
341 return amd64_dword_names[regnum - tdep->eax_regnum];
342 else
343 return i386_pseudo_register_name (gdbarch, regnum);
344 }
345
346 static struct value *
347 amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
348 struct regcache *regcache,
349 int regnum)
350 {
351 gdb_byte raw_buf[MAX_REGISTER_SIZE];
352 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
353 enum register_status status;
354 struct value *result_value;
355 gdb_byte *buf;
356
357 result_value = allocate_value (register_type (gdbarch, regnum));
358 VALUE_LVAL (result_value) = lval_register;
359 VALUE_REGNUM (result_value) = regnum;
360 buf = value_contents_raw (result_value);
361
362 if (i386_byte_regnum_p (gdbarch, regnum))
363 {
364 int gpnum = regnum - tdep->al_regnum;
365
366 /* Extract (always little endian). */
367 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
368 {
369 /* Special handling for AH, BH, CH, DH. */
370 status = regcache_raw_read (regcache,
371 gpnum - AMD64_NUM_LOWER_BYTE_REGS,
372 raw_buf);
373 if (status == REG_VALID)
374 memcpy (buf, raw_buf + 1, 1);
375 else
376 mark_value_bytes_unavailable (result_value, 0,
377 TYPE_LENGTH (value_type (result_value)));
378 }
379 else
380 {
381 status = regcache_raw_read (regcache, gpnum, raw_buf);
382 if (status == REG_VALID)
383 memcpy (buf, raw_buf, 1);
384 else
385 mark_value_bytes_unavailable (result_value, 0,
386 TYPE_LENGTH (value_type (result_value)));
387 }
388 }
389 else if (i386_dword_regnum_p (gdbarch, regnum))
390 {
391 int gpnum = regnum - tdep->eax_regnum;
392 /* Extract (always little endian). */
393 status = regcache_raw_read (regcache, gpnum, raw_buf);
394 if (status == REG_VALID)
395 memcpy (buf, raw_buf, 4);
396 else
397 mark_value_bytes_unavailable (result_value, 0,
398 TYPE_LENGTH (value_type (result_value)));
399 }
400 else
401 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
402 result_value);
403
404 return result_value;
405 }
406
407 static void
408 amd64_pseudo_register_write (struct gdbarch *gdbarch,
409 struct regcache *regcache,
410 int regnum, const gdb_byte *buf)
411 {
412 gdb_byte raw_buf[MAX_REGISTER_SIZE];
413 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
414
415 if (i386_byte_regnum_p (gdbarch, regnum))
416 {
417 int gpnum = regnum - tdep->al_regnum;
418
419 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
420 {
421 /* Read ... AH, BH, CH, DH. */
422 regcache_raw_read (regcache,
423 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
424 /* ... Modify ... (always little endian). */
425 memcpy (raw_buf + 1, buf, 1);
426 /* ... Write. */
427 regcache_raw_write (regcache,
428 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
429 }
430 else
431 {
432 /* Read ... */
433 regcache_raw_read (regcache, gpnum, raw_buf);
434 /* ... Modify ... (always little endian). */
435 memcpy (raw_buf, buf, 1);
436 /* ... Write. */
437 regcache_raw_write (regcache, gpnum, raw_buf);
438 }
439 }
440 else if (i386_dword_regnum_p (gdbarch, regnum))
441 {
442 int gpnum = regnum - tdep->eax_regnum;
443
444 /* Read ... */
445 regcache_raw_read (regcache, gpnum, raw_buf);
446 /* ... Modify ... (always little endian). */
447 memcpy (raw_buf, buf, 4);
448 /* ... Write. */
449 regcache_raw_write (regcache, gpnum, raw_buf);
450 }
451 else
452 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
453 }
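/* A note on the read-modify-write cycles above (an explanatory sketch,
   not original code): writing a dword pseudo register preserves the
   upper half of the underlying 64-bit register, unlike a real 32-bit
   move executed by the inferior, which zero-extends.  */
#if 0
  gdb_byte val[4] = { 0xef, 0xbe, 0xad, 0xde };  /* Little endian.  */

  /* If %rax held 0x1122334455667788, it now holds 0x11223344deadbeef;
     executing "mov $0xdeadbeef, %eax" in the inferior would instead
     leave 0x00000000deadbeef.  */
  amd64_pseudo_register_write (gdbarch, regcache, tdep->eax_regnum, val);
#endif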
454
455 \f
456
457 /* Register classes as defined in the psABI. */
458
459 enum amd64_reg_class
460 {
461 AMD64_INTEGER,
462 AMD64_SSE,
463 AMD64_SSEUP,
464 AMD64_X87,
465 AMD64_X87UP,
466 AMD64_COMPLEX_X87,
467 AMD64_NO_CLASS,
468 AMD64_MEMORY
469 };
470
471 /* Return the union class of CLASS1 and CLASS2. See the psABI for
472 details. */
473
474 static enum amd64_reg_class
475 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
476 {
477 /* Rule (a): If both classes are equal, this is the resulting class. */
478 if (class1 == class2)
479 return class1;
480
481 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
482 is the other class. */
483 if (class1 == AMD64_NO_CLASS)
484 return class2;
485 if (class2 == AMD64_NO_CLASS)
486 return class1;
487
488 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
489 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
490 return AMD64_MEMORY;
491
492 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
493 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
494 return AMD64_INTEGER;
495
496 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
497 MEMORY is used as class. */
498 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
499 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
500 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
501 return AMD64_MEMORY;
502
503 /* Rule (f): Otherwise class SSE is used. */
504 return AMD64_SSE;
505 }
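/* Worked example (illustrative, not part of the original file): if an
   eightbyte contains both an int and a float, the field classes
   INTEGER and SSE merge to INTEGER by rule (d), so that eightbyte
   travels in an integer register.  */
#if 0
  gdb_assert (amd64_merge_classes (AMD64_SSE, AMD64_INTEGER)
	      == AMD64_INTEGER);
  /* Rule (b): NO_CLASS acts as the identity element.  */
  gdb_assert (amd64_merge_classes (AMD64_NO_CLASS, AMD64_SSE) == AMD64_SSE);
#endif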
506
507 static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
508
509 /* Return non-zero if TYPE is a non-POD structure or union type. */
510
511 static int
512 amd64_non_pod_p (struct type *type)
513 {
514 /* ??? A class with a base class certainly isn't POD, but does this
515 catch all non-POD structure types? */
516 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
517 return 1;
518
519 return 0;
520 }
521
522 /* Classify TYPE according to the rules for aggregate (structures and
523 arrays) and union types, and store the result in CLASS. */
524
525 static void
526 amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
527 {
528 /* 1. If the size of an object is larger than two eightbytes, or in
529 C++, is a non-POD structure or union type, or contains
530 unaligned fields, it has class memory. */
531 if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
532 {
533 class[0] = class[1] = AMD64_MEMORY;
534 return;
535 }
536
537 /* 2. Both eightbytes get initialized to class NO_CLASS. */
538 class[0] = class[1] = AMD64_NO_CLASS;
539
540 /* 3. Each field of an object is classified recursively so that
541 always two fields are considered. The resulting class is
542 calculated according to the classes of the fields in the
543 eightbyte: */
544
545 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
546 {
547 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
548
549 /* All fields in an array have the same type. */
550 amd64_classify (subtype, class);
551 if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS)
552 class[1] = class[0];
553 }
554 else
555 {
556 int i;
557
558 /* Structure or union. */
559 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
560 || TYPE_CODE (type) == TYPE_CODE_UNION);
561
562 for (i = 0; i < TYPE_NFIELDS (type); i++)
563 {
564 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
565 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
566 enum amd64_reg_class subclass[2];
567 int bitsize = TYPE_FIELD_BITSIZE (type, i);
568 int endpos;
569
570 if (bitsize == 0)
571 bitsize = TYPE_LENGTH (subtype) * 8;
572 endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
573
574 /* Ignore static fields. */
575 if (field_is_static (&TYPE_FIELD (type, i)))
576 continue;
577
578 gdb_assert (pos == 0 || pos == 1);
579
580 amd64_classify (subtype, subclass);
581 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
582 if (bitsize <= 64 && pos == 0 && endpos == 1)
583 /* This is a bit of an odd case: We have a field that would
584 normally fit in one of the two eightbytes, except that
585 it is placed in a way that this field straddles them.
586 This has been seen with a structure containing an array.
587
588 The ABI is a bit unclear in this case, but we assume that
589 this field's class (stored in subclass[0]) must also be merged
590 into class[1]. In other words, our field has a piece stored
591 in the second eight-byte, and thus its class applies to
592 the second eight-byte as well.
593
594 In the case where the field length exceeds 8 bytes,
595 it should not be necessary to merge the field class
596 into class[1]. As LEN > 8, subclass[1] is necessarily
597 different from AMD64_NO_CLASS. If subclass[1] is equal
598 to subclass[0], then the normal class[1]/subclass[1]
599 merging will take care of everything. For subclass[1]
600 to be different from subclass[0], I can only see the case
601 where we have a SSE/SSEUP or X87/X87UP pair, which both
602 use up all 16 bytes of the aggregate, and are already
603 handled just fine (because each portion sits on its own
604 8-byte). */
605 class[1] = amd64_merge_classes (class[1], subclass[0]);
606 if (pos == 0)
607 class[1] = amd64_merge_classes (class[1], subclass[1]);
608 }
609 }
610
611 /* 4. Then a post merger cleanup is done: */
612
613 /* Rule (a): If one of the classes is MEMORY, the whole argument is
614 passed in memory. */
615 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
616 class[0] = class[1] = AMD64_MEMORY;
617
618 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
619 SSE. */
620 if (class[0] == AMD64_SSEUP)
621 class[0] = AMD64_SSE;
622 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
623 class[1] = AMD64_SSE;
624 }
625
626 /* Classify TYPE, and store the result in CLASS. */
627
628 static void
629 amd64_classify (struct type *type, enum amd64_reg_class class[2])
630 {
631 enum type_code code = TYPE_CODE (type);
632 int len = TYPE_LENGTH (type);
633
634 class[0] = class[1] = AMD64_NO_CLASS;
635
636 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
637 long, long long, and pointers are in the INTEGER class. Similarly,
638 range types, used by languages such as Ada, are also in the INTEGER
639 class. */
640 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
641 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
642 || code == TYPE_CODE_CHAR
643 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
644 && (len == 1 || len == 2 || len == 4 || len == 8))
645 class[0] = AMD64_INTEGER;
646
647 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
648 are in class SSE. */
649 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
650 && (len == 4 || len == 8))
651 /* FIXME: __m64. */
652 class[0] = AMD64_SSE;
653
654 /* Arguments of types __float128, _Decimal128 and __m128 are split into
655 two halves. The least significant ones belong to class SSE, the most
656 significant one to class SSEUP. */
657 else if (code == TYPE_CODE_DECFLOAT && len == 16)
658 /* FIXME: __float128, __m128. */
659 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
660
661 /* The 64-bit mantissa of arguments of type long double belongs to
662 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
663 class X87UP. */
664 else if (code == TYPE_CODE_FLT && len == 16)
665 /* Class X87 and X87UP. */
666 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
667
668 /* Arguments of complex T where T is one of the types float or
669 double get treated as if they are implemented as:
670
671 struct complexT {
672 T real;
673 T imag;
674 }; */
675 else if (code == TYPE_CODE_COMPLEX && len == 8)
676 class[0] = AMD64_SSE;
677 else if (code == TYPE_CODE_COMPLEX && len == 16)
678 class[0] = class[1] = AMD64_SSE;
679
680 /* A variable of type complex long double is classified as type
681 COMPLEX_X87. */
682 else if (code == TYPE_CODE_COMPLEX && len == 32)
683 class[0] = AMD64_COMPLEX_X87;
684
685 /* Aggregates. */
686 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
687 || code == TYPE_CODE_UNION)
688 amd64_classify_aggregate (type, class);
689 }
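/* Worked examples (illustrative, not part of the original file) of how
   some common C types classify.  */
#if 0
  enum amd64_reg_class class[2];

  /* "long" (8 bytes): class[0] = AMD64_INTEGER, class[1] =
     AMD64_NO_CLASS; it travels in a single integer register.  */
  amd64_classify (builtin_type (gdbarch)->builtin_long, class);

  /* "struct { long l; double d; }" (16 bytes): the first eightbyte
     classifies as INTEGER, the second as SSE, so the struct occupies
     one integer register and one SSE register.  Anything larger than
     two eightbytes classifies as MEMORY and goes on the stack.  */
#endif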
690
691 static enum return_value_convention
692 amd64_return_value (struct gdbarch *gdbarch, struct value *function,
693 struct type *type, struct regcache *regcache,
694 gdb_byte *readbuf, const gdb_byte *writebuf)
695 {
696 enum amd64_reg_class class[2];
697 int len = TYPE_LENGTH (type);
698 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
699 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
700 int integer_reg = 0;
701 int sse_reg = 0;
702 int i;
703
704 gdb_assert (!(readbuf && writebuf));
705
706 /* 1. Classify the return type with the classification algorithm. */
707 amd64_classify (type, class);
708
709 /* 2. If the type has class MEMORY, then the caller provides space
710 for the return value and passes the address of this storage in
711 %rdi as if it were the first argument to the function. In effect,
712 this address becomes a hidden first argument.
713
714 On return %rax will contain the address that has been passed in
715 by the caller in %rdi. */
716 if (class[0] == AMD64_MEMORY)
717 {
718 /* As indicated by the comment above, the ABI guarantees that we
719 can always find the return value just after the function has
720 returned. */
721
722 if (readbuf)
723 {
724 ULONGEST addr;
725
726 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
727 read_memory (addr, readbuf, TYPE_LENGTH (type));
728 }
729
730 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
731 }
732
733 /* 8. If the class is COMPLEX_X87, the real part of the value is
734 returned in %st0 and the imaginary part in %st1. */
735 if (class[0] == AMD64_COMPLEX_X87)
736 {
737 if (readbuf)
738 {
739 regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
740 regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
741 }
742
743 if (writebuf)
744 {
745 i387_return_value (gdbarch, regcache);
746 regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
747 regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);
748
749 /* Fix up the tag word such that both %st(0) and %st(1) are
750 marked as valid. */
751 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
752 }
753
754 return RETURN_VALUE_REGISTER_CONVENTION;
755 }
756
757 gdb_assert (class[1] != AMD64_MEMORY);
758 gdb_assert (len <= 16);
759
760 for (i = 0; len > 0; i++, len -= 8)
761 {
762 int regnum = -1;
763 int offset = 0;
764
765 switch (class[i])
766 {
767 case AMD64_INTEGER:
768 /* 3. If the class is INTEGER, the next available register
769 of the sequence %rax, %rdx is used. */
770 regnum = integer_regnum[integer_reg++];
771 break;
772
773 case AMD64_SSE:
774 /* 4. If the class is SSE, the next available SSE register
775 of the sequence %xmm0, %xmm1 is used. */
776 regnum = sse_regnum[sse_reg++];
777 break;
778
779 case AMD64_SSEUP:
780 /* 5. If the class is SSEUP, the eightbyte is passed in the
781 upper half of the last used SSE register. */
782 gdb_assert (sse_reg > 0);
783 regnum = sse_regnum[sse_reg - 1];
784 offset = 8;
785 break;
786
787 case AMD64_X87:
788 /* 6. If the class is X87, the value is returned on the X87
789 stack in %st0 as 80-bit x87 number. */
790 regnum = AMD64_ST0_REGNUM;
791 if (writebuf)
792 i387_return_value (gdbarch, regcache);
793 break;
794
795 case AMD64_X87UP:
796 /* 7. If the class is X87UP, the value is returned together
797 with the previous X87 value in %st0. */
798 gdb_assert (i > 0 && class[0] == AMD64_X87);
799 regnum = AMD64_ST0_REGNUM;
800 offset = 8;
801 len = 2;
802 break;
803
804 case AMD64_NO_CLASS:
805 continue;
806
807 default:
808 gdb_assert (!"Unexpected register class.");
809 }
810
811 gdb_assert (regnum != -1);
812
813 if (readbuf)
814 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
815 readbuf + i * 8);
816 if (writebuf)
817 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
818 writebuf + i * 8);
819 }
820
821 return RETURN_VALUE_REGISTER_CONVENTION;
822 }
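/* Example of the register convention above (illustrative, not part of
   the original file; FUNCTION, TYPE and REGCACHE as in the parameters
   above): for "struct { long a; double b; }" the loop reads A from
   %rax into READBUF[0..7] and B from %xmm0 into READBUF[8..15]; for
   "struct { long a; long b; }" the second eightbyte comes from %rdx
   instead.  */
#if 0
  gdb_byte readbuf[16];

  amd64_return_value (gdbarch, function, type, regcache, readbuf, NULL);
#endif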
823 \f
824
825 static CORE_ADDR
826 amd64_push_arguments (struct regcache *regcache, int nargs,
827 struct value **args, CORE_ADDR sp, int struct_return)
828 {
829 static int integer_regnum[] =
830 {
831 AMD64_RDI_REGNUM, /* %rdi */
832 AMD64_RSI_REGNUM, /* %rsi */
833 AMD64_RDX_REGNUM, /* %rdx */
834 AMD64_RCX_REGNUM, /* %rcx */
835 AMD64_R8_REGNUM, /* %r8 */
836 AMD64_R9_REGNUM /* %r9 */
837 };
838 static int sse_regnum[] =
839 {
840 /* %xmm0 ... %xmm7 */
841 AMD64_XMM0_REGNUM + 0, AMD64_XMM0_REGNUM + 1,
842 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
843 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
844 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
845 };
846 struct value **stack_args = alloca (nargs * sizeof (struct value *));
847 int num_stack_args = 0;
848 int num_elements = 0;
849 int element = 0;
850 int integer_reg = 0;
851 int sse_reg = 0;
852 int i;
853
854 /* Reserve a register for the "hidden" argument. */
855 if (struct_return)
856 integer_reg++;
857
858 for (i = 0; i < nargs; i++)
859 {
860 struct type *type = value_type (args[i]);
861 int len = TYPE_LENGTH (type);
862 enum amd64_reg_class class[2];
863 int needed_integer_regs = 0;
864 int needed_sse_regs = 0;
865 int j;
866
867 /* Classify argument. */
868 amd64_classify (type, class);
869
870 /* Calculate the number of integer and SSE registers needed for
871 this argument. */
872 for (j = 0; j < 2; j++)
873 {
874 if (class[j] == AMD64_INTEGER)
875 needed_integer_regs++;
876 else if (class[j] == AMD64_SSE)
877 needed_sse_regs++;
878 }
879
880 /* Check whether enough registers are available, and if the
881 argument should be passed in registers at all. */
882 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
883 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
884 || (needed_integer_regs == 0 && needed_sse_regs == 0))
885 {
886 /* The argument will be passed on the stack. */
887 num_elements += ((len + 7) / 8);
888 stack_args[num_stack_args++] = args[i];
889 }
890 else
891 {
892 /* The argument will be passed in registers. */
893 const gdb_byte *valbuf = value_contents (args[i]);
894 gdb_byte buf[8];
895
896 gdb_assert (len <= 16);
897
898 for (j = 0; len > 0; j++, len -= 8)
899 {
900 int regnum = -1;
901 int offset = 0;
902
903 switch (class[j])
904 {
905 case AMD64_INTEGER:
906 regnum = integer_regnum[integer_reg++];
907 break;
908
909 case AMD64_SSE:
910 regnum = sse_regnum[sse_reg++];
911 break;
912
913 case AMD64_SSEUP:
914 gdb_assert (sse_reg > 0);
915 regnum = sse_regnum[sse_reg - 1];
916 offset = 8;
917 break;
918
919 default:
920 gdb_assert (!"Unexpected register class.");
921 }
922
923 gdb_assert (regnum != -1);
924 memset (buf, 0, sizeof buf);
925 memcpy (buf, valbuf + j * 8, min (len, 8));
926 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
927 }
928 }
929 }
930
931 /* Allocate space for the arguments on the stack. */
932 sp -= num_elements * 8;
933
934 /* The psABI says that "The end of the input argument area shall be
935 aligned on a 16 byte boundary." */
936 sp &= ~0xf;
937
938 /* Write out the arguments to the stack. */
939 for (i = 0; i < num_stack_args; i++)
940 {
941 struct type *type = value_type (stack_args[i]);
942 const gdb_byte *valbuf = value_contents (stack_args[i]);
943 int len = TYPE_LENGTH (type);
944
945 write_memory (sp + element * 8, valbuf, len);
946 element += ((len + 7) / 8);
947 }
948
949 /* The psABI says that "For calls that may call functions that use
950 varargs or stdargs (prototype-less calls or calls to functions
951 containing ellipsis (...) in the declaration) %al is used as
952 hidden argument to specify the number of SSE registers used." */
953 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
954 return sp;
955 }
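/* Example of the %al convention (illustrative, not part of the
   original file; FMT_BUF and DBL_BUF are hypothetical byte buffers
   holding the argument values): for printf ("%g\n", 3.14) the format
   pointer classifies as INTEGER and lands in %rdi, the double
   classifies as SSE and lands in %xmm0, and %al is set to 1 because
   exactly one SSE register was used.  */
#if 0
  regcache_raw_write_part (regcache, AMD64_RDI_REGNUM, 0, 8, fmt_buf);
  regcache_raw_write_part (regcache, AMD64_XMM0_REGNUM, 0, 8, dbl_buf);
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, 1);
#endif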
956
957 static CORE_ADDR
958 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
959 struct regcache *regcache, CORE_ADDR bp_addr,
960 int nargs, struct value **args, CORE_ADDR sp,
961 int struct_return, CORE_ADDR struct_addr)
962 {
963 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
964 gdb_byte buf[8];
965
966 /* Pass arguments. */
967 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
968
969 /* Pass "hidden" argument". */
970 if (struct_return)
971 {
972 store_unsigned_integer (buf, 8, byte_order, struct_addr);
973 regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
974 }
975
976 /* Store return address. */
977 sp -= 8;
978 store_unsigned_integer (buf, 8, byte_order, bp_addr);
979 write_memory (sp, buf, 8);
980
981 /* Finally, update the stack pointer... */
982 store_unsigned_integer (buf, 8, byte_order, sp);
983 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
984
985 /* ...and fake a frame pointer. */
986 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
987
988 return sp + 16;
989 }
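/* Resulting stack layout (an explanatory sketch, not original code),
   with SP being the final value written to %rsp and %rbp:

	SP + 8 ...	stack-passed arguments, if any
	SP		return address (BP_ADDR)

   SP + 8 is 16-byte aligned, just as after a real call instruction,
   and the returned SP + 16 serves as the dummy frame's base.  */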
990 \f
991 /* Displaced instruction handling. */
992
993 /* A partially decoded instruction.
994 This contains enough details for displaced stepping purposes. */
995
996 struct amd64_insn
997 {
998 /* The number of opcode bytes. */
999 int opcode_len;
1000 /* The offset of the rex prefix or -1 if not present. */
1001 int rex_offset;
1002 /* The offset to the first opcode byte. */
1003 int opcode_offset;
1004 /* The offset to the modrm byte or -1 if not present. */
1005 int modrm_offset;
1006
1007 /* The raw instruction. */
1008 gdb_byte *raw_insn;
1009 };
1010
1011 struct displaced_step_closure
1012 {
1013 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1014 int tmp_used;
1015 int tmp_regno;
1016 ULONGEST tmp_save;
1017
1018 /* Details of the instruction. */
1019 struct amd64_insn insn_details;
1020
1021 /* Amount of space allocated to insn_buf. */
1022 int max_len;
1023
1024 /* The possibly modified insn.
1025 This is a variable-length field. */
1026 gdb_byte insn_buf[1];
1027 };
1028
1029 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1030 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1031 at which point delete these in favor of libopcodes' versions). */
1032
1033 static const unsigned char onebyte_has_modrm[256] = {
1034 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1035 /* ------------------------------- */
1036 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
1037 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
1038 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
1039 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
1040 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
1041 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
1042 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
1043 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
1044 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
1045 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
1046 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
1047 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
1048 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
1049 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
1050 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
1051 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
1052 /* ------------------------------- */
1053 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1054 };
1055
1056 static const unsigned char twobyte_has_modrm[256] = {
1057 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1058 /* ------------------------------- */
1059 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
1060 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
1061 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
1062 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
1063 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
1064 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
1065 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
1066 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
1067 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
1068 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
1069 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
1070 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
1071 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
1072 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
1073 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
1074 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
1075 /* ------------------------------- */
1076 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1077 };
1078
1079 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1080
1081 static int
1082 rex_prefix_p (gdb_byte pfx)
1083 {
1084 return REX_PREFIX_P (pfx);
1085 }
1086
1087 /* Skip the legacy instruction prefixes in INSN.
1088 We assume INSN is properly sentineled so we don't have to worry
1089 about falling off the end of the buffer. */
1090
1091 static gdb_byte *
1092 amd64_skip_prefixes (gdb_byte *insn)
1093 {
1094 while (1)
1095 {
1096 switch (*insn)
1097 {
1098 case DATA_PREFIX_OPCODE:
1099 case ADDR_PREFIX_OPCODE:
1100 case CS_PREFIX_OPCODE:
1101 case DS_PREFIX_OPCODE:
1102 case ES_PREFIX_OPCODE:
1103 case FS_PREFIX_OPCODE:
1104 case GS_PREFIX_OPCODE:
1105 case SS_PREFIX_OPCODE:
1106 case LOCK_PREFIX_OPCODE:
1107 case REPE_PREFIX_OPCODE:
1108 case REPNE_PREFIX_OPCODE:
1109 ++insn;
1110 continue;
1111 default:
1112 break;
1113 }
1114 break;
1115 }
1116
1117 return insn;
1118 }
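/* Example (illustrative, not part of the original file): "lock addl
   $0x1, (%rdi)" encodes as f0 83 07 01; skipping the 0xf0 LOCK prefix
   leaves the pointer at the 0x83 opcode byte.  */
#if 0
  gdb_byte insn[] = { 0xf0, 0x83, 0x07, 0x01, 0x90, 0x90 };  /* Sentineled.  */

  gdb_assert (amd64_skip_prefixes (insn) == insn + 1);
#endif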
1119
1120 /* Return an integer register (other than RSP) that is unused as an input
1121 operand in INSN.
1122 In order to not require adding a rex prefix if the insn doesn't already
1123 have one, the result is restricted to RAX ... RDI, sans RSP.
1124 The register numbering of the result follows architecture ordering,
1125 e.g. RDI = 7. */
1126
1127 static int
1128 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1129 {
1130 /* 1 bit for each reg */
1131 int used_regs_mask = 0;
1132
1133 /* There can be at most 3 int regs used as inputs in an insn, and we have
1134 7 to choose from (RAX ... RDI, sans RSP).
1135 This allows us to take a conservative approach and keep things simple.
1136 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1137 that implicitly specify RAX. */
1138
1139 /* Avoid RAX. */
1140 used_regs_mask |= 1 << EAX_REG_NUM;
1141 /* Similarly avoid RDX, an implicit operand in divides. */
1142 used_regs_mask |= 1 << EDX_REG_NUM;
1143 /* Avoid RSP. */
1144 used_regs_mask |= 1 << ESP_REG_NUM;
1145
1146 /* If the opcode is one byte long and there's no ModRM byte,
1147 assume the opcode specifies a register. */
1148 if (details->opcode_len == 1 && details->modrm_offset == -1)
1149 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1150
1151 /* Mark used regs in the modrm/sib bytes. */
1152 if (details->modrm_offset != -1)
1153 {
1154 int modrm = details->raw_insn[details->modrm_offset];
1155 int mod = MODRM_MOD_FIELD (modrm);
1156 int reg = MODRM_REG_FIELD (modrm);
1157 int rm = MODRM_RM_FIELD (modrm);
1158 int have_sib = mod != 3 && rm == 4;
1159
1160 /* Assume the reg field of the modrm byte specifies a register. */
1161 used_regs_mask |= 1 << reg;
1162
1163 if (have_sib)
1164 {
1165 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1166 int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1167 used_regs_mask |= 1 << base;
1168 used_regs_mask |= 1 << idx;
1169 }
1170 else
1171 {
1172 used_regs_mask |= 1 << rm;
1173 }
1174 }
1175
1176 gdb_assert (used_regs_mask < 256);
1177 gdb_assert (used_regs_mask != 255);
1178
1179 /* Finally, find a free reg. */
1180 {
1181 int i;
1182
1183 for (i = 0; i < 8; ++i)
1184 {
1185 if (! (used_regs_mask & (1 << i)))
1186 return i;
1187 }
1188
1189 /* We shouldn't get here. */
1190 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1191 }
1192 }
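/* Example (illustrative, not part of the original file): for
   "mov %rsi, (%rbx)", encoded 48 89 33, the ModRM byte 0x33 marks
   %rsi (reg field) and %rbx (r/m field) as used.  Together with the
   always-avoided %rax, %rdx and %rsp, the lowest free register is
   %rcx, so the function returns 1.  */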
1193
1194 /* Extract the details of INSN that we need. */
1195
1196 static void
1197 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1198 {
1199 gdb_byte *start = insn;
1200 int need_modrm;
1201
1202 details->raw_insn = insn;
1203
1204 details->opcode_len = -1;
1205 details->rex_offset = -1;
1206 details->opcode_offset = -1;
1207 details->modrm_offset = -1;
1208
1209 /* Skip legacy instruction prefixes. */
1210 insn = amd64_skip_prefixes (insn);
1211
1212 /* Skip REX instruction prefix. */
1213 if (rex_prefix_p (*insn))
1214 {
1215 details->rex_offset = insn - start;
1216 ++insn;
1217 }
1218
1219 details->opcode_offset = insn - start;
1220
1221 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1222 {
1223 /* Two or three-byte opcode. */
1224 ++insn;
1225 need_modrm = twobyte_has_modrm[*insn];
1226
1227 /* Check for three-byte opcode. */
1228 switch (*insn)
1229 {
1230 case 0x24:
1231 case 0x25:
1232 case 0x38:
1233 case 0x3a:
1234 case 0x7a:
1235 case 0x7b:
1236 ++insn;
1237 details->opcode_len = 3;
1238 break;
1239 default:
1240 details->opcode_len = 2;
1241 break;
1242 }
1243 }
1244 else
1245 {
1246 /* One-byte opcode. */
1247 need_modrm = onebyte_has_modrm[*insn];
1248 details->opcode_len = 1;
1249 }
1250
1251 if (need_modrm)
1252 {
1253 ++insn;
1254 details->modrm_offset = insn - start;
1255 }
1256 }
1257
1258 /* Update %rip-relative addressing in INSN.
1259
1260 %rip-relative addressing only uses a 32-bit displacement.
1261 32 bits is not enough to be guaranteed to cover the distance between where
1262 the real instruction is and where its copy is.
1263 Convert the insn to use base+disp addressing.
1264 We set base = pc + insn_length so we can leave disp unchanged. */
1265
1266 static void
1267 fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1268 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1269 {
1270 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1271 const struct amd64_insn *insn_details = &dsc->insn_details;
1272 int modrm_offset = insn_details->modrm_offset;
1273 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1274 CORE_ADDR rip_base;
1275 int32_t disp;
1276 int insn_length;
1277 int arch_tmp_regno, tmp_regno;
1278 ULONGEST orig_value;
1279
1280 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1281 ++insn;
1282
1283 /* Compute the rip-relative address. */
1284 disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
1285 insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
1286 dsc->max_len, from);
1287 rip_base = from + insn_length;
1288
1289 /* We need a register to hold the address.
1290 Pick one not used in the insn.
1291 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1292 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1293 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1294
1295 /* REX.B should already be unset, since the insn used %rip-relative
1296 addressing; clear it anyway, as tmp_regno is never in r8-r15. */
1297 if (insn_details->rex_offset != -1)
1298 dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1299
1300 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1301 dsc->tmp_regno = tmp_regno;
1302 dsc->tmp_save = orig_value;
1303 dsc->tmp_used = 1;
1304
1305 /* Convert the ModRM field to be base+disp. */
1306 dsc->insn_buf[modrm_offset] &= ~0xc7;
1307 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1308
1309 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1310
1311 if (debug_displaced)
1312 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1313 "displaced: using temp reg %d, old value %s, new value %s\n",
1314 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1315 paddress (gdbarch, rip_base));
1316 }
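/* Byte-level example (illustrative, not part of the original file):
   "mov 0x200b4d(%rip), %rax" encodes as 48 8b 05 4d 0b 20 00; the
   ModRM byte 0x05 (mod = 00, r/m = 101) selects %rip-relative
   addressing.  With %rcx chosen as the scratch register, the code
   above rewrites ModRM to 0x81 (mod = 10, r/m = 001), yielding
   "mov 0x200b4d(%rcx), %rax", and preloads %rcx with
   FROM + INSN_LENGTH so the unchanged disp32 still reaches the
   original target.  */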
1317
1318 static void
1319 fixup_displaced_copy (struct gdbarch *gdbarch,
1320 struct displaced_step_closure *dsc,
1321 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1322 {
1323 const struct amd64_insn *details = &dsc->insn_details;
1324
1325 if (details->modrm_offset != -1)
1326 {
1327 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1328
1329 if ((modrm & 0xc7) == 0x05)
1330 {
1331 /* The insn uses rip-relative addressing.
1332 Deal with it. */
1333 fixup_riprel (gdbarch, dsc, from, to, regs);
1334 }
1335 }
1336 }
1337
1338 struct displaced_step_closure *
1339 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1340 CORE_ADDR from, CORE_ADDR to,
1341 struct regcache *regs)
1342 {
1343 int len = gdbarch_max_insn_length (gdbarch);
1344 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1345 continually watch for running off the end of the buffer. */
1346 int fixup_sentinel_space = len;
1347 struct displaced_step_closure *dsc =
1348 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1349 gdb_byte *buf = &dsc->insn_buf[0];
1350 struct amd64_insn *details = &dsc->insn_details;
1351
1352 dsc->tmp_used = 0;
1353 dsc->max_len = len + fixup_sentinel_space;
1354
1355 read_memory (from, buf, len);
1356
1357 /* Set up the sentinel space so we don't have to worry about running
1358 off the end of the buffer. An excessive number of leading prefixes
1359 could otherwise cause this. */
1360 memset (buf + len, 0, fixup_sentinel_space);
1361
1362 amd64_get_insn_details (buf, details);
1363
1364 /* GDB may get control back after the insn after the syscall.
1365 Presumably this is a kernel bug.
1366 If this is a syscall, make sure there's a nop afterwards. */
1367 {
1368 int syscall_length;
1369
1370 if (amd64_syscall_p (details, &syscall_length))
1371 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1372 }
1373
1374 /* Modify the insn to cope with the address where it will be executed from.
1375 In particular, handle any rip-relative addressing. */
1376 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1377
1378 write_memory (to, buf, len);
1379
1380 if (debug_displaced)
1381 {
1382 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1383 paddress (gdbarch, from), paddress (gdbarch, to));
1384 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1385 }
1386
1387 return dsc;
1388 }
1389
1390 static int
1391 amd64_absolute_jmp_p (const struct amd64_insn *details)
1392 {
1393 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1394
1395 if (insn[0] == 0xff)
1396 {
1397 /* jump near, absolute indirect (/4) */
1398 if ((insn[1] & 0x38) == 0x20)
1399 return 1;
1400
1401 /* jump far, absolute indirect (/5) */
1402 if ((insn[1] & 0x38) == 0x28)
1403 return 1;
1404 }
1405
1406 return 0;
1407 }
1408
1409 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1410
1411 static int
1412 amd64_jmp_p (const struct amd64_insn *details)
1413 {
1414 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1415
1416 /* jump short, relative. */
1417 if (insn[0] == 0xeb)
1418 return 1;
1419
1420 /* jump near, relative. */
1421 if (insn[0] == 0xe9)
1422 return 1;
1423
1424 return amd64_absolute_jmp_p (details);
1425 }
1426
1427 static int
1428 amd64_absolute_call_p (const struct amd64_insn *details)
1429 {
1430 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1431
1432 if (insn[0] == 0xff)
1433 {
1434 /* Call near, absolute indirect (/2) */
1435 if ((insn[1] & 0x38) == 0x10)
1436 return 1;
1437
1438 /* Call far, absolute indirect (/3) */
1439 if ((insn[1] & 0x38) == 0x18)
1440 return 1;
1441 }
1442
1443 return 0;
1444 }
1445
1446 static int
1447 amd64_ret_p (const struct amd64_insn *details)
1448 {
1449 /* NOTE: gcc can emit "repz ; ret". */
1450 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1451
1452 switch (insn[0])
1453 {
1454 case 0xc2: /* ret near, pop N bytes */
1455 case 0xc3: /* ret near */
1456 case 0xca: /* ret far, pop N bytes */
1457 case 0xcb: /* ret far */
1458 case 0xcf: /* iret */
1459 return 1;
1460
1461 default:
1462 return 0;
1463 }
1464 }
1465
1466 static int
1467 amd64_call_p (const struct amd64_insn *details)
1468 {
1469 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1470
1471 if (amd64_absolute_call_p (details))
1472 return 1;
1473
1474 /* call near, relative */
1475 if (insn[0] == 0xe8)
1476 return 1;
1477
1478 return 0;
1479 }
1480
1481 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1482 length in bytes. Otherwise, return zero. */
1483
1484 static int
1485 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1486 {
1487 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1488
1489 if (insn[0] == 0x0f && insn[1] == 0x05)
1490 {
1491 *lengthp = 2;
1492 return 1;
1493 }
1494
1495 return 0;
1496 }
1497
1498 /* Classify the instruction at ADDR using PRED.
1499 Throw an error if the memory can't be read. */
1500
1501 static int
1502 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1503 int (*pred) (const struct amd64_insn *))
1504 {
1505 struct amd64_insn details;
1506 gdb_byte *buf;
1507 int len, classification;
1508
1509 len = gdbarch_max_insn_length (gdbarch);
1510 buf = alloca (len);
1511
1512 read_code (addr, buf, len);
1513 amd64_get_insn_details (buf, &details);
1514
1515 classification = pred (&details);
1516
1517 return classification;
1518 }
1519
1520 /* The gdbarch insn_is_call method. */
1521
1522 static int
1523 amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
1524 {
1525 return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
1526 }
1527
1528 /* The gdbarch insn_is_ret method. */
1529
1530 static int
1531 amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
1532 {
1533 return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
1534 }
1535
1536 /* The gdbarch insn_is_jump method. */
1537
1538 static int
1539 amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
1540 {
1541 return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
1542 }
1543
1544 /* Fix up the state of registers and memory after having single-stepped
1545 a displaced instruction. */
1546
1547 void
1548 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1549 struct displaced_step_closure *dsc,
1550 CORE_ADDR from, CORE_ADDR to,
1551 struct regcache *regs)
1552 {
1553 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1554 /* The offset we applied to the instruction's address. */
1555 ULONGEST insn_offset = to - from;
1556 gdb_byte *insn = dsc->insn_buf;
1557 const struct amd64_insn *insn_details = &dsc->insn_details;
1558
1559 if (debug_displaced)
1560 fprintf_unfiltered (gdb_stdlog,
1561 "displaced: fixup (%s, %s), "
1562 "insn = 0x%02x 0x%02x ...\n",
1563 paddress (gdbarch, from), paddress (gdbarch, to),
1564 insn[0], insn[1]);
1565
1566 /* If we used a tmp reg, restore it. */
1567
1568 if (dsc->tmp_used)
1569 {
1570 if (debug_displaced)
1571 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1572 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1573 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1574 }
1575
1576 /* The list of issues to contend with here is taken from
1577 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1578 Yay for Free Software! */
1579
1580 /* Relocate the %rip back to the program's instruction stream,
1581 if necessary. */
1582
1583 /* Except in the case of absolute or indirect jump or call
1584 instructions, or a return instruction, the new rip is relative to
1585 the displaced instruction; make it relative to the original insn.
1586 Well, signal handler returns don't need relocation either, but we use the
1587 value of %rip to recognize those; see below. */
1588 if (! amd64_absolute_jmp_p (insn_details)
1589 && ! amd64_absolute_call_p (insn_details)
1590 && ! amd64_ret_p (insn_details))
1591 {
1592 ULONGEST orig_rip;
1593 int insn_len;
1594
1595 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1596
1597 /* A signal trampoline system call changes the %rip, resuming
1598 execution of the main program after the signal handler has
1599 returned. That makes them like 'return' instructions; we
1600 shouldn't relocate %rip.
1601
1602 But most system calls don't, and we do need to relocate %rip.
1603
1604 Our heuristic for distinguishing these cases: if stepping
1605 over the system call instruction left control directly after
1606 the instruction, then we relocate --- control almost certainly
1607 doesn't belong in the displaced copy. Otherwise, we assume
1608 the instruction has put control where it belongs, and leave
1609 it unrelocated. Goodness help us if there are PC-relative
1610 system calls. */
1611 if (amd64_syscall_p (insn_details, &insn_len)
1612 && orig_rip != to + insn_len
1613 /* GDB can get control back after the insn after the syscall.
1614 Presumably this is a kernel bug.
1615 Fixup ensures it's a nop; we add one to the length for it. */
1616 && orig_rip != to + insn_len + 1)
1617 {
1618 if (debug_displaced)
1619 fprintf_unfiltered (gdb_stdlog,
1620 "displaced: syscall changed %%rip; "
1621 "not relocating\n");
1622 }
1623 else
1624 {
1625 ULONGEST rip = orig_rip - insn_offset;
1626
1627 /* If we just stepped over a breakpoint insn, we deliberately
1628 don't back up the pc; this matches the behaviour without
1629 stepping. */
1630
1631 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1632
1633 if (debug_displaced)
1634 fprintf_unfiltered (gdb_stdlog,
1635 "displaced: "
1636 "relocated %%rip from %s to %s\n",
1637 paddress (gdbarch, orig_rip),
1638 paddress (gdbarch, rip));
1639 }
1640 }
1641
1642 /* If the instruction was PUSHFL, then the TF bit will be set in the
1643 pushed value, and should be cleared. We'll leave this for later,
1644 since GDB already messes up the TF flag when stepping over a
1645 pushfl. */
1646
1647 /* If the instruction was a call, the return address now atop the
1648 stack is the address following the copied instruction. We need
1649 to make it the address following the original instruction. */
1650 if (amd64_call_p (insn_details))
1651 {
1652 ULONGEST rsp;
1653 ULONGEST retaddr;
1654 const ULONGEST retaddr_len = 8;
1655
1656 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1657 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1658 retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
1659 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1660
1661 if (debug_displaced)
1662 fprintf_unfiltered (gdb_stdlog,
1663 "displaced: relocated return addr at %s "
1664 "to %s\n",
1665 paddress (gdbarch, rsp),
1666 paddress (gdbarch, retaddr));
1667 }
1668 }
1669
1670 /* If the instruction INSN uses RIP-relative addressing, return the
1671 offset into the raw INSN where the displacement to be adjusted is
1672 found. Returns 0 if the instruction doesn't use RIP-relative
1673 addressing. */
1674
1675 static int
1676 rip_relative_offset (struct amd64_insn *insn)
1677 {
1678 if (insn->modrm_offset != -1)
1679 {
1680 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1681
1682 if ((modrm & 0xc7) == 0x05)
1683 {
1684 /* The displacement is found right after the ModRM byte. */
1685 return insn->modrm_offset + 1;
1686 }
1687 }
1688
1689 return 0;
1690 }
1691
1692 static void
1693 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1694 {
1695 target_write_memory (*to, buf, len);
1696 *to += len;
1697 }
1698
1699 static void
1700 amd64_relocate_instruction (struct gdbarch *gdbarch,
1701 CORE_ADDR *to, CORE_ADDR oldloc)
1702 {
1703 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1704 int len = gdbarch_max_insn_length (gdbarch);
1705 /* Extra space for sentinels. */
1706 int fixup_sentinel_space = len;
1707 gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
1708 struct amd64_insn insn_details;
1709 int offset = 0;
1710 LONGEST rel32, newrel;
1711 gdb_byte *insn;
1712 int insn_length;
1713
1714 read_memory (oldloc, buf, len);
1715
1716 /* Set up the sentinel space so we don't have to worry about running
1717 off the end of the buffer. An excessive number of leading prefixes
1718 could otherwise cause this. */
1719 memset (buf + len, 0, fixup_sentinel_space);
1720
1721 insn = buf;
1722 amd64_get_insn_details (insn, &insn_details);
1723
1724 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1725
1726 /* Skip legacy instruction prefixes. */
1727 insn = amd64_skip_prefixes (insn);
1728
1729 /* Adjust calls with 32-bit relative addresses as push/jump, with
1730 the address pushed being the location where the original call in
1731 the user program would return to. */
1732 if (insn[0] == 0xe8)
1733 {
1734 gdb_byte push_buf[16];
1735 unsigned int ret_addr;
1736
1737 /* Where "ret" in the original code will return to. */
1738 ret_addr = oldloc + insn_length;
1739 push_buf[0] = 0x68; /* pushq $... */
1740 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1741 /* Push the push. */
1742 append_insns (to, 5, push_buf);
1743
1744 /* Convert the relative call to a relative jump. */
1745 insn[0] = 0xe9;
1746
1747 /* Adjust the destination offset. */
1748 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1749 newrel = (oldloc - *to) + rel32;
1750 store_signed_integer (insn + 1, 4, byte_order, newrel);
1751
1752 if (debug_displaced)
1753 fprintf_unfiltered (gdb_stdlog,
1754 "Adjusted insn rel32=%s at %s to"
1755 " rel32=%s at %s\n",
1756 hex_string (rel32), paddress (gdbarch, oldloc),
1757 hex_string (newrel), paddress (gdbarch, *to));
1758
1759 /* Write the adjusted jump into its displaced location. */
1760 append_insns (to, 5, insn);
1761 return;
1762 }
1763
1764 offset = rip_relative_offset (&insn_details);
1765 if (!offset)
1766 {
1767 /* Adjust jumps with 32-bit relative addresses. Calls are
1768 already handled above. */
1769 if (insn[0] == 0xe9)
1770 offset = 1;
1771 /* Adjust conditional jumps. */
1772 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1773 offset = 2;
1774 }
1775
1776 if (offset)
1777 {
1778 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1779 newrel = (oldloc - *to) + rel32;
1780 store_signed_integer (insn + offset, 4, byte_order, newrel);
1781 if (debug_displaced)
1782 fprintf_unfiltered (gdb_stdlog,
1783 "Adjusted insn rel32=%s at %s to"
1784 " rel32=%s at %s\n",
1785 hex_string (rel32), paddress (gdbarch, oldloc),
1786 hex_string (newrel), paddress (gdbarch, *to));
1787 }
1788
1789 /* Write the adjusted instruction into its displaced location. */
1790 append_insns (to, insn_length, buf);
1791 }
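/* Example of the call rewrite above (illustrative, not part of the
   original file): a 5-byte "call rel32" at OLDLOC becomes two
   instructions at *TO:

	68 xx xx xx xx		pushq $(OLDLOC + 5)	(low 32 bits)
	e9 yy yy yy yy		jmp (rel32 + OLDLOC - *TO)

   so the callee still returns to the instruction after the original
   call, while the jump reaches the original call target.  Note that
   opcode 0x68 pushes a sign-extended 32-bit immediate, so this relies
   on the return address fitting in 32 bits.  */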
1792
1793 \f
1794 /* The maximum number of saved registers. This should include %rip. */
1795 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1796
1797 struct amd64_frame_cache
1798 {
1799 /* Base address. */
1800 CORE_ADDR base;
1801 int base_p;
1802 CORE_ADDR sp_offset;
1803 CORE_ADDR pc;
1804
1805 /* Saved registers. */
1806 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1807 CORE_ADDR saved_sp;
1808 int saved_sp_reg;
1809
1810 /* Do we have a frame? */
1811 int frameless_p;
1812 };
1813
1814 /* Initialize a frame cache. */
1815
1816 static void
1817 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1818 {
1819 int i;
1820
1821 /* Base address. */
1822 cache->base = 0;
1823 cache->base_p = 0;
1824 cache->sp_offset = -8;
1825 cache->pc = 0;
1826
1827 /* Saved registers. We initialize these to -1 since zero is a valid
1828 offset (that's where %rbp is supposed to be stored).
1829 The values start out as being offsets, and are later converted to
1830 addresses (at which point -1 is interpreted as an address, still meaning
1831 "invalid"). */
1832 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1833 cache->saved_regs[i] = -1;
1834 cache->saved_sp = 0;
1835 cache->saved_sp_reg = -1;
1836
1837 /* Frameless until proven otherwise. */
1838 cache->frameless_p = 1;
1839 }
1840
1841 /* Allocate and initialize a frame cache. */
1842
1843 static struct amd64_frame_cache *
1844 amd64_alloc_frame_cache (void)
1845 {
1846 struct amd64_frame_cache *cache;
1847
1848 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1849 amd64_init_frame_cache (cache);
1850 return cache;
1851 }
1852
1853 /* GCC 4.4 and later can put code in the prologue to realign the
1854 stack pointer. Check whether PC points to such code, and update
1855 CACHE accordingly. Return the first instruction after the code
1856 sequence or CURRENT_PC, whichever is smaller. If we don't
1857 recognize the code, return PC. */
1858
1859 static CORE_ADDR
1860 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1861 struct amd64_frame_cache *cache)
1862 {
1863 /* There are 2 code sequences to re-align the stack before the frame
1864 gets set up:
1865
1866 1. Use a caller-saved (scratch) register:
1867
1868 leaq 8(%rsp), %reg
1869 andq $-XXX, %rsp
1870 pushq -8(%reg)
1871
1872 2. Use a callee-saved (preserved) register:
1873
1874 pushq %reg
1875 leaq 16(%rsp), %reg
1876 andq $-XXX, %rsp
1877 pushq -8(%reg)
1878
1879 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1880
1881 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1882 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1883 */
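/* A concrete instance of sequence 1, as GCC might emit it using %r10
   (illustrative; any register other than %rsp and %rbp can appear):

     4c 8d 54 24 08    leaq  0x8(%rsp), %r10
     48 83 e4 f0       andq  $-16, %rsp
     41 ff 72 f8       pushq -0x8(%r10)  */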
1884
1885 gdb_byte buf[18];
1886 int reg, r;
1887 int offset, offset_and;
1888
1889 if (target_read_code (pc, buf, sizeof buf))
1890 return pc;
1891
1892 /* Check the caller-saved register variant.  The first instruction
1893 has to be "leaq 8(%rsp), %reg".  */
1894 if ((buf[0] & 0xfb) == 0x48
1895 && buf[1] == 0x8d
1896 && buf[3] == 0x24
1897 && buf[4] == 0x8)
1898 {
1899 /* MOD must be binary 10 and R/M must be binary 100. */
1900 if ((buf[2] & 0xc7) != 0x44)
1901 return pc;
1902
1903 /* REG has register number. */
1904 reg = (buf[2] >> 3) & 7;
1905
1906 /* Check the REX.R bit. */
1907 if (buf[0] == 0x4c)
1908 reg += 8;
1909
1910 offset = 5;
1911 }
1912 else
1913 {
1914 /* Check the callee-saved register variant.  The first instruction
1915 has to be "pushq %reg".  */
1916 reg = 0;
1917 if ((buf[0] & 0xf8) == 0x50)
1918 offset = 0;
1919 else if ((buf[0] & 0xf6) == 0x40
1920 && (buf[1] & 0xf8) == 0x50)
1921 {
1922 /* Check the REX.B bit. */
1923 if ((buf[0] & 1) != 0)
1924 reg = 8;
1925
1926 offset = 1;
1927 }
1928 else
1929 return pc;
1930
1931 /* Get register. */
1932 reg += buf[offset] & 0x7;
1933
1934 offset++;
1935
1936 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1937 if ((buf[offset] & 0xfb) != 0x48
1938 || buf[offset + 1] != 0x8d
1939 || buf[offset + 3] != 0x24
1940 || buf[offset + 4] != 0x10)
1941 return pc;
1942
1943 /* MOD must be binary 10 and R/M must be binary 100. */
1944 if ((buf[offset + 2] & 0xc7) != 0x44)
1945 return pc;
1946
1947 /* REG has register number. */
1948 r = (buf[offset + 2] >> 3) & 7;
1949
1950 /* Check the REX.R bit. */
1951 if (buf[offset] == 0x4c)
1952 r += 8;
1953
1954 /* Registers in pushq and leaq have to be the same. */
1955 if (reg != r)
1956 return pc;
1957
1958 offset += 5;
1959 }
1960
1961 /* Register can't be %rsp or %rbp.  */
1962 if (reg == 4 || reg == 5)
1963 return pc;
1964
1965 /* The next instruction has to be "andq $-XXX, %rsp". */
1966 if (buf[offset] != 0x48
1967 || buf[offset + 2] != 0xe4
1968 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1969 return pc;
1970
1971 offset_and = offset;
1972 offset += buf[offset + 1] == 0x81 ? 7 : 4;
1973
1974 /* The next instruction has to be "pushq -8(%reg)". */
1975 r = 0;
1976 if (buf[offset] == 0xff)
1977 offset++;
1978 else if ((buf[offset] & 0xf6) == 0x40
1979 && buf[offset + 1] == 0xff)
1980 {
1981 /* Check the REX.B bit. */
1982 if ((buf[offset] & 0x1) != 0)
1983 r = 8;
1984 offset += 2;
1985 }
1986 else
1987 return pc;
1988
1989 /* 8-bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
1990 01. */
1991 if (buf[offset + 1] != 0xf8
1992 || (buf[offset] & 0xf8) != 0x70)
1993 return pc;
1994
1995 /* R/M has register. */
1996 r += buf[offset] & 7;
1997
1998 /* Registers in leaq and pushq have to be the same. */
1999 if (reg != r)
2000 return pc;
2001
2002 if (current_pc > pc + offset_and)
2003 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2004
2005 return min (pc + offset + 2, current_pc);
2006 }
2007
2008 /* Similar to amd64_analyze_stack_align for x32. */
2009
2010 static CORE_ADDR
2011 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2012 struct amd64_frame_cache *cache)
2013 {
2014 /* There are 2 code sequences to re-align the stack before the frame
2015 gets set up:
2016
2017 1. Use a caller-saved (scratch) register:
2018
2019 leaq 8(%rsp), %reg
2020 andq $-XXX, %rsp
2021 pushq -8(%reg)
2022
2023 or
2024
2025 [addr32] leal 8(%rsp), %reg
2026 andl $-XXX, %esp
2027 [addr32] pushq -8(%reg)
2028
2029 2. Use a callee-saved (preserved) register:
2030
2031 pushq %reg
2032 leaq 16(%rsp), %reg
2033 andq $-XXX, %rsp
2034 pushq -8(%reg)
2035
2036 or
2037
2038 pushq %reg
2039 [addr32] leal 16(%rsp), %reg
2040 andl $-XXX, %esp
2041 [addr32] pushq -8(%reg)
2042
2043 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2044
2045 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2046 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2047
2048 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2049
2050 0x83 0xe4 0xf0 andl $-16, %esp
2051 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2052 */
2053
2054 gdb_byte buf[19];
2055 int reg, r;
2056 int offset, offset_and;
2057
2058 if (target_read_memory (pc, buf, sizeof buf))
2059 return pc;
2060
2061 /* Skip optional addr32 prefix. */
2062 offset = buf[0] == 0x67 ? 1 : 0;
2063
2064 /* Check the caller-saved register variant.  The first instruction
2065 has to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
2066 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2067 && buf[offset + 1] == 0x8d
2068 && buf[offset + 3] == 0x24
2069 && buf[offset + 4] == 0x8)
2070 {
2071 /* MOD must be binary 10 and R/M must be binary 100. */
2072 if ((buf[offset + 2] & 0xc7) != 0x44)
2073 return pc;
2074
2075 /* REG has register number. */
2076 reg = (buf[offset + 2] >> 3) & 7;
2077
2078 /* Check the REX.R bit. */
2079 if ((buf[offset] & 0x4) != 0)
2080 reg += 8;
2081
2082 offset += 5;
2083 }
2084 else
2085 {
2086 /* Check the callee-saved register variant.  The first instruction
2087 has to be "pushq %reg".  */
2088 reg = 0;
2089 if ((buf[offset] & 0xf6) == 0x40
2090 && (buf[offset + 1] & 0xf8) == 0x50)
2091 {
2092 /* Check the REX.B bit. */
2093 if ((buf[offset] & 1) != 0)
2094 reg = 8;
2095
2096 offset += 1;
2097 }
2098 else if ((buf[offset] & 0xf8) != 0x50)
2099 return pc;
2100
2101 /* Get register. */
2102 reg += buf[offset] & 0x7;
2103
2104 offset++;
2105
2106 /* Skip optional addr32 prefix. */
2107 if (buf[offset] == 0x67)
2108 offset++;
2109
2110 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2111 "leal 16(%rsp), %reg". */
2112 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2113 || buf[offset + 1] != 0x8d
2114 || buf[offset + 3] != 0x24
2115 || buf[offset + 4] != 0x10)
2116 return pc;
2117
2118 /* MOD must be binary 10 and R/M must be binary 100. */
2119 if ((buf[offset + 2] & 0xc7) != 0x44)
2120 return pc;
2121
2122 /* REG has register number. */
2123 r = (buf[offset + 2] >> 3) & 7;
2124
2125 /* Check the REX.R bit. */
2126 if ((buf[offset] & 0x4) != 0)
2127 r += 8;
2128
2129 /* Registers in pushq and leaq have to be the same. */
2130 if (reg != r)
2131 return pc;
2132
2133 offset += 5;
2134 }
2135
2136 /* Register can't be %rsp or %rbp.  */
2137 if (reg == 4 || reg == 5)
2138 return pc;
2139
2140 /* The next instruction may be "andq $-XXX, %rsp" or
2141 "andl $-XXX, %esp". */
2142 if (buf[offset] != 0x48)
2143 offset--;
2144
2145 if (buf[offset + 2] != 0xe4
2146 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2147 return pc;
2148
2149 offset_and = offset;
2150 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2151
2152 /* Skip optional addr32 prefix. */
2153 if (buf[offset] == 0x67)
2154 offset++;
2155
2156 /* The next instruction has to be "pushq -8(%reg)". */
2157 r = 0;
2158 if (buf[offset] == 0xff)
2159 offset++;
2160 else if ((buf[offset] & 0xf6) == 0x40
2161 && buf[offset + 1] == 0xff)
2162 {
2163 /* Check the REX.B bit. */
2164 if ((buf[offset] & 0x1) != 0)
2165 r = 8;
2166 offset += 2;
2167 }
2168 else
2169 return pc;
2170
2171 /* 8-bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
2172 01. */
2173 if (buf[offset + 1] != 0xf8
2174 || (buf[offset] & 0xf8) != 0x70)
2175 return pc;
2176
2177 /* R/M has register. */
2178 r += buf[offset] & 7;
2179
2180 /* Registers in leaq and pushq have to be the same. */
2181 if (reg != r)
2182 return pc;
2183
2184 if (current_pc > pc + offset_and)
2185 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2186
2187 return min (pc + offset + 2, current_pc);
2188 }
2189
2190 /* Do a limited analysis of the prologue at PC and update CACHE
2191 accordingly. Bail out early if CURRENT_PC is reached. Return the
2192 address where the analysis stopped.
2193
2194 We will handle only functions beginning with:
2195
2196 pushq %rbp 0x55
2197 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2198
2199 or (for the X32 ABI):
2200
2201 pushq %rbp 0x55
2202 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2203
2204 Any function that doesn't start with one of these sequences will be
2205 assumed to have no prologue and thus no valid frame pointer in
2206 %rbp. */
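/* For example, given the common prologue

     55          pushq %rbp
     48 89 e5    movq  %rsp, %rbp

   the analysis records saved %rbp at offset 0, advances sp_offset
   by 8, clears frameless_p, and returns PC + 4 (the first
   instruction after the sequence).  */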
2207
2208 static CORE_ADDR
2209 amd64_analyze_prologue (struct gdbarch *gdbarch,
2210 CORE_ADDR pc, CORE_ADDR current_pc,
2211 struct amd64_frame_cache *cache)
2212 {
2213 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2214 /* There are two variations of movq %rsp, %rbp. */
2215 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2216 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2217 /* Ditto for movl %esp, %ebp. */
2218 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2219 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2220
2221 gdb_byte buf[3];
2222 gdb_byte op;
2223
2224 if (current_pc <= pc)
2225 return current_pc;
2226
2227 if (gdbarch_ptr_bit (gdbarch) == 32)
2228 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2229 else
2230 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2231
2232 op = read_code_unsigned_integer (pc, 1, byte_order);
2233
2234 if (op == 0x55) /* pushq %rbp */
2235 {
2236 /* Take into account that we've executed the `pushq %rbp' that
2237 starts this instruction sequence. */
2238 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2239 cache->sp_offset += 8;
2240
2241 /* If that's all, return now. */
2242 if (current_pc <= pc + 1)
2243 return current_pc;
2244
2245 read_code (pc + 1, buf, 3);
2246
2247 /* Check for `movq %rsp, %rbp'. */
2248 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2249 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2250 {
2251 /* OK, we actually have a frame. */
2252 cache->frameless_p = 0;
2253 return pc + 4;
2254 }
2255
2256 /* For X32, also check for `movl %esp, %ebp'.  */
2257 if (gdbarch_ptr_bit (gdbarch) == 32)
2258 {
2259 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2260 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2261 {
2262 /* OK, we actually have a frame. */
2263 cache->frameless_p = 0;
2264 return pc + 3;
2265 }
2266 }
2267
2268 return pc + 1;
2269 }
2270
2271 return pc;
2272 }
2273
2274 /* Work around false termination of prologue - GCC PR debug/48827.
2275
2276 START_PC is the first instruction of a function; PC is the address the
2277 analysis has already advanced to.  Returns PC if there is nothing to do.
2278
2279 84 c0 test %al,%al
2280 74 23 je after
2281 <-- here is a 0-line advance - the false prologue end marker.
2282 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2283 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2284 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2285 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2286 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2287 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2288 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2289 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2290 after: */
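/* Note that the final check below, "offset - 4 != buf[3]", verifies
   that the je displacement equals the total length of the movaps
   block, i.e. that the branch really does land at AFTER.  */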
2291
2292 static CORE_ADDR
2293 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2294 {
2295 struct symtab_and_line start_pc_sal, next_sal;
2296 gdb_byte buf[4 + 8 * 7];
2297 int offset, xmmreg;
2298
2299 if (pc == start_pc)
2300 return pc;
2301
2302 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2303 if (start_pc_sal.symtab == NULL
2304 || producer_is_gcc_ge_4 (start_pc_sal.symtab->producer) < 6
2305 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2306 return pc;
2307
2308 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2309 if (next_sal.line != start_pc_sal.line)
2310 return pc;
2311
2312 /* START_PC can be from overlaid memory, ignored here.  */
2313 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2314 return pc;
2315
2316 /* test %al,%al */
2317 if (buf[0] != 0x84 || buf[1] != 0xc0)
2318 return pc;
2319 /* je AFTER */
2320 if (buf[2] != 0x74)
2321 return pc;
2322
2323 offset = 4;
2324 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2325 {
2326 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2327 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2328 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2329 return pc;
2330
2331 /* 0b01?????? */
2332 if ((buf[offset + 2] & 0xc0) == 0x40)
2333 {
2334 /* 8-bit displacement. */
2335 offset += 4;
2336 }
2337 /* 0b10?????? */
2338 else if ((buf[offset + 2] & 0xc0) == 0x80)
2339 {
2340 /* 32-bit displacement. */
2341 offset += 7;
2342 }
2343 else
2344 return pc;
2345 }
2346
2347 /* je AFTER */
2348 if (offset - 4 != buf[3])
2349 return pc;
2350
2351 return next_sal.end;
2352 }
2353
2354 /* Return PC of first real instruction. */
2355
2356 static CORE_ADDR
2357 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2358 {
2359 struct amd64_frame_cache cache;
2360 CORE_ADDR pc;
2361 CORE_ADDR func_addr;
2362
2363 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2364 {
2365 CORE_ADDR post_prologue_pc
2366 = skip_prologue_using_sal (gdbarch, func_addr);
2367 struct symtab *s = find_pc_symtab (func_addr);
2368
2369 /* Clang always emits a line note before the prologue and another
2370 one after. We trust clang to emit usable line notes. */
2371 if (post_prologue_pc
2372 && (s != NULL
2373 && s->producer != NULL
2374 && strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
2375 return max (start_pc, post_prologue_pc);
2376 }
2377
2378 amd64_init_frame_cache (&cache);
2379 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2380 &cache);
2381 if (cache.frameless_p)
2382 return start_pc;
2383
2384 return amd64_skip_xmm_prologue (pc, start_pc);
2385 }
2386 \f
2387
2388 /* Normal frames. */
2389
2390 static void
2391 amd64_frame_cache_1 (struct frame_info *this_frame,
2392 struct amd64_frame_cache *cache)
2393 {
2394 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2395 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2396 gdb_byte buf[8];
2397 int i;
2398
2399 cache->pc = get_frame_func (this_frame);
2400 if (cache->pc != 0)
2401 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2402 cache);
2403
2404 if (cache->frameless_p)
2405 {
2406 /* We didn't find a valid frame. If we're at the start of a
2407 function, or somewhere halfway through its prologue, the function's
2408 frame probably hasn't been fully set up yet.  Try to
2409 reconstruct the base address for the stack frame by looking
2410 at the stack pointer. For truly "frameless" functions this
2411 might work too. */
2412
2413 if (cache->saved_sp_reg != -1)
2414 {
2415 /* Stack pointer has been saved. */
2416 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2417 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2418
2419 /* We stopped partway through the stack-alignment sequence.  */
2420 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2421 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2422
2423 /* This will be added back below. */
2424 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2425 }
2426 else
2427 {
2428 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2429 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2430 + cache->sp_offset;
2431 }
2432 }
2433 else
2434 {
2435 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2436 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2437 }
2438
2439 /* Now that we have the base address for the stack frame we can
2440 calculate the value of %rsp in the calling frame. */
2441 cache->saved_sp = cache->base + 16;
2442
2443 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2444 frame we find it at the same offset from the reconstructed base
2445 address.  If we stopped partway through stack alignment, %rip is handled
2446 differently (see above). */
2447 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2448 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2449
2450 /* Adjust all the saved registers such that they contain addresses
2451 instead of offsets. */
2452 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2453 if (cache->saved_regs[i] != -1)
2454 cache->saved_regs[i] += cache->base;
2455
2456 cache->base_p = 1;
2457 }
2458
2459 static struct amd64_frame_cache *
2460 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2461 {
2462 volatile struct gdb_exception ex;
2463 struct amd64_frame_cache *cache;
2464
2465 if (*this_cache)
2466 return *this_cache;
2467
2468 cache = amd64_alloc_frame_cache ();
2469 *this_cache = cache;
2470
2471 TRY_CATCH (ex, RETURN_MASK_ERROR)
2472 {
2473 amd64_frame_cache_1 (this_frame, cache);
2474 }
2475 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2476 throw_exception (ex);
2477
2478 return cache;
2479 }
2480
2481 static enum unwind_stop_reason
2482 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2483 void **this_cache)
2484 {
2485 struct amd64_frame_cache *cache =
2486 amd64_frame_cache (this_frame, this_cache);
2487
2488 if (!cache->base_p)
2489 return UNWIND_UNAVAILABLE;
2490
2491 /* This marks the outermost frame. */
2492 if (cache->base == 0)
2493 return UNWIND_OUTERMOST;
2494
2495 return UNWIND_NO_REASON;
2496 }
2497
2498 static void
2499 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2500 struct frame_id *this_id)
2501 {
2502 struct amd64_frame_cache *cache =
2503 amd64_frame_cache (this_frame, this_cache);
2504
2505 if (!cache->base_p)
2506 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2507 else if (cache->base == 0)
2508 {
2509 /* This marks the outermost frame. */
2510 return;
2511 }
2512 else
2513 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2514 }
2515
2516 static struct value *
2517 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2518 int regnum)
2519 {
2520 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2521 struct amd64_frame_cache *cache =
2522 amd64_frame_cache (this_frame, this_cache);
2523
2524 gdb_assert (regnum >= 0);
2525
2526 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2527 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2528
2529 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2530 return frame_unwind_got_memory (this_frame, regnum,
2531 cache->saved_regs[regnum]);
2532
2533 return frame_unwind_got_register (this_frame, regnum, regnum);
2534 }
2535
2536 static const struct frame_unwind amd64_frame_unwind =
2537 {
2538 NORMAL_FRAME,
2539 amd64_frame_unwind_stop_reason,
2540 amd64_frame_this_id,
2541 amd64_frame_prev_register,
2542 NULL,
2543 default_frame_sniffer
2544 };
2545 \f
2546 /* Generate a bytecode expression to get the value of the saved PC. */
2547
2548 static void
2549 amd64_gen_return_address (struct gdbarch *gdbarch,
2550 struct agent_expr *ax, struct axs_value *value,
2551 CORE_ADDR scope)
2552 {
2553 /* The following sequence assumes the traditional use of the base
2554 register. */
2555 ax_reg (ax, AMD64_RBP_REGNUM);
2556 ax_const_l (ax, 8);
2557 ax_simple (ax, aop_add);
2558 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2559 value->kind = axs_lvalue_memory;
2560 }
2561 \f
2562
2563 /* Signal trampolines. */
2564
2565 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2566 64-bit variants. This would require using identical frame caches
2567 on both platforms. */
2568
2569 static struct amd64_frame_cache *
2570 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2571 {
2572 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2573 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2574 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2575 volatile struct gdb_exception ex;
2576 struct amd64_frame_cache *cache;
2577 CORE_ADDR addr;
2578 gdb_byte buf[8];
2579 int i;
2580
2581 if (*this_cache)
2582 return *this_cache;
2583
2584 cache = amd64_alloc_frame_cache ();
2585
2586 TRY_CATCH (ex, RETURN_MASK_ERROR)
2587 {
2588 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2589 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2590
2591 addr = tdep->sigcontext_addr (this_frame);
2592 gdb_assert (tdep->sc_reg_offset);
2593 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2594 for (i = 0; i < tdep->sc_num_regs; i++)
2595 if (tdep->sc_reg_offset[i] != -1)
2596 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2597
2598 cache->base_p = 1;
2599 }
2600 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2601 throw_exception (ex);
2602
2603 *this_cache = cache;
2604 return cache;
2605 }
2606
2607 static enum unwind_stop_reason
2608 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2609 void **this_cache)
2610 {
2611 struct amd64_frame_cache *cache =
2612 amd64_sigtramp_frame_cache (this_frame, this_cache);
2613
2614 if (!cache->base_p)
2615 return UNWIND_UNAVAILABLE;
2616
2617 return UNWIND_NO_REASON;
2618 }
2619
2620 static void
2621 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2622 void **this_cache, struct frame_id *this_id)
2623 {
2624 struct amd64_frame_cache *cache =
2625 amd64_sigtramp_frame_cache (this_frame, this_cache);
2626
2627 if (!cache->base_p)
2628 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2629 else if (cache->base == 0)
2630 {
2631 /* This marks the outermost frame. */
2632 return;
2633 }
2634 else
2635 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2636 }
2637
2638 static struct value *
2639 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2640 void **this_cache, int regnum)
2641 {
2642 /* Make sure we've initialized the cache. */
2643 amd64_sigtramp_frame_cache (this_frame, this_cache);
2644
2645 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2646 }
2647
2648 static int
2649 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2650 struct frame_info *this_frame,
2651 void **this_cache)
2652 {
2653 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2654
2655 /* We shouldn't even bother if we don't have a sigcontext_addr
2656 handler. */
2657 if (tdep->sigcontext_addr == NULL)
2658 return 0;
2659
2660 if (tdep->sigtramp_p != NULL)
2661 {
2662 if (tdep->sigtramp_p (this_frame))
2663 return 1;
2664 }
2665
2666 if (tdep->sigtramp_start != 0)
2667 {
2668 CORE_ADDR pc = get_frame_pc (this_frame);
2669
2670 gdb_assert (tdep->sigtramp_end != 0);
2671 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2672 return 1;
2673 }
2674
2675 return 0;
2676 }
2677
2678 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2679 {
2680 SIGTRAMP_FRAME,
2681 amd64_sigtramp_frame_unwind_stop_reason,
2682 amd64_sigtramp_frame_this_id,
2683 amd64_sigtramp_frame_prev_register,
2684 NULL,
2685 amd64_sigtramp_frame_sniffer
2686 };
2687 \f
2688
2689 static CORE_ADDR
2690 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2691 {
2692 struct amd64_frame_cache *cache =
2693 amd64_frame_cache (this_frame, this_cache);
2694
2695 return cache->base;
2696 }
2697
2698 static const struct frame_base amd64_frame_base =
2699 {
2700 &amd64_frame_unwind,
2701 amd64_frame_base_address,
2702 amd64_frame_base_address,
2703 amd64_frame_base_address
2704 };
2705
2706 /* Normal frames, but in a function epilogue. */
2707
2708 /* The epilogue is defined here as the 'ret' instruction, which will
2709 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2710 the function's stack frame. */
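/* For instance, a function commonly ends with "c9 c3" (leave; ret);
   the sniffer matches only when PC points at the final 0xc3 byte,
   i.e. once the frame has already been torn down.  */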
2711
2712 static int
2713 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2714 {
2715 gdb_byte insn;
2716 struct symtab *symtab;
2717
2718 symtab = find_pc_symtab (pc);
2719 if (symtab && symtab->epilogue_unwind_valid)
2720 return 0;
2721
2722 if (target_read_memory (pc, &insn, 1))
2723 return 0; /* Can't read memory at pc. */
2724
2725 if (insn != 0xc3) /* 'ret' instruction. */
2726 return 0;
2727
2728 return 1;
2729 }
2730
2731 static int
2732 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2733 struct frame_info *this_frame,
2734 void **this_prologue_cache)
2735 {
2736 if (frame_relative_level (this_frame) == 0)
2737 return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
2738 get_frame_pc (this_frame));
2739 else
2740 return 0;
2741 }
2742
2743 static struct amd64_frame_cache *
2744 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2745 {
2746 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2747 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2748 volatile struct gdb_exception ex;
2749 struct amd64_frame_cache *cache;
2750 gdb_byte buf[8];
2751
2752 if (*this_cache)
2753 return *this_cache;
2754
2755 cache = amd64_alloc_frame_cache ();
2756 *this_cache = cache;
2757
2758 TRY_CATCH (ex, RETURN_MASK_ERROR)
2759 {
2760 /* Cache base will be %rsp plus cache->sp_offset (-8).  */
2761 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2762 cache->base = extract_unsigned_integer (buf, 8,
2763 byte_order) + cache->sp_offset;
2764
2765 /* Cache pc will be the frame func. */
2766 cache->pc = get_frame_pc (this_frame);
2767
2768 /* The saved %rsp will be at cache->base plus 16.  */
2769 cache->saved_sp = cache->base + 16;
2770
2771 /* The saved %rip will be at cache->base plus 8.  */
2772 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2773
2774 cache->base_p = 1;
2775 }
2776 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2777 throw_exception (ex);
2778
2779 return cache;
2780 }
2781
2782 static enum unwind_stop_reason
2783 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2784 void **this_cache)
2785 {
2786 struct amd64_frame_cache *cache
2787 = amd64_epilogue_frame_cache (this_frame, this_cache);
2788
2789 if (!cache->base_p)
2790 return UNWIND_UNAVAILABLE;
2791
2792 return UNWIND_NO_REASON;
2793 }
2794
2795 static void
2796 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2797 void **this_cache,
2798 struct frame_id *this_id)
2799 {
2800 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2801 this_cache);
2802
2803 if (!cache->base_p)
2804 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2805 else
2806 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2807 }
2808
2809 static const struct frame_unwind amd64_epilogue_frame_unwind =
2810 {
2811 NORMAL_FRAME,
2812 amd64_epilogue_frame_unwind_stop_reason,
2813 amd64_epilogue_frame_this_id,
2814 amd64_frame_prev_register,
2815 NULL,
2816 amd64_epilogue_frame_sniffer
2817 };
2818
2819 static struct frame_id
2820 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2821 {
2822 CORE_ADDR fp;
2823
2824 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2825
2826 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2827 }
2828
2829 /* 16-byte align the SP per frame requirements.  */
2830
2831 static CORE_ADDR
2832 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2833 {
2834 return sp & -(CORE_ADDR)16;
2835 }
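/* For example, sp & -(CORE_ADDR) 16 clears the low four bits, so an
   incoming SP of 0x7fffffffe468 becomes 0x7fffffffe460.  */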
2836 \f
2837
2838 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2839 in the floating-point register set REGSET to register cache
2840 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2841
2842 static void
2843 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2844 int regnum, const void *fpregs, size_t len)
2845 {
2846 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2847 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2848
2849 gdb_assert (len == tdep->sizeof_fpregset);
2850 amd64_supply_fxsave (regcache, regnum, fpregs);
2851 }
2852
2853 /* Collect register REGNUM from the register cache REGCACHE and store
2854 it in the buffer specified by FPREGS and LEN as described by the
2855 floating-point register set REGSET. If REGNUM is -1, do this for
2856 all registers in REGSET. */
2857
2858 static void
2859 amd64_collect_fpregset (const struct regset *regset,
2860 const struct regcache *regcache,
2861 int regnum, void *fpregs, size_t len)
2862 {
2863 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2864 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2865
2866 gdb_assert (len == tdep->sizeof_fpregset);
2867 amd64_collect_fxsave (regcache, regnum, fpregs);
2868 }
2869
2870 /* Similar to amd64_supply_fpregset, but use XSAVE extended state. */
2871
2872 static void
2873 amd64_supply_xstateregset (const struct regset *regset,
2874 struct regcache *regcache, int regnum,
2875 const void *xstateregs, size_t len)
2876 {
2877 amd64_supply_xsave (regcache, regnum, xstateregs);
2878 }
2879
2880 /* Similar to amd64_collect_fpregset, but use XSAVE extended state. */
2881
2882 static void
2883 amd64_collect_xstateregset (const struct regset *regset,
2884 const struct regcache *regcache,
2885 int regnum, void *xstateregs, size_t len)
2886 {
2887 amd64_collect_xsave (regcache, regnum, xstateregs, 1);
2888 }
2889
2890 static const struct regset amd64_fpregset =
2891 {
2892 NULL, amd64_supply_fpregset, amd64_collect_fpregset
2893 };
2894
2895 static const struct regset amd64_xstateregset =
2896 {
2897 NULL, amd64_supply_xstateregset, amd64_collect_xstateregset
2898 };
2899
2900 /* Return the appropriate register set for the core section identified
2901 by SECT_NAME and SECT_SIZE. */
2902
2903 static const struct regset *
2904 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2905 const char *sect_name, size_t sect_size)
2906 {
2907 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2908
2909 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2910 return &amd64_fpregset;
2911
2912 if (strcmp (sect_name, ".reg-xstate") == 0)
2913 return &amd64_xstateregset;
2914
2915 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2916 }
2917 \f
2918
2919 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2920 %rdi. We expect its value to be a pointer to the jmp_buf structure
2921 from which we extract the address that we will land at. This
2922 address is copied into PC. This routine returns non-zero on
2923 success. */
2924
2925 static int
2926 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2927 {
2928 gdb_byte buf[8];
2929 CORE_ADDR jb_addr;
2930 struct gdbarch *gdbarch = get_frame_arch (frame);
2931 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2932 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2933
2934 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2935 longjmp will land. */
2936 if (jb_pc_offset == -1)
2937 return 0;
2938
2939 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2940 jb_addr = extract_typed_address
2941 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2942 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2943 return 0;
2944
2945 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2946
2947 return 1;
2948 }
2949
2950 static const int amd64_record_regmap[] =
2951 {
2952 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2953 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2954 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2955 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2956 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2957 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2958 };
2959
2960 void
2961 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
2962 {
2963 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2964 const struct target_desc *tdesc = info.target_desc;
2965 static const char *const stap_integer_prefixes[] = { "$", NULL };
2966 static const char *const stap_register_prefixes[] = { "%", NULL };
2967 static const char *const stap_register_indirection_prefixes[] = { "(",
2968 NULL };
2969 static const char *const stap_register_indirection_suffixes[] = { ")",
2970 NULL };
2971
2972 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2973 floating-point registers. */
2974 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2975
2976 if (! tdesc_has_registers (tdesc))
2977 tdesc = tdesc_amd64;
2978 tdep->tdesc = tdesc;
2979
2980 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
2981 tdep->register_names = amd64_register_names;
2982
2983 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
2984 {
2985 tdep->zmmh_register_names = amd64_zmmh_names;
2986 tdep->k_register_names = amd64_k_names;
2987 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
2988 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
2989
2990 tdep->num_zmm_regs = 32;
2991 tdep->num_xmm_avx512_regs = 16;
2992 tdep->num_ymm_avx512_regs = 16;
2993
2994 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
2995 tdep->k0_regnum = AMD64_K0_REGNUM;
2996 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
2997 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
2998 }
2999
3000 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3001 {
3002 tdep->ymmh_register_names = amd64_ymmh_names;
3003 tdep->num_ymm_regs = 16;
3004 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3005 }
3006
3007 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3008 {
3009 tdep->mpx_register_names = amd64_mpx_names;
3010 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3011 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3012 }
3013
3014 tdep->num_byte_regs = 20;
3015 tdep->num_word_regs = 16;
3016 tdep->num_dword_regs = 16;
3017 /* Avoid wiring in the MMX registers for now. */
3018 tdep->num_mmx_regs = 0;
3019
3020 set_gdbarch_pseudo_register_read_value (gdbarch,
3021 amd64_pseudo_register_read_value);
3022 set_gdbarch_pseudo_register_write (gdbarch,
3023 amd64_pseudo_register_write);
3024
3025 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3026
3027 /* AMD64 has an FPU and 16 SSE registers. */
3028 tdep->st0_regnum = AMD64_ST0_REGNUM;
3029 tdep->num_xmm_regs = 16;
3030
3031 /* This is what all the fuss is about. */
3032 set_gdbarch_long_bit (gdbarch, 64);
3033 set_gdbarch_long_long_bit (gdbarch, 64);
3034 set_gdbarch_ptr_bit (gdbarch, 64);
3035
3036 /* In contrast to the i386, on AMD64 a `long double' actually takes
3037 up 128 bits, even though it's still based on the i387 extended
3038 floating-point format which has only 80 significant bits. */
3039 set_gdbarch_long_double_bit (gdbarch, 128);
3040
3041 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3042
3043 /* Register numbers of various important registers. */
3044 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3045 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3046 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3047 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3048
3049 /* The "default" register numbering scheme for AMD64 is referred to
3050 as the "DWARF Register Number Mapping" in the System V psABI.
3051 The preferred debugging format for all known AMD64 targets is
3052 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3053 DWARF-1), but we provide the same mapping just in case. This
3054 mapping is also used for stabs, which GCC does support. */
3055 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3056 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3057
3058 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3059 be in use on any of the supported AMD64 targets. */
3060
3061 /* Call dummy code. */
3062 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3063 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3064 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3065
3066 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3067 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3068 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3069
3070 set_gdbarch_return_value (gdbarch, amd64_return_value);
3071
3072 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3073
3074 tdep->record_regmap = amd64_record_regmap;
3075
3076 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3077
3078 /* Hook the function epilogue frame unwinder. This unwinder is
3079 appended to the list first, so that it supersedes the other
3080 unwinders in function epilogues. */
3081 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3082
3083 /* Hook the prologue-based frame unwinders. */
3084 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3085 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3086 frame_base_set_default (gdbarch, &amd64_frame_base);
3087
3088 /* If we have a register mapping, enable the generic core file support. */
3089 if (tdep->gregset_reg_offset)
3090 set_gdbarch_regset_from_core_section (gdbarch,
3091 amd64_regset_from_core_section);
3092
3093 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3094
3095 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3096
3097 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3098
3099 /* SystemTap variables and functions. */
3100 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3101 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3102 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3103 stap_register_indirection_prefixes);
3104 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3105 stap_register_indirection_suffixes);
3106 set_gdbarch_stap_is_single_operand (gdbarch,
3107 i386_stap_is_single_operand);
3108 set_gdbarch_stap_parse_special_token (gdbarch,
3109 i386_stap_parse_special_token);
3110 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3111 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3112 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3113 }
3114 \f
3115
3116 static struct type *
3117 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3118 {
3119 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3120
3121 switch (regnum - tdep->eax_regnum)
3122 {
3123 case AMD64_RBP_REGNUM: /* %ebp */
3124 case AMD64_RSP_REGNUM: /* %esp */
3125 return builtin_type (gdbarch)->builtin_data_ptr;
3126 case AMD64_RIP_REGNUM: /* %eip */
3127 return builtin_type (gdbarch)->builtin_func_ptr;
3128 }
3129
3130 return i386_pseudo_register_type (gdbarch, regnum);
3131 }
3132
3133 void
3134 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
3135 {
3136 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3137 const struct target_desc *tdesc = info.target_desc;
3138
3139 amd64_init_abi (info, gdbarch);
3140
3141 if (! tdesc_has_registers (tdesc))
3142 tdesc = tdesc_x32;
3143 tdep->tdesc = tdesc;
3144
3145 tdep->num_dword_regs = 17;
3146 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3147
3148 set_gdbarch_long_bit (gdbarch, 32);
3149 set_gdbarch_ptr_bit (gdbarch, 32);
3150 }
3151
3152 /* Provide a prototype to silence -Wmissing-prototypes. */
3153 void _initialize_amd64_tdep (void);
3154
3155 void
3156 _initialize_amd64_tdep (void)
3157 {
3158 initialize_tdesc_amd64 ();
3159 initialize_tdesc_amd64_avx ();
3160 initialize_tdesc_amd64_mpx ();
3161 initialize_tdesc_amd64_avx512 ();
3162
3163 initialize_tdesc_x32 ();
3164 initialize_tdesc_x32_avx ();
3165 initialize_tdesc_x32_avx512 ();
3166 }
3167 \f
3168
3169 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3170 sense that the instruction pointer and data pointer are simply
3171 64-bit offsets into the code segment and the data segment instead
3172 of a selector offset pair. The functions below store the upper 32
3173 bits of these pointers (instead of just the 16 bits of the segment
3174 selector). */
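/* Concretely, in the 64-bit FXSAVE image bytes 8..15 hold the full
   64-bit FPU instruction pointer (FIP) and bytes 16..23 the full
   64-bit FPU operand pointer (FDP), so REGS + 12 and REGS + 20 in
   the functions below address the upper 32 bits of each.  */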
3175
3176 /* Fill register REGNUM in REGCACHE with the appropriate
3177 floating-point or SSE register value from *FXSAVE. If REGNUM is
3178 -1, do this for all registers. This function masks off any of the
3179 reserved bits in *FXSAVE. */
3180
3181 void
3182 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3183 const void *fxsave)
3184 {
3185 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3186 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3187
3188 i387_supply_fxsave (regcache, regnum, fxsave);
3189
3190 if (fxsave
3191 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3192 {
3193 const gdb_byte *regs = fxsave;
3194
3195 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3196 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3197 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3198 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3199 }
3200 }
3201
3202 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3203
3204 void
3205 amd64_supply_xsave (struct regcache *regcache, int regnum,
3206 const void *xsave)
3207 {
3208 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3209 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3210
3211 i387_supply_xsave (regcache, regnum, xsave);
3212
3213 if (xsave
3214 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3215 {
3216 const gdb_byte *regs = xsave;
3217
3218 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3219 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3220 regs + 12);
3221 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3222 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3223 regs + 20);
3224 }
3225 }
3226
3227 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3228 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3229 all registers. This function doesn't touch any of the reserved
3230 bits in *FXSAVE. */
3231
3232 void
3233 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3234 void *fxsave)
3235 {
3236 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3237 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3238 gdb_byte *regs = fxsave;
3239
3240 i387_collect_fxsave (regcache, regnum, fxsave);
3241
3242 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3243 {
3244 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3245 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3246 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3247 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3248 }
3249 }
3250
3251 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3252
3253 void
3254 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3255 void *xsave, int gcore)
3256 {
3257 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3258 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3259 gdb_byte *regs = xsave;
3260
3261 i387_collect_xsave (regcache, regnum, xsave, gcore);
3262
3263 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3264 {
3265 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3266 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3267 regs + 12);
3268 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3269 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3270 regs + 20);
3271 }
3272 }