[gdb/tdep] Fix 'Unexpected register class' assert in amd64_push_arguments
1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001-2019 Free Software Foundation, Inc.
4
5 Contributed by Jiri Smid, SuSE Labs.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "opcode/i386.h"
24 #include "dis-asm.h"
25 #include "arch-utils.h"
26 #include "block.h"
27 #include "dummy-frame.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "inferior.h"
32 #include "infrun.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39 #include "disasm.h"
40 #include "amd64-tdep.h"
41 #include "i387-tdep.h"
42 #include "gdbsupport/x86-xstate.h"
43 #include <algorithm>
44 #include "target-descriptions.h"
45 #include "arch/amd64.h"
46 #include "producer.h"
47 #include "ax.h"
48 #include "ax-gdb.h"
49 #include "gdbsupport/byte-vector.h"
50 #include "osabi.h"
51 #include "x86-tdep.h"
52
53 /* Note that the AMD64 architecture was previously known as x86-64.
54 The latter is (forever) engraved into the canonical system name as
55 returned by config.guess, and used as the name for the AMD64 port
56 of GNU/Linux. The BSDs have renamed their ports to amd64; they
57 don't like to shout. For GDB we prefer the amd64_-prefix over the
58 x86_64_-prefix since it's so much easier to type. */
59
60 /* Register information. */
61
62 static const char *amd64_register_names[] =
63 {
64 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
65
66 /* %r8 is indeed register number 8. */
67 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
68 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
69
70 /* %st0 is register number 24. */
71 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
72 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
73
74 /* %xmm0 is register number 40. */
75 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
76 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
77 "mxcsr",
78 };
79
80 static const char *amd64_ymm_names[] =
81 {
82 "ymm0", "ymm1", "ymm2", "ymm3",
83 "ymm4", "ymm5", "ymm6", "ymm7",
84 "ymm8", "ymm9", "ymm10", "ymm11",
85 "ymm12", "ymm13", "ymm14", "ymm15"
86 };
87
88 static const char *amd64_ymm_avx512_names[] =
89 {
90 "ymm16", "ymm17", "ymm18", "ymm19",
91 "ymm20", "ymm21", "ymm22", "ymm23",
92 "ymm24", "ymm25", "ymm26", "ymm27",
93 "ymm28", "ymm29", "ymm30", "ymm31"
94 };
95
96 static const char *amd64_ymmh_names[] =
97 {
98 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
99 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
100 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
101 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
102 };
103
104 static const char *amd64_ymmh_avx512_names[] =
105 {
106 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
107 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
108 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
109 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
110 };
111
112 static const char *amd64_mpx_names[] =
113 {
114 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
115 };
116
117 static const char *amd64_k_names[] =
118 {
119 "k0", "k1", "k2", "k3",
120 "k4", "k5", "k6", "k7"
121 };
122
123 static const char *amd64_zmmh_names[] =
124 {
125 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
126 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
127 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
128 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
129 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
130 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
131 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
132 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
133 };
134
135 static const char *amd64_zmm_names[] =
136 {
137 "zmm0", "zmm1", "zmm2", "zmm3",
138 "zmm4", "zmm5", "zmm6", "zmm7",
139 "zmm8", "zmm9", "zmm10", "zmm11",
140 "zmm12", "zmm13", "zmm14", "zmm15",
141 "zmm16", "zmm17", "zmm18", "zmm19",
142 "zmm20", "zmm21", "zmm22", "zmm23",
143 "zmm24", "zmm25", "zmm26", "zmm27",
144 "zmm28", "zmm29", "zmm30", "zmm31"
145 };
146
147 static const char *amd64_xmm_avx512_names[] = {
148 "xmm16", "xmm17", "xmm18", "xmm19",
149 "xmm20", "xmm21", "xmm22", "xmm23",
150 "xmm24", "xmm25", "xmm26", "xmm27",
151 "xmm28", "xmm29", "xmm30", "xmm31"
152 };
153
154 static const char *amd64_pkeys_names[] = {
155 "pkru"
156 };
157
158 /* DWARF Register Number Mapping as defined in the System V psABI,
159 section 3.6. */
160
161 static int amd64_dwarf_regmap[] =
162 {
163 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
164 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
165 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
166 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
167
168 /* Frame Pointer Register RBP. */
169 AMD64_RBP_REGNUM,
170
171 /* Stack Pointer Register RSP. */
172 AMD64_RSP_REGNUM,
173
174 /* Extended Integer Registers 8 - 15. */
175 AMD64_R8_REGNUM, /* %r8 */
176 AMD64_R9_REGNUM, /* %r9 */
177 AMD64_R10_REGNUM, /* %r10 */
178 AMD64_R11_REGNUM, /* %r11 */
179 AMD64_R12_REGNUM, /* %r12 */
180 AMD64_R13_REGNUM, /* %r13 */
181 AMD64_R14_REGNUM, /* %r14 */
182 AMD64_R15_REGNUM, /* %r15 */
183
184 /* Return Address RA. Mapped to RIP. */
185 AMD64_RIP_REGNUM,
186
187 /* SSE Registers 0 - 7. */
188 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
189 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
190 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
191 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
192
193 /* Extended SSE Registers 8 - 15. */
194 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
195 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
196 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
197 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
198
199 /* Floating Point Registers 0-7. */
200 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
201 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
202 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
203 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
204
205 /* MMX Registers 0 - 7.
206 We have to handle those registers specifically, as their register
207 number within GDB depends on the target (or they may not even be
208 available at all). */
209 -1, -1, -1, -1, -1, -1, -1, -1,
210
211 /* Control and Status Flags Register. */
212 AMD64_EFLAGS_REGNUM,
213
214 /* Selector Registers. */
215 AMD64_ES_REGNUM,
216 AMD64_CS_REGNUM,
217 AMD64_SS_REGNUM,
218 AMD64_DS_REGNUM,
219 AMD64_FS_REGNUM,
220 AMD64_GS_REGNUM,
221 -1,
222 -1,
223
224 /* Segment Base Address Registers. */
225 -1,
226 -1,
227 -1,
228 -1,
229
230 /* Special Selector Registers. */
231 -1,
232 -1,
233
234 /* Floating Point Control Registers. */
235 AMD64_MXCSR_REGNUM,
236 AMD64_FCTRL_REGNUM,
237 AMD64_FSTAT_REGNUM
238 };
239
240 static const int amd64_dwarf_regmap_len =
241 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
242
243 /* Convert DWARF register number REG to the appropriate register
244 number used by GDB. */
245
246 static int
247 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
248 {
249 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
250 int ymm0_regnum = tdep->ymm0_regnum;
251 int regnum = -1;
252
253 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
254 regnum = amd64_dwarf_regmap[reg];
255
256 if (ymm0_regnum >= 0
257 && i386_xmm_regnum_p (gdbarch, regnum))
258 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
259
260 return regnum;
261 }
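
/* For illustration: the psABI orders the integer registers %rax,
   %rdx, %rcx, %rbx, %rsi, %rdi, so DWARF number 1 denotes %rdx,
   whereas GDB's own numbering (see amd64_register_names above) puts
   %rdx at 3; hence the table.  A hypothetical lookup such as

     amd64_dwarf_reg_to_regnum (gdbarch, 17)

   maps DWARF register 17 (%xmm0) to AMD64_XMM0_REGNUM, or to the
   corresponding %ymm register when the target description provides
   AVX (ymm0_regnum >= 0).  */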
262
263 /* Map architectural register numbers to gdb register numbers. */
264
265 static const int amd64_arch_regmap[16] =
266 {
267 AMD64_RAX_REGNUM, /* %rax */
268 AMD64_RCX_REGNUM, /* %rcx */
269 AMD64_RDX_REGNUM, /* %rdx */
270 AMD64_RBX_REGNUM, /* %rbx */
271 AMD64_RSP_REGNUM, /* %rsp */
272 AMD64_RBP_REGNUM, /* %rbp */
273 AMD64_RSI_REGNUM, /* %rsi */
274 AMD64_RDI_REGNUM, /* %rdi */
275 AMD64_R8_REGNUM, /* %r8 */
276 AMD64_R9_REGNUM, /* %r9 */
277 AMD64_R10_REGNUM, /* %r10 */
278 AMD64_R11_REGNUM, /* %r11 */
279 AMD64_R12_REGNUM, /* %r12 */
280 AMD64_R13_REGNUM, /* %r13 */
281 AMD64_R14_REGNUM, /* %r14 */
282 AMD64_R15_REGNUM /* %r15 */
283 };
284
285 static const int amd64_arch_regmap_len =
286 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
287
288 /* Convert architectural register number REG to the appropriate register
289 number used by GDB. */
290
291 static int
292 amd64_arch_reg_to_regnum (int reg)
293 {
294 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
295
296 return amd64_arch_regmap[reg];
297 }
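
/* For example, architectural number 1 is %rcx, so
   amd64_arch_reg_to_regnum (1) yields AMD64_RCX_REGNUM (2 in GDB's
   numbering).  The two orderings only coincide for %rax and for
   %r8-%r15.  */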
298
299 /* Register names for byte pseudo-registers. */
300
301 static const char *amd64_byte_names[] =
302 {
303 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
304 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
305 "ah", "bh", "ch", "dh"
306 };
307
308 /* Number of lower byte registers. */
309 #define AMD64_NUM_LOWER_BYTE_REGS 16
310
311 /* Register names for word pseudo-registers. */
312
313 static const char *amd64_word_names[] =
314 {
315 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
316 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
317 };
318
319 /* Register names for dword pseudo-registers. */
320
321 static const char *amd64_dword_names[] =
322 {
323 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
324 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
325 "eip"
326 };
327
328 /* Return the name of register REGNUM. */
329
330 static const char *
331 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
332 {
333 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
334 if (i386_byte_regnum_p (gdbarch, regnum))
335 return amd64_byte_names[regnum - tdep->al_regnum];
336 else if (i386_zmm_regnum_p (gdbarch, regnum))
337 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
338 else if (i386_ymm_regnum_p (gdbarch, regnum))
339 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
340 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
341 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
342 else if (i386_word_regnum_p (gdbarch, regnum))
343 return amd64_word_names[regnum - tdep->ax_regnum];
344 else if (i386_dword_regnum_p (gdbarch, regnum))
345 return amd64_dword_names[regnum - tdep->eax_regnum];
346 else
347 return i386_pseudo_register_name (gdbarch, regnum);
348 }
349
350 static struct value *
351 amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
352 readable_regcache *regcache,
353 int regnum)
354 {
355 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
356
357 value *result_value = allocate_value (register_type (gdbarch, regnum));
358 VALUE_LVAL (result_value) = lval_register;
359 VALUE_REGNUM (result_value) = regnum;
360 gdb_byte *buf = value_contents_raw (result_value);
361
362 if (i386_byte_regnum_p (gdbarch, regnum))
363 {
364 int gpnum = regnum - tdep->al_regnum;
365
366 /* Extract (always little endian). */
367 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
368 {
369 gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
370 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
371
372 /* Special handling for AH, BH, CH, DH. */
373 register_status status = regcache->raw_read (gpnum, raw_buf);
374 if (status == REG_VALID)
375 memcpy (buf, raw_buf + 1, 1);
376 else
377 mark_value_bytes_unavailable (result_value, 0,
378 TYPE_LENGTH (value_type (result_value)));
379 }
380 else
381 {
382 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
383 register_status status = regcache->raw_read (gpnum, raw_buf);
384 if (status == REG_VALID)
385 memcpy (buf, raw_buf, 1);
386 else
387 mark_value_bytes_unavailable (result_value, 0,
388 TYPE_LENGTH (value_type (result_value)));
389 }
390 }
391 else if (i386_dword_regnum_p (gdbarch, regnum))
392 {
393 int gpnum = regnum - tdep->eax_regnum;
394 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
395 /* Extract (always little endian). */
396 register_status status = regcache->raw_read (gpnum, raw_buf);
397 if (status == REG_VALID)
398 memcpy (buf, raw_buf, 4);
399 else
400 mark_value_bytes_unavailable (result_value, 0,
401 TYPE_LENGTH (value_type (result_value)));
402 }
403 else
404 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
405 result_value);
406
407 return result_value;
408 }
409
410 static void
411 amd64_pseudo_register_write (struct gdbarch *gdbarch,
412 struct regcache *regcache,
413 int regnum, const gdb_byte *buf)
414 {
415 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
416
417 if (i386_byte_regnum_p (gdbarch, regnum))
418 {
419 int gpnum = regnum - tdep->al_regnum;
420
421 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
422 {
423 gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
424 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
425
426 /* Read ... AH, BH, CH, DH. */
427 regcache->raw_read (gpnum, raw_buf);
428 /* ... Modify ... (always little endian). */
429 memcpy (raw_buf + 1, buf, 1);
430 /* ... Write. */
431 regcache->raw_write (gpnum, raw_buf);
432 }
433 else
434 {
435 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
436
437 /* Read ... */
438 regcache->raw_read (gpnum, raw_buf);
439 /* ... Modify ... (always little endian). */
440 memcpy (raw_buf, buf, 1);
441 /* ... Write. */
442 regcache->raw_write (gpnum, raw_buf);
443 }
444 }
445 else if (i386_dword_regnum_p (gdbarch, regnum))
446 {
447 int gpnum = regnum - tdep->eax_regnum;
448 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
449
450 /* Read ... */
451 regcache->raw_read (gpnum, raw_buf);
452 /* ... Modify ... (always little endian). */
453 memcpy (raw_buf, buf, 4);
454 /* ... Write. */
455 regcache->raw_write (gpnum, raw_buf);
456 }
457 else
458 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
459 }
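
/* A worked example of the read-modify-write above (values are
   illustrative): if %rax holds 0x1122334455667788, writing 0xab to
   the pseudo register %ah patches byte 1 of the raw buffer (always
   little endian), leaving 0x112233445566ab88.  Note that writing a
   dword register such as %eax through this path replaces only the
   low four bytes and preserves the upper half of %rax, unlike the
   hardware semantics of 32-bit operations, which zero-extend.  */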
460
461 /* Implement the 'ax_pseudo_register_collect' gdbarch method. */
462
463 static int
464 amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
465 struct agent_expr *ax, int regnum)
466 {
467 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
468
469 if (i386_byte_regnum_p (gdbarch, regnum))
470 {
471 int gpnum = regnum - tdep->al_regnum;
472
473 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
474 ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
475 else
476 ax_reg_mask (ax, gpnum);
477 return 0;
478 }
479 else if (i386_dword_regnum_p (gdbarch, regnum))
480 {
481 int gpnum = regnum - tdep->eax_regnum;
482
483 ax_reg_mask (ax, gpnum);
484 return 0;
485 }
486 else
487 return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
488 }
489
490 \f
491
492 /* Register classes as defined in the psABI. */
493
494 enum amd64_reg_class
495 {
496 AMD64_INTEGER,
497 AMD64_SSE,
498 AMD64_SSEUP,
499 AMD64_X87,
500 AMD64_X87UP,
501 AMD64_COMPLEX_X87,
502 AMD64_NO_CLASS,
503 AMD64_MEMORY
504 };
505
506 /* Return the union class of CLASS1 and CLASS2. See the psABI for
507 details. */
508
509 static enum amd64_reg_class
510 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
511 {
512 /* Rule (a): If both classes are equal, this is the resulting class. */
513 if (class1 == class2)
514 return class1;
515
516 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
517 is the other class. */
518 if (class1 == AMD64_NO_CLASS)
519 return class2;
520 if (class2 == AMD64_NO_CLASS)
521 return class1;
522
523 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
524 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
525 return AMD64_MEMORY;
526
527 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
528 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
529 return AMD64_INTEGER;
530
531 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
532 MEMORY is used as class. */
533 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
534 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
535 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
536 return AMD64_MEMORY;
537
538 /* Rule (f): Otherwise class SSE is used. */
539 return AMD64_SSE;
540 }
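
/* As a worked example of these rules, consider a hypothetical

     union u { long l; double d; };

   Both members occupy the first eightbyte, whose class is therefore
   the merge of AMD64_INTEGER (from L) and AMD64_SSE (from D); by
   rule (d) the result is AMD64_INTEGER, so the union is passed in an
   integer register.  */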
541
542 static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
543
544 /* Return true if TYPE is a structure or union with unaligned fields. */
545
546 static bool
547 amd64_has_unaligned_fields (struct type *type)
548 {
549 if (TYPE_CODE (type) == TYPE_CODE_STRUCT
550 || TYPE_CODE (type) == TYPE_CODE_UNION)
551 {
552 for (int i = 0; i < TYPE_NFIELDS (type); i++)
553 {
554 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
555 int bitpos = TYPE_FIELD_BITPOS (type, i);
556 int align = type_align (subtype);
557
558 /* Ignore static fields, empty fields (for example nested
559 empty structures), and bitfields (these are handled by
560 the caller). */
561 if (field_is_static (&TYPE_FIELD (type, i))
562 || (TYPE_FIELD_BITSIZE (type, i) == 0
563 && TYPE_LENGTH (subtype) == 0)
564 || TYPE_FIELD_PACKED (type, i))
565 continue;
566
567 if (bitpos % 8 != 0)
568 return true;
569
570 int bytepos = bitpos / 8;
571 if (bytepos % align != 0)
572 return true;
573
574 if (amd64_has_unaligned_fields (subtype))
575 return true;
576 }
577 }
578
579 return false;
580 }
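
/* For example, a hypothetical

     struct __attribute__ ((packed)) p { char c; int i; };

   has I at byte offset 1, which is not a multiple of int's 4-byte
   alignment, so this function returns true and the struct ends up in
   the MEMORY class.  */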
581
582 /* Classify TYPE according to the rules for aggregate (structures and
583 arrays) and union types, and store the result in CLASS. */
584
585 static void
586 amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
587 {
588 /* 1. If the size of an object is larger than two eightbytes, or it has
589 unaligned fields, it has class memory. */
590 if (TYPE_LENGTH (type) > 16 || amd64_has_unaligned_fields (type))
591 {
592 theclass[0] = theclass[1] = AMD64_MEMORY;
593 return;
594 }
595
596 /* 2. Both eightbytes get initialized to class NO_CLASS. */
597 theclass[0] = theclass[1] = AMD64_NO_CLASS;
598
599 /* 3. Each field of an object is classified recursively so that
600 always two fields are considered. The resulting class is
601 calculated according to the classes of the fields in the
602 eightbyte: */
603
604 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
605 {
606 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
607
608 /* All fields in an array have the same type. */
609 amd64_classify (subtype, theclass);
610 if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
611 theclass[1] = theclass[0];
612 }
613 else
614 {
615 int i;
616
617 /* Structure or union. */
618 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
619 || TYPE_CODE (type) == TYPE_CODE_UNION);
620
621 for (i = 0; i < TYPE_NFIELDS (type); i++)
622 {
623 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
624 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
625 enum amd64_reg_class subclass[2];
626 int bitsize = TYPE_FIELD_BITSIZE (type, i);
627 int endpos;
628
629 if (bitsize == 0)
630 bitsize = TYPE_LENGTH (subtype) * 8;
631 endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
632
633 /* Ignore static fields, or empty fields (for example nested
634 empty structures). */
635 if (field_is_static (&TYPE_FIELD (type, i)) || bitsize == 0)
636 continue;
637
638 gdb_assert (pos == 0 || pos == 1);
639
640 amd64_classify (subtype, subclass);
641 theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
642 if (bitsize <= 64 && pos == 0 && endpos == 1)
643 /* This is a bit of an odd case: We have a field that would
644 normally fit in one of the two eightbytes, except that
645 it is placed in a way that this field straddles them.
646 This has been seen with a structure containing an array.
647
648 The ABI is a bit unclear in this case, but we assume that
649 this field's class (stored in subclass[0]) must also be merged
650 into class[1]. In other words, our field has a piece stored
651 in the second eight-byte, and thus its class applies to
652 the second eight-byte as well.
653
654 In the case where the field length exceeds 8 bytes,
655 it should not be necessary to merge the field class
656 into class[1]. As LEN > 8, subclass[1] is necessarily
657 different from AMD64_NO_CLASS. If subclass[1] is equal
658 to subclass[0], then the normal class[1]/subclass[1]
659 merging will take care of everything. For subclass[1]
660 to be different from subclass[0], I can only see the case
661 where we have a SSE/SSEUP or X87/X87UP pair, which both
662 use up all 16 bytes of the aggregate, and are already
663 handled just fine (because each portion sits on its own
664 8-byte). */
665 theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
666 if (pos == 0)
667 theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
668 }
669 }
670
671 /* 4. Then a post merger cleanup is done: */
672
673 /* Rule (a): If one of the classes is MEMORY, the whole argument is
674 passed in memory. */
675 if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
676 theclass[0] = theclass[1] = AMD64_MEMORY;
677
678 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
679 SSE. */
680 if (theclass[0] == AMD64_SSEUP)
681 theclass[0] = AMD64_SSE;
682 if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
683 theclass[1] = AMD64_SSE;
684 }
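
/* Tying the steps together with an illustrative

     struct s { int a; int b; double d; };

   A and B share the first eightbyte (INTEGER merged with INTEGER),
   while D occupies the second, so THECLASS becomes { AMD64_INTEGER,
   AMD64_SSE } and the struct travels in one integer and one SSE
   register.  A 24-byte struct, by contrast, fails the 16-byte test
   above and is classified as MEMORY outright.  */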
685
686 /* Classify TYPE, and store the result in CLASS. */
687
688 static void
689 amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
690 {
691 enum type_code code = TYPE_CODE (type);
692 int len = TYPE_LENGTH (type);
693
694 theclass[0] = theclass[1] = AMD64_NO_CLASS;
695
696 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
697 long, long long, and pointers are in the INTEGER class. Similarly,
698 range types, used by languages such as Ada, are also in the INTEGER
699 class. */
700 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
701 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
702 || code == TYPE_CODE_CHAR
703 || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
704 && (len == 1 || len == 2 || len == 4 || len == 8))
705 theclass[0] = AMD64_INTEGER;
706
707 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
708 are in class SSE. */
709 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
710 && (len == 4 || len == 8))
711 /* FIXME: __m64. */
712 theclass[0] = AMD64_SSE;
713
714 /* Arguments of types __float128, _Decimal128 and __m128 are split into
715 two halves. The least significant ones belong to class SSE, the most
716 significant one to class SSEUP. */
717 else if (code == TYPE_CODE_DECFLOAT && len == 16)
718 /* FIXME: __float128, __m128. */
719 theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;
720
721 /* The 64-bit mantissa of arguments of type long double belongs to
722 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
723 class X87UP. */
724 else if (code == TYPE_CODE_FLT && len == 16)
725 /* Class X87 and X87UP. */
726 theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;
727
728 /* Arguments of complex T where T is one of the types float or
729 double get treated as if they are implemented as:
730
731 struct complexT {
732 T real;
733 T imag;
734 };
735
736 */
737 else if (code == TYPE_CODE_COMPLEX && len == 8)
738 theclass[0] = AMD64_SSE;
739 else if (code == TYPE_CODE_COMPLEX && len == 16)
740 theclass[0] = theclass[1] = AMD64_SSE;
741
742 /* A variable of type complex long double is classified as type
743 COMPLEX_X87. */
744 else if (code == TYPE_CODE_COMPLEX && len == 32)
745 theclass[0] = AMD64_COMPLEX_X87;
746
747 /* Aggregates. */
748 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
749 || code == TYPE_CODE_UNION)
750 amd64_classify_aggregate (type, theclass);
751 }
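
/* Some scalar examples of the classification above: an 8-byte long
   yields { AMD64_INTEGER, AMD64_NO_CLASS }, a double yields
   { AMD64_SSE, AMD64_NO_CLASS }, a 16-byte long double yields
   { AMD64_X87, AMD64_X87UP }, and a complex double yields
   { AMD64_SSE, AMD64_SSE }.  */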
752
753 static enum return_value_convention
754 amd64_return_value (struct gdbarch *gdbarch, struct value *function,
755 struct type *type, struct regcache *regcache,
756 gdb_byte *readbuf, const gdb_byte *writebuf)
757 {
758 enum amd64_reg_class theclass[2];
759 int len = TYPE_LENGTH (type);
760 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
761 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
762 int integer_reg = 0;
763 int sse_reg = 0;
764 int i;
765
766 gdb_assert (!(readbuf && writebuf));
767
768 /* 1. Classify the return type with the classification algorithm. */
769 amd64_classify (type, theclass);
770
771 /* 2. If the type has class MEMORY, then the caller provides space
772 for the return value and passes the address of this storage in
773 %rdi as if it were the first argument to the function. In effect,
774 this address becomes a hidden first argument.
775
776 On return %rax will contain the address that has been passed in
777 by the caller in %rdi. */
778 if (theclass[0] == AMD64_MEMORY)
779 {
780 /* As indicated by the comment above, the ABI guarantees that we
781 can always find the return value just after the function has
782 returned. */
783
784 if (readbuf)
785 {
786 ULONGEST addr;
787
788 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
789 read_memory (addr, readbuf, TYPE_LENGTH (type));
790 }
791
792 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
793 }
794
795 /* 8. If the class is COMPLEX_X87, the real part of the value is
796 returned in %st0 and the imaginary part in %st1. */
797 if (theclass[0] == AMD64_COMPLEX_X87)
798 {
799 if (readbuf)
800 {
801 regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
802 regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
803 }
804
805 if (writebuf)
806 {
807 i387_return_value (gdbarch, regcache);
808 regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
809 regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);
810
811 /* Fix up the tag word such that both %st(0) and %st(1) are
812 marked as valid. */
813 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
814 }
815
816 return RETURN_VALUE_REGISTER_CONVENTION;
817 }
818
819 gdb_assert (theclass[1] != AMD64_MEMORY);
820 gdb_assert (len <= 16);
821
822 for (i = 0; len > 0; i++, len -= 8)
823 {
824 int regnum = -1;
825 int offset = 0;
826
827 switch (theclass[i])
828 {
829 case AMD64_INTEGER:
830 /* 3. If the class is INTEGER, the next available register
831 of the sequence %rax, %rdx is used. */
832 regnum = integer_regnum[integer_reg++];
833 break;
834
835 case AMD64_SSE:
836 /* 4. If the class is SSE, the next available SSE register
837 of the sequence %xmm0, %xmm1 is used. */
838 regnum = sse_regnum[sse_reg++];
839 break;
840
841 case AMD64_SSEUP:
842 /* 5. If the class is SSEUP, the eightbyte is passed in the
843 upper half of the last used SSE register. */
844 gdb_assert (sse_reg > 0);
845 regnum = sse_regnum[sse_reg - 1];
846 offset = 8;
847 break;
848
849 case AMD64_X87:
850 /* 6. If the class is X87, the value is returned on the X87
851 stack in %st0 as 80-bit x87 number. */
852 regnum = AMD64_ST0_REGNUM;
853 if (writebuf)
854 i387_return_value (gdbarch, regcache);
855 break;
856
857 case AMD64_X87UP:
858 /* 7. If the class is X87UP, the value is returned together
859 with the previous X87 value in %st0. */
860 gdb_assert (i > 0 && theclass[0] == AMD64_X87);
861 regnum = AMD64_ST0_REGNUM;
862 offset = 8;
863 len = 2;
864 break;
865
866 case AMD64_NO_CLASS:
867 continue;
868
869 default:
870 gdb_assert (!"Unexpected register class.");
871 }
872
873 gdb_assert (regnum != -1);
874
875 if (readbuf)
876 regcache->raw_read_part (regnum, offset, std::min (len, 8),
877 readbuf + i * 8);
878 if (writebuf)
879 regcache->raw_write_part (regnum, offset, std::min (len, 8),
880 writebuf + i * 8);
881 }
882
883 return RETURN_VALUE_REGISTER_CONVENTION;
884 }
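
/* An illustrative trace of the above: a function returning

     struct r { long x; double y; };

   classifies as { AMD64_INTEGER, AMD64_SSE }, so X is transferred
   via %rax and Y via %xmm0, and RETURN_VALUE_REGISTER_CONVENTION is
   reported.  Had the struct been larger than 16 bytes, it would have
   classified as MEMORY, and the value would be read through the
   address left in %rax by the callee.  */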
885 \f
886
887 static CORE_ADDR
888 amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
889 CORE_ADDR sp, function_call_return_method return_method)
890 {
891 static int integer_regnum[] =
892 {
893 AMD64_RDI_REGNUM, /* %rdi */
894 AMD64_RSI_REGNUM, /* %rsi */
895 AMD64_RDX_REGNUM, /* %rdx */
896 AMD64_RCX_REGNUM, /* %rcx */
897 AMD64_R8_REGNUM, /* %r8 */
898 AMD64_R9_REGNUM /* %r9 */
899 };
900 static int sse_regnum[] =
901 {
902 /* %xmm0 ... %xmm7 */
903 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
904 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
905 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
906 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
907 };
908 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
909 int num_stack_args = 0;
910 int num_elements = 0;
911 int element = 0;
912 int integer_reg = 0;
913 int sse_reg = 0;
914 int i;
915
916 /* Reserve a register for the "hidden" argument. */
917 if (return_method == return_method_struct)
918 integer_reg++;
919
920 for (i = 0; i < nargs; i++)
921 {
922 struct type *type = value_type (args[i]);
923 int len = TYPE_LENGTH (type);
924 enum amd64_reg_class theclass[2];
925 int needed_integer_regs = 0;
926 int needed_sse_regs = 0;
927 int j;
928
929 /* Classify argument. */
930 amd64_classify (type, theclass);
931
932 /* Calculate the number of integer and SSE registers needed for
933 this argument. */
934 for (j = 0; j < 2; j++)
935 {
936 if (theclass[j] == AMD64_INTEGER)
937 needed_integer_regs++;
938 else if (theclass[j] == AMD64_SSE)
939 needed_sse_regs++;
940 }
941
942 /* Check whether enough registers are available, and if the
943 argument should be passed in registers at all. */
944 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
945 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
946 || (needed_integer_regs == 0 && needed_sse_regs == 0))
947 {
948 /* The argument will be passed on the stack. */
949 num_elements += ((len + 7) / 8);
950 stack_args[num_stack_args++] = args[i];
951 }
952 else
953 {
954 /* The argument will be passed in registers. */
955 const gdb_byte *valbuf = value_contents (args[i]);
956 gdb_byte buf[8];
957
958 gdb_assert (len <= 16);
959
960 for (j = 0; len > 0; j++, len -= 8)
961 {
962 int regnum = -1;
963 int offset = 0;
964
965 switch (theclass[j])
966 {
967 case AMD64_INTEGER:
968 regnum = integer_regnum[integer_reg++];
969 break;
970
971 case AMD64_SSE:
972 regnum = sse_regnum[sse_reg++];
973 break;
974
975 case AMD64_SSEUP:
976 gdb_assert (sse_reg > 0);
977 regnum = sse_regnum[sse_reg - 1];
978 offset = 8;
979 break;
980
981 case AMD64_NO_CLASS:
982 continue;
983
984 default:
985 gdb_assert (!"Unexpected register class.");
986 }
987
988 gdb_assert (regnum != -1);
989 memset (buf, 0, sizeof buf);
990 memcpy (buf, valbuf + j * 8, std::min (len, 8));
991 regcache->raw_write_part (regnum, offset, 8, buf);
992 }
993 }
994 }
995
996 /* Allocate space for the arguments on the stack. */
997 sp -= num_elements * 8;
998
999 /* The psABI says that "The end of the input argument area shall be
1000 aligned on a 16 byte boundary." */
1001 sp &= ~0xf;
1002
1003 /* Write out the arguments to the stack. */
1004 for (i = 0; i < num_stack_args; i++)
1005 {
1006 struct type *type = value_type (stack_args[i]);
1007 const gdb_byte *valbuf = value_contents (stack_args[i]);
1008 int len = TYPE_LENGTH (type);
1009
1010 write_memory (sp + element * 8, valbuf, len);
1011 element += ((len + 7) / 8);
1012 }
1013
1014 /* The psABI says that "For calls that may call functions that use
1015 varargs or stdargs (prototype-less calls or calls to functions
1016 containing ellipsis (...) in the declaration) %al is used as
1017 hidden argument to specify the number of SSE registers used. */
1018 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
1019 return sp;
1020 }
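
/* A sketch of the allocation above, for a hypothetical call
   f (1, 2.0, 3, 4.0): the two INTEGER arguments land in %rdi and
   %rsi, the two SSE arguments in %xmm0 and %xmm1, no stack slots are
   needed, and %al is set to 2, the number of SSE registers used.  */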
1021
1022 static CORE_ADDR
1023 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1024 struct regcache *regcache, CORE_ADDR bp_addr,
1025 int nargs, struct value **args, CORE_ADDR sp,
1026 function_call_return_method return_method,
1027 CORE_ADDR struct_addr)
1028 {
1029 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1030 gdb_byte buf[8];
1031
1032 /* BND registers can be in arbitrary values at the moment of the
1033 inferior call. This can cause boundary violations that are not
1034 due to a real bug or even desired by the user. The best we can do
1035 is to set the BND registers to the INIT state, which allows access
1036 to the whole memory, before pushing the inferior call. */
1037 i387_reset_bnd_regs (gdbarch, regcache);
1038
1039 /* Pass arguments. */
1040 sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);
1041
1042 /* Pass "hidden" argument. */
1043 if (return_method == return_method_struct)
1044 {
1045 store_unsigned_integer (buf, 8, byte_order, struct_addr);
1046 regcache->cooked_write (AMD64_RDI_REGNUM, buf);
1047 }
1048
1049 /* Store return address. */
1050 sp -= 8;
1051 store_unsigned_integer (buf, 8, byte_order, bp_addr);
1052 write_memory (sp, buf, 8);
1053
1054 /* Finally, update the stack pointer... */
1055 store_unsigned_integer (buf, 8, byte_order, sp);
1056 regcache->cooked_write (AMD64_RSP_REGNUM, buf);
1057
1058 /* ...and fake a frame pointer. */
1059 regcache->cooked_write (AMD64_RBP_REGNUM, buf);
1060
1061 return sp + 16;
1062 }
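
/* After this function runs, the inferior's %rsp points at the stored
   return address BP_ADDR, %rbp temporarily holds the same value as a
   fake frame pointer, and the returned address (SP + 16) is what GDB
   then uses to identify the resulting dummy frame.  */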
1063 \f
1064 /* Displaced instruction handling. */
1065
1066 /* A partially decoded instruction.
1067 This contains enough details for displaced stepping purposes. */
1068
1069 struct amd64_insn
1070 {
1071 /* The number of opcode bytes. */
1072 int opcode_len;
1073 /* The offset of the REX/VEX instruction encoding prefix or -1 if
1074 not present. */
1075 int enc_prefix_offset;
1076 /* The offset to the first opcode byte. */
1077 int opcode_offset;
1078 /* The offset to the modrm byte or -1 if not present. */
1079 int modrm_offset;
1080
1081 /* The raw instruction. */
1082 gdb_byte *raw_insn;
1083 };
1084
1085 struct amd64_displaced_step_closure : public displaced_step_closure
1086 {
1087 amd64_displaced_step_closure (int insn_buf_len)
1088 : insn_buf (insn_buf_len, 0)
1089 {}
1090
1091 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1092 int tmp_used = 0;
1093 int tmp_regno;
1094 ULONGEST tmp_save;
1095
1096 /* Details of the instruction. */
1097 struct amd64_insn insn_details;
1098
1099 /* The possibly modified insn. */
1100 gdb::byte_vector insn_buf;
1101 };
1102
1103 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1104 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1105 at which point delete these in favor of libopcodes' versions). */
1106
1107 static const unsigned char onebyte_has_modrm[256] = {
1108 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1109 /* ------------------------------- */
1110 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
1111 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
1112 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
1113 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
1114 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
1115 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
1116 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
1117 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
1118 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
1119 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
1120 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
1121 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
1122 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
1123 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
1124 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
1125 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
1126 /* ------------------------------- */
1127 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1128 };
1129
1130 static const unsigned char twobyte_has_modrm[256] = {
1131 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1132 /* ------------------------------- */
1133 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
1134 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
1135 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
1136 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
1137 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
1138 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
1139 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
1140 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
1141 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
1142 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
1143 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
1144 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
1145 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
1146 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
1147 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
1148 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
1149 /* ------------------------------- */
1150 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1151 };
1152
1153 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1154
1155 static int
1156 rex_prefix_p (gdb_byte pfx)
1157 {
1158 return REX_PREFIX_P (pfx);
1159 }
1160
1161 /* True if PFX is the start of the 2-byte VEX prefix. */
1162
1163 static bool
1164 vex2_prefix_p (gdb_byte pfx)
1165 {
1166 return pfx == 0xc5;
1167 }
1168
1169 /* True if PFX is the start of the 3-byte VEX prefix. */
1170
1171 static bool
1172 vex3_prefix_p (gdb_byte pfx)
1173 {
1174 return pfx == 0xc4;
1175 }
1176
1177 /* Skip the legacy instruction prefixes in INSN.
1178 We assume INSN is properly sentineled so we don't have to worry
1179 about falling off the end of the buffer. */
1180
1181 static gdb_byte *
1182 amd64_skip_prefixes (gdb_byte *insn)
1183 {
1184 while (1)
1185 {
1186 switch (*insn)
1187 {
1188 case DATA_PREFIX_OPCODE:
1189 case ADDR_PREFIX_OPCODE:
1190 case CS_PREFIX_OPCODE:
1191 case DS_PREFIX_OPCODE:
1192 case ES_PREFIX_OPCODE:
1193 case FS_PREFIX_OPCODE:
1194 case GS_PREFIX_OPCODE:
1195 case SS_PREFIX_OPCODE:
1196 case LOCK_PREFIX_OPCODE:
1197 case REPE_PREFIX_OPCODE:
1198 case REPNE_PREFIX_OPCODE:
1199 ++insn;
1200 continue;
1201 default:
1202 break;
1203 }
1204 break;
1205 }
1206
1207 return insn;
1208 }
1209
1210 /* Return an integer register (other than RSP) that is unused as an input
1211 operand in INSN.
1212 In order to not require adding a rex prefix if the insn doesn't already
1213 have one, the result is restricted to RAX ... RDI, sans RSP.
1214 The register numbering of the result follows architecture ordering,
1215 e.g. RDI = 7. */
1216
1217 static int
1218 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1219 {
1220 /* 1 bit for each reg */
1221 int used_regs_mask = 0;
1222
1223 /* There can be at most 3 int regs used as inputs in an insn, and we have
1224 7 to choose from (RAX ... RDI, sans RSP).
1225 This allows us to take a conservative approach and keep things simple.
1226 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1227 that implicitly specify RAX. */
1228
1229 /* Avoid RAX. */
1230 used_regs_mask |= 1 << EAX_REG_NUM;
1231 /* Similarly avoid RDX, implicit operand in divides. */
1232 used_regs_mask |= 1 << EDX_REG_NUM;
1233 /* Avoid RSP. */
1234 used_regs_mask |= 1 << ESP_REG_NUM;
1235
1236 /* If the opcode is one byte long and there's no ModRM byte,
1237 assume the opcode specifies a register. */
1238 if (details->opcode_len == 1 && details->modrm_offset == -1)
1239 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1240
1241 /* Mark used regs in the modrm/sib bytes. */
1242 if (details->modrm_offset != -1)
1243 {
1244 int modrm = details->raw_insn[details->modrm_offset];
1245 int mod = MODRM_MOD_FIELD (modrm);
1246 int reg = MODRM_REG_FIELD (modrm);
1247 int rm = MODRM_RM_FIELD (modrm);
1248 int have_sib = mod != 3 && rm == 4;
1249
1250 /* Assume the reg field of the modrm byte specifies a register. */
1251 used_regs_mask |= 1 << reg;
1252
1253 if (have_sib)
1254 {
1255 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1256 int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1257 used_regs_mask |= 1 << base;
1258 used_regs_mask |= 1 << idx;
1259 }
1260 else
1261 {
1262 used_regs_mask |= 1 << rm;
1263 }
1264 }
1265
1266 gdb_assert (used_regs_mask < 256);
1267 gdb_assert (used_regs_mask != 255);
1268
1269 /* Finally, find a free reg. */
1270 {
1271 int i;
1272
1273 for (i = 0; i < 8; ++i)
1274 {
1275 if (! (used_regs_mask & (1 << i)))
1276 return i;
1277 }
1278
1279 /* We shouldn't get here. */
1280 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1281 }
1282 }
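
/* For example, given the instruction "mov (%rsi),%rdi", the mask
   above holds %rax, %rdx and %rsp (always avoided) plus %rdi (the
   ModRM reg field) and %rsi (the ModRM rm field), so the first free
   register, %rcx (architectural number 1), is returned.  */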
1283
1284 /* Extract the details of INSN that we need. */
1285
1286 static void
1287 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1288 {
1289 gdb_byte *start = insn;
1290 int need_modrm;
1291
1292 details->raw_insn = insn;
1293
1294 details->opcode_len = -1;
1295 details->enc_prefix_offset = -1;
1296 details->opcode_offset = -1;
1297 details->modrm_offset = -1;
1298
1299 /* Skip legacy instruction prefixes. */
1300 insn = amd64_skip_prefixes (insn);
1301
1302 /* Skip REX/VEX instruction encoding prefixes. */
1303 if (rex_prefix_p (*insn))
1304 {
1305 details->enc_prefix_offset = insn - start;
1306 ++insn;
1307 }
1308 else if (vex2_prefix_p (*insn))
1309 {
1310 /* Don't record the offset in this case because this prefix has
1311 no REX.B equivalent. */
1312 insn += 2;
1313 }
1314 else if (vex3_prefix_p (*insn))
1315 {
1316 details->enc_prefix_offset = insn - start;
1317 insn += 3;
1318 }
1319
1320 details->opcode_offset = insn - start;
1321
1322 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1323 {
1324 /* Two or three-byte opcode. */
1325 ++insn;
1326 need_modrm = twobyte_has_modrm[*insn];
1327
1328 /* Check for three-byte opcode. */
1329 switch (*insn)
1330 {
1331 case 0x24:
1332 case 0x25:
1333 case 0x38:
1334 case 0x3a:
1335 case 0x7a:
1336 case 0x7b:
1337 ++insn;
1338 details->opcode_len = 3;
1339 break;
1340 default:
1341 details->opcode_len = 2;
1342 break;
1343 }
1344 }
1345 else
1346 {
1347 /* One-byte opcode. */
1348 need_modrm = onebyte_has_modrm[*insn];
1349 details->opcode_len = 1;
1350 }
1351
1352 if (need_modrm)
1353 {
1354 ++insn;
1355 details->modrm_offset = insn - start;
1356 }
1357 }
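
/* For example, decoding the hypothetical 7-byte instruction

     48 8b 05 d3 0f 20 00	mov 0x200fd3(%rip),%rax

   yields opcode_len = 1, enc_prefix_offset = 0 (the REX.W prefix),
   opcode_offset = 1, and modrm_offset = 2 (onebyte_has_modrm[0x8b]
   is 1).  */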
1358
1359 /* Update %rip-relative addressing in INSN.
1360
1361 %rip-relative addressing only uses a 32-bit displacement.
1362 32 bits is not enough to be guaranteed to cover the distance between where
1363 the real instruction is and where its copy is.
1364 Convert the insn to use base+disp addressing.
1365 We set base = pc + insn_length so we can leave disp unchanged. */
1366
1367 static void
1368 fixup_riprel (struct gdbarch *gdbarch, amd64_displaced_step_closure *dsc,
1369 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1370 {
1371 const struct amd64_insn *insn_details = &dsc->insn_details;
1372 int modrm_offset = insn_details->modrm_offset;
1373 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1374 CORE_ADDR rip_base;
1375 int insn_length;
1376 int arch_tmp_regno, tmp_regno;
1377 ULONGEST orig_value;
1378
1379 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1380 ++insn;
1381
1382 /* Compute the rip-relative address. */
1383 insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
1384 dsc->insn_buf.size (), from);
1385 rip_base = from + insn_length;
1386
1387 /* We need a register to hold the address.
1388 Pick one not used in the insn.
1389 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1390 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1391 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1392
1393 /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1). */
1394 static constexpr gdb_byte VEX3_NOT_B = 0x20;
1395
1396 /* REX.B should already be unset (VEX.!B set) as we were using
1397 rip-relative addressing, but ensure it is unset (set for VEX)
1398 anyway: TMP_REGNO is not one of r8-r15. */
1399 if (insn_details->enc_prefix_offset != -1)
1400 {
1401 gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
1402 if (rex_prefix_p (pfx[0]))
1403 pfx[0] &= ~REX_B;
1404 else if (vex3_prefix_p (pfx[0]))
1405 pfx[1] |= VEX3_NOT_B;
1406 else
1407 gdb_assert_not_reached ("unhandled prefix");
1408 }
1409
1410 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1411 dsc->tmp_regno = tmp_regno;
1412 dsc->tmp_save = orig_value;
1413 dsc->tmp_used = 1;
1414
1415 /* Convert the ModRM field to be base+disp. */
1416 dsc->insn_buf[modrm_offset] &= ~0xc7;
1417 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1418
1419 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1420
1421 if (debug_displaced)
1422 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1423 "displaced: using temp reg %d, old value %s, new value %s\n",
1424 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1425 paddress (gdbarch, rip_base));
1426 }
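
/* Continuing that example: for "mov 0x200fd3(%rip),%rax" with %rcx
   picked as the temporary, the ModRM byte 0x05 (mod=00, rm=101,
   i.e. %rip+disp32) becomes 0x81 (mod=10, rm=001, i.e. disp32(%rcx)),
   %rcx is loaded with FROM plus the instruction length, and its
   original value is stashed in the closure so that
   amd64_displaced_step_fixup can restore it afterwards.  */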
1427
1428 static void
1429 fixup_displaced_copy (struct gdbarch *gdbarch,
1430 amd64_displaced_step_closure *dsc,
1431 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1432 {
1433 const struct amd64_insn *details = &dsc->insn_details;
1434
1435 if (details->modrm_offset != -1)
1436 {
1437 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1438
1439 if ((modrm & 0xc7) == 0x05)
1440 {
1441 /* The insn uses rip-relative addressing.
1442 Deal with it. */
1443 fixup_riprel (gdbarch, dsc, from, to, regs);
1444 }
1445 }
1446 }
1447
1448 struct displaced_step_closure *
1449 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1450 CORE_ADDR from, CORE_ADDR to,
1451 struct regcache *regs)
1452 {
1453 int len = gdbarch_max_insn_length (gdbarch);
1454 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1455 continually watch for running off the end of the buffer. */
1456 int fixup_sentinel_space = len;
1457 amd64_displaced_step_closure *dsc
1458 = new amd64_displaced_step_closure (len + fixup_sentinel_space);
1459 gdb_byte *buf = &dsc->insn_buf[0];
1460 struct amd64_insn *details = &dsc->insn_details;
1461
1462 read_memory (from, buf, len);
1463
1464 /* Set up the sentinel space so we don't have to worry about running
1465 off the end of the buffer. An excessive number of leading prefixes
1466 could otherwise cause this. */
1467 memset (buf + len, 0, fixup_sentinel_space);
1468
1469 amd64_get_insn_details (buf, details);
1470
1471 /* GDB may get control back after the insn after the syscall.
1472 Presumably this is a kernel bug.
1473 If this is a syscall, make sure there's a nop afterwards. */
1474 {
1475 int syscall_length;
1476
1477 if (amd64_syscall_p (details, &syscall_length))
1478 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1479 }
1480
1481 /* Modify the insn to cope with the address where it will be executed from.
1482 In particular, handle any rip-relative addressing. */
1483 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1484
1485 write_memory (to, buf, len);
1486
1487 if (debug_displaced)
1488 {
1489 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1490 paddress (gdbarch, from), paddress (gdbarch, to));
1491 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1492 }
1493
1494 return dsc;
1495 }
1496
1497 static int
1498 amd64_absolute_jmp_p (const struct amd64_insn *details)
1499 {
1500 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1501
1502 if (insn[0] == 0xff)
1503 {
1504 /* jump near, absolute indirect (/4) */
1505 if ((insn[1] & 0x38) == 0x20)
1506 return 1;
1507
1508 /* jump far, absolute indirect (/5) */
1509 if ((insn[1] & 0x38) == 0x28)
1510 return 1;
1511 }
1512
1513 return 0;
1514 }
1515
1516 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1517
1518 static int
1519 amd64_jmp_p (const struct amd64_insn *details)
1520 {
1521 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1522
1523 /* jump short, relative. */
1524 if (insn[0] == 0xeb)
1525 return 1;
1526
1527 /* jump near, relative. */
1528 if (insn[0] == 0xe9)
1529 return 1;
1530
1531 return amd64_absolute_jmp_p (details);
1532 }
1533
1534 static int
1535 amd64_absolute_call_p (const struct amd64_insn *details)
1536 {
1537 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1538
1539 if (insn[0] == 0xff)
1540 {
1541 /* Call near, absolute indirect (/2) */
1542 if ((insn[1] & 0x38) == 0x10)
1543 return 1;
1544
1545 /* Call far, absolute indirect (/3) */
1546 if ((insn[1] & 0x38) == 0x18)
1547 return 1;
1548 }
1549
1550 return 0;
1551 }
1552
1553 static int
1554 amd64_ret_p (const struct amd64_insn *details)
1555 {
1556 /* NOTE: gcc can emit "repz ; ret". */
1557 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1558
1559 switch (insn[0])
1560 {
1561 case 0xc2: /* ret near, pop N bytes */
1562 case 0xc3: /* ret near */
1563 case 0xca: /* ret far, pop N bytes */
1564 case 0xcb: /* ret far */
1565 case 0xcf: /* iret */
1566 return 1;
1567
1568 default:
1569 return 0;
1570 }
1571 }
1572
1573 static int
1574 amd64_call_p (const struct amd64_insn *details)
1575 {
1576 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1577
1578 if (amd64_absolute_call_p (details))
1579 return 1;
1580
1581 /* call near, relative */
1582 if (insn[0] == 0xe8)
1583 return 1;
1584
1585 return 0;
1586 }
1587
1588 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1589 length in bytes. Otherwise, return zero. */
1590
1591 static int
1592 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1593 {
1594 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1595
1596 if (insn[0] == 0x0f && insn[1] == 0x05)
1597 {
1598 *lengthp = 2;
1599 return 1;
1600 }
1601
1602 return 0;
1603 }
1604
1605 /* Classify the instruction at ADDR using PRED.
1606 Throw an error if the memory can't be read. */
1607
1608 static int
1609 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1610 int (*pred) (const struct amd64_insn *))
1611 {
1612 struct amd64_insn details;
1613 gdb_byte *buf;
1614 int len, classification;
1615
1616 len = gdbarch_max_insn_length (gdbarch);
1617 buf = (gdb_byte *) alloca (len);
1618
1619 read_code (addr, buf, len);
1620 amd64_get_insn_details (buf, &details);
1621
1622 classification = pred (&details);
1623
1624 return classification;
1625 }
1626
1627 /* The gdbarch insn_is_call method. */
1628
1629 static int
1630 amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
1631 {
1632 return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
1633 }
1634
1635 /* The gdbarch insn_is_ret method. */
1636
1637 static int
1638 amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
1639 {
1640 return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
1641 }
1642
1643 /* The gdbarch insn_is_jump method. */
1644
1645 static int
1646 amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
1647 {
1648 return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
1649 }
1650
1651 /* Fix up the state of registers and memory after having single-stepped
1652 a displaced instruction. */
1653
1654 void
1655 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1656 struct displaced_step_closure *dsc_,
1657 CORE_ADDR from, CORE_ADDR to,
1658 struct regcache *regs)
1659 {
1660 amd64_displaced_step_closure *dsc = (amd64_displaced_step_closure *) dsc_;
1661 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1662 /* The offset we applied to the instruction's address. */
1663 ULONGEST insn_offset = to - from;
1664 gdb_byte *insn = dsc->insn_buf.data ();
1665 const struct amd64_insn *insn_details = &dsc->insn_details;
1666
1667 if (debug_displaced)
1668 fprintf_unfiltered (gdb_stdlog,
1669 "displaced: fixup (%s, %s), "
1670 "insn = 0x%02x 0x%02x ...\n",
1671 paddress (gdbarch, from), paddress (gdbarch, to),
1672 insn[0], insn[1]);
1673
1674 /* If we used a tmp reg, restore it. */
1675
1676 if (dsc->tmp_used)
1677 {
1678 if (debug_displaced)
1679 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1680 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1681 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1682 }
1683
1684 /* The list of issues to contend with here is taken from
1685 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1686 Yay for Free Software! */
1687
1688 /* Relocate the %rip back to the program's instruction stream,
1689 if necessary. */
1690
1691 /* Except in the case of absolute or indirect jump or call
1692 instructions, or a return instruction, the new rip is relative to
1693 the displaced instruction; make it relative to the original insn.
1694 Well, signal handler returns don't need relocation either, but we use the
1695 value of %rip to recognize those; see below. */
1696 if (! amd64_absolute_jmp_p (insn_details)
1697 && ! amd64_absolute_call_p (insn_details)
1698 && ! amd64_ret_p (insn_details))
1699 {
1700 ULONGEST orig_rip;
1701 int insn_len;
1702
1703 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1704
1705 /* A signal trampoline system call changes the %rip, resuming
1706 execution of the main program after the signal handler has
1707 returned. That makes them like 'return' instructions; we
1708 shouldn't relocate %rip.
1709
1710 But most system calls don't, and we do need to relocate %rip.
1711
1712 Our heuristic for distinguishing these cases: if stepping
1713 over the system call instruction left control directly after
1714 the instruction, then we relocate --- control almost certainly
1715 doesn't belong in the displaced copy. Otherwise, we assume
1716 the instruction has put control where it belongs, and leave
1717 it unrelocated. Goodness help us if there are PC-relative
1718 system calls. */
1719 if (amd64_syscall_p (insn_details, &insn_len)
1720 && orig_rip != to + insn_len
1721 /* GDB can get control back after the insn after the syscall.
1722 Presumably this is a kernel bug.
1723 Fixup ensures it's a nop; we add one to the length for it. */
1724 && orig_rip != to + insn_len + 1)
1725 {
1726 if (debug_displaced)
1727 fprintf_unfiltered (gdb_stdlog,
1728 "displaced: syscall changed %%rip; "
1729 "not relocating\n");
1730 }
1731 else
1732 {
1733 ULONGEST rip = orig_rip - insn_offset;
1734
1735 /* If we just stepped over a breakpoint insn, we don't back up
1736 the pc on purpose; this is to match behaviour without
1737 stepping. */
1738
1739 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1740
1741 if (debug_displaced)
1742 fprintf_unfiltered (gdb_stdlog,
1743 "displaced: "
1744 "relocated %%rip from %s to %s\n",
1745 paddress (gdbarch, orig_rip),
1746 paddress (gdbarch, rip));
1747 }
1748 }
1749
1750 /* If the instruction was PUSHFL, then the TF bit will be set in the
1751 pushed value, and should be cleared. We'll leave this for later,
1752 since GDB already messes up the TF flag when stepping over a
1753 pushfl. */
1754
1755 /* If the instruction was a call, the return address now atop the
1756 stack is the address following the copied instruction. We need
1757 to make it the address following the original instruction. */
1758 if (amd64_call_p (insn_details))
1759 {
1760 ULONGEST rsp;
1761 ULONGEST retaddr;
1762 const ULONGEST retaddr_len = 8;
1763
1764 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1765 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1766 retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
1767 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1768
1769 if (debug_displaced)
1770 fprintf_unfiltered (gdb_stdlog,
1771 "displaced: relocated return addr at %s "
1772 "to %s\n",
1773 paddress (gdbarch, rsp),
1774 paddress (gdbarch, retaddr));
1775 }
1776 }
1777
1778 /* If the instruction INSN uses RIP-relative addressing, return the
1779 offset into the raw INSN where the displacement to be adjusted is
1780 found. Returns 0 if the instruction doesn't use RIP-relative
1781 addressing. */
1782
1783 static int
1784 rip_relative_offset (struct amd64_insn *insn)
1785 {
1786 if (insn->modrm_offset != -1)
1787 {
1788 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1789
1790 if ((modrm & 0xc7) == 0x05)
1791 {
1792 /* The displacement is found right after the ModRM byte. */
1793 return insn->modrm_offset + 1;
1794 }
1795 }
1796
1797 return 0;
1798 }
1799
1800 static void
1801 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1802 {
1803 target_write_memory (*to, buf, len);
1804 *to += len;
1805 }
1806
1807 static void
1808 amd64_relocate_instruction (struct gdbarch *gdbarch,
1809 CORE_ADDR *to, CORE_ADDR oldloc)
1810 {
1811 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1812 int len = gdbarch_max_insn_length (gdbarch);
1813 /* Extra space for sentinels. */
1814 int fixup_sentinel_space = len;
1815 gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
1816 struct amd64_insn insn_details;
1817 int offset = 0;
1818 LONGEST rel32, newrel;
1819 gdb_byte *insn;
1820 int insn_length;
1821
1822 read_memory (oldloc, buf, len);
1823
1824 /* Set up the sentinel space so we don't have to worry about running
1825 off the end of the buffer. An excessive number of leading prefixes
1826 could otherwise cause this. */
1827 memset (buf + len, 0, fixup_sentinel_space);
1828
1829 insn = buf;
1830 amd64_get_insn_details (insn, &insn_details);
1831
1832 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1833
1834 /* Skip legacy instruction prefixes. */
1835 insn = amd64_skip_prefixes (insn);
1836
1837 /* Rewrite calls with 32-bit relative addresses as a push/jump
1838 sequence, with the address pushed being the location where the
1839 original call in the user program would return to. */
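/* For example, for a plain 5-byte call (a sketch; addresses are
   illustrative only):

     original at OLDLOC:   e8 <rel32>          call <target>
     copy at *TO:          push $(OLDLOC + 5)  ; the original return addr
                           e9 <rel32'>         jmp <target>

   where rel32' is rel32 adjusted for the distance between OLDLOC and
   the copy at *TO. */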
1840 if (insn[0] == 0xe8)
1841 {
1842 gdb_byte push_buf[32];
1843 CORE_ADDR ret_addr;
1844 int i = 0;
1845
1846 /* Where "ret" in the original code will return to. */
1847 ret_addr = oldloc + insn_length;
1848
1849 /* If pushing an address higher than or equal to 0x80000000,
1850 avoid 'pushq', as that sign extends its 32-bit operand, which
1851 would be incorrect. */
1852 if (ret_addr <= 0x7fffffff)
1853 {
1854 push_buf[0] = 0x68; /* pushq $... */
1855 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1856 i = 5;
1857 }
1858 else
1859 {
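/* There is no 'pushq $imm64' encoding, so make room on the stack and
   store the 64-bit return address in two 32-bit halves. */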
1860 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1861 push_buf[i++] = 0x83;
1862 push_buf[i++] = 0xec;
1863 push_buf[i++] = 0x08;
1864
1865 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1866 push_buf[i++] = 0x04;
1867 push_buf[i++] = 0x24;
1868 store_unsigned_integer (&push_buf[i], 4, byte_order,
1869 ret_addr & 0xffffffff);
1870 i += 4;
1871
1872 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1873 push_buf[i++] = 0x44;
1874 push_buf[i++] = 0x24;
1875 push_buf[i++] = 0x04;
1876 store_unsigned_integer (&push_buf[i], 4, byte_order,
1877 ret_addr >> 32);
1878 i += 4;
1879 }
1880 gdb_assert (i <= sizeof (push_buf));
1881 /* Push the push. */
1882 append_insns (to, i, push_buf);
1883
1884 /* Convert the relative call to a relative jump. */
1885 insn[0] = 0xe9;
1886
1887 /* Adjust the destination offset. */
1888 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1889 newrel = (oldloc - *to) + rel32;
1890 store_signed_integer (insn + 1, 4, byte_order, newrel);
1891
1892 if (debug_displaced)
1893 fprintf_unfiltered (gdb_stdlog,
1894 "Adjusted insn rel32=%s at %s to"
1895 " rel32=%s at %s\n",
1896 hex_string (rel32), paddress (gdbarch, oldloc),
1897 hex_string (newrel), paddress (gdbarch, *to));
1898
1899 /* Write the adjusted jump into its displaced location. */
1900 append_insns (to, 5, insn);
1901 return;
1902 }
1903
1904 offset = rip_relative_offset (&insn_details);
1905 if (!offset)
1906 {
1907 /* Adjust jumps with 32-bit relative addresses. Calls are
1908 already handled above. */
1909 if (insn[0] == 0xe9)
1910 offset = 1;
1911 /* Adjust conditional jumps. */
1912 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1913 offset = 2;
1914 }
1915
1916 if (offset)
1917 {
1918 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1919 newrel = (oldloc - *to) + rel32;
1920 store_signed_integer (insn + offset, 4, byte_order, newrel);
1921 if (debug_displaced)
1922 fprintf_unfiltered (gdb_stdlog,
1923 "Adjusted insn rel32=%s at %s to"
1924 " rel32=%s at %s\n",
1925 hex_string (rel32), paddress (gdbarch, oldloc),
1926 hex_string (newrel), paddress (gdbarch, *to));
1927 }
1928
1929 /* Write the adjusted instruction into its displaced location. */
1930 append_insns (to, insn_length, buf);
1931 }
1932
1933 \f
1934 /* The maximum number of saved registers. This should include %rip. */
1935 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1936
1937 struct amd64_frame_cache
1938 {
1939 /* Base address. */
1940 CORE_ADDR base;
1941 int base_p;
1942 CORE_ADDR sp_offset;
1943 CORE_ADDR pc;
1944
1945 /* Saved registers. */
1946 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1947 CORE_ADDR saved_sp;
1948 int saved_sp_reg;
1949
1950 /* Do we have a frame? */
1951 int frameless_p;
1952 };
1953
1954 /* Initialize a frame cache. */
1955
1956 static void
1957 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1958 {
1959 int i;
1960
1961 /* Base address. */
1962 cache->base = 0;
1963 cache->base_p = 0;
1964 cache->sp_offset = -8;
1965 cache->pc = 0;
1966
1967 /* Saved registers. We initialize these to -1 since zero is a valid
1968 offset (that's where %rbp is supposed to be stored).
1969 The values start out as being offsets, and are later converted to
1970 addresses (at which point -1 is interpreted as an address, still meaning
1971 "invalid"). */
1972 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1973 cache->saved_regs[i] = -1;
1974 cache->saved_sp = 0;
1975 cache->saved_sp_reg = -1;
1976
1977 /* Frameless until proven otherwise. */
1978 cache->frameless_p = 1;
1979 }
1980
1981 /* Allocate and initialize a frame cache. */
1982
1983 static struct amd64_frame_cache *
1984 amd64_alloc_frame_cache (void)
1985 {
1986 struct amd64_frame_cache *cache;
1987
1988 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1989 amd64_init_frame_cache (cache);
1990 return cache;
1991 }
1992
1993 /* GCC 4.4 and later can put code in the prologue to realign the
1994 stack pointer. Check whether PC points to such code, and update
1995 CACHE accordingly. Return the first instruction after the code
1996 sequence or CURRENT_PC, whichever is smaller. If we don't
1997 recognize the code, return PC. */
1998
1999 static CORE_ADDR
2000 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2001 struct amd64_frame_cache *cache)
2002 {
2003 /* There are 2 code sequences to re-align the stack before the frame
2004 gets set up:
2005
2006 1. Use a caller-saved register:
2007
2008 leaq 8(%rsp), %reg
2009 andq $-XXX, %rsp
2010 pushq -8(%reg)
2011
2012 2. Use a callee-saved register:
2013
2014 pushq %reg
2015 leaq 16(%rsp), %reg
2016 andq $-XXX, %rsp
2017 pushq -8(%reg)
2018
2019 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2020
2021 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2022 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2023 */
2024
2025 gdb_byte buf[18];
2026 int reg, r;
2027 int offset, offset_and;
2028
2029 if (target_read_code (pc, buf, sizeof buf))
2030 return pc;
2031
2032 /* Check the caller-saved register case. The first instruction has
2033 to be "leaq 8(%rsp), %reg". */
2034 if ((buf[0] & 0xfb) == 0x48
2035 && buf[1] == 0x8d
2036 && buf[3] == 0x24
2037 && buf[4] == 0x8)
2038 {
2039 /* MOD must be binary 10 and R/M must be binary 100. */
2040 if ((buf[2] & 0xc7) != 0x44)
2041 return pc;
2042
2043 /* REG has register number. */
2044 reg = (buf[2] >> 3) & 7;
2045
2046 /* Check the REX.R bit. */
2047 if (buf[0] == 0x4c)
2048 reg += 8;
2049
2050 offset = 5;
2051 }
2052 else
2053 {
2054 /* Check the callee-saved register case. The first instruction
2055 has to be "pushq %reg". */
2056 reg = 0;
2057 if ((buf[0] & 0xf8) == 0x50)
2058 offset = 0;
2059 else if ((buf[0] & 0xf6) == 0x40
2060 && (buf[1] & 0xf8) == 0x50)
2061 {
2062 /* Check the REX.B bit. */
2063 if ((buf[0] & 1) != 0)
2064 reg = 8;
2065
2066 offset = 1;
2067 }
2068 else
2069 return pc;
2070
2071 /* Get register. */
2072 reg += buf[offset] & 0x7;
2073
2074 offset++;
2075
2076 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2077 if ((buf[offset] & 0xfb) != 0x48
2078 || buf[offset + 1] != 0x8d
2079 || buf[offset + 3] != 0x24
2080 || buf[offset + 4] != 0x10)
2081 return pc;
2082
2083 /* MOD must be binary 10 and R/M must be binary 100. */
2084 if ((buf[offset + 2] & 0xc7) != 0x44)
2085 return pc;
2086
2087 /* REG has register number. */
2088 r = (buf[offset + 2] >> 3) & 7;
2089
2090 /* Check the REX.R bit. */
2091 if (buf[offset] == 0x4c)
2092 r += 8;
2093
2094 /* Registers in pushq and leaq have to be the same. */
2095 if (reg != r)
2096 return pc;
2097
2098 offset += 5;
2099 }
2100
2101 /* Register can't be %rsp or %rbp. */
2102 if (reg == 4 || reg == 5)
2103 return pc;
2104
2105 /* The next instruction has to be "andq $-XXX, %rsp". */
2106 if (buf[offset] != 0x48
2107 || buf[offset + 2] != 0xe4
2108 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2109 return pc;
2110
2111 offset_and = offset;
2112 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2113
2114 /* The next instruction has to be "pushq -8(%reg)". */
2115 r = 0;
2116 if (buf[offset] == 0xff)
2117 offset++;
2118 else if ((buf[offset] & 0xf6) == 0x40
2119 && buf[offset + 1] == 0xff)
2120 {
2121 /* Check the REX.B bit. */
2122 if ((buf[offset] & 0x1) != 0)
2123 r = 8;
2124 offset += 2;
2125 }
2126 else
2127 return pc;
2128
2129 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2130 01. */
2131 if (buf[offset + 1] != 0xf8
2132 || (buf[offset] & 0xf8) != 0x70)
2133 return pc;
2134
2135 /* R/M has register. */
2136 r += buf[offset] & 7;
2137
2138 /* Registers in leaq and pushq have to be the same. */
2139 if (reg != r)
2140 return pc;
2141
2142 if (current_pc > pc + offset_and)
2143 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2144
2145 return std::min (pc + offset + 2, current_pc);
2146 }
2147
2148 /* Similar to amd64_analyze_stack_align for x32. */
2149
2150 static CORE_ADDR
2151 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2152 struct amd64_frame_cache *cache)
2153 {
2154 /* There are 2 code sequences to re-align the stack before the frame
2155 gets set up:
2156
2157 1. Use a caller-saved register:
2158
2159 leaq 8(%rsp), %reg
2160 andq $-XXX, %rsp
2161 pushq -8(%reg)
2162
2163 or
2164
2165 [addr32] leal 8(%rsp), %reg
2166 andl $-XXX, %esp
2167 [addr32] pushq -8(%reg)
2168
2169 2. Use a callee-saved register:
2170
2171 pushq %reg
2172 leaq 16(%rsp), %reg
2173 andq $-XXX, %rsp
2174 pushq -8(%reg)
2175
2176 or
2177
2178 pushq %reg
2179 [addr32] leal 16(%rsp), %reg
2180 andl $-XXX, %esp
2181 [addr32] pushq -8(%reg)
2182
2183 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2184
2185 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2186 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2187
2188 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2189
2190 0x83 0xe4 0xf0 andl $-16, %esp
2191 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2192 */
2193
2194 gdb_byte buf[19];
2195 int reg, r;
2196 int offset, offset_and;
2197
2198 if (target_read_memory (pc, buf, sizeof buf))
2199 return pc;
2200
2201 /* Skip optional addr32 prefix. */
2202 offset = buf[0] == 0x67 ? 1 : 0;
2203
2204 /* Check the caller-saved register case. The first instruction has
2205 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2206 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2207 && buf[offset + 1] == 0x8d
2208 && buf[offset + 3] == 0x24
2209 && buf[offset + 4] == 0x8)
2210 {
2211 /* MOD must be binary 10 and R/M must be binary 100. */
2212 if ((buf[offset + 2] & 0xc7) != 0x44)
2213 return pc;
2214
2215 /* REG has register number. */
2216 reg = (buf[offset + 2] >> 3) & 7;
2217
2218 /* Check the REX.R bit. */
2219 if ((buf[offset] & 0x4) != 0)
2220 reg += 8;
2221
2222 offset += 5;
2223 }
2224 else
2225 {
2226 /* Check the callee-saved register case. The first instruction
2227 has to be "pushq %reg". */
2228 reg = 0;
2229 if ((buf[offset] & 0xf6) == 0x40
2230 && (buf[offset + 1] & 0xf8) == 0x50)
2231 {
2232 /* Check the REX.B bit. */
2233 if ((buf[offset] & 1) != 0)
2234 reg = 8;
2235
2236 offset += 1;
2237 }
2238 else if ((buf[offset] & 0xf8) != 0x50)
2239 return pc;
2240
2241 /* Get register. */
2242 reg += buf[offset] & 0x7;
2243
2244 offset++;
2245
2246 /* Skip optional addr32 prefix. */
2247 if (buf[offset] == 0x67)
2248 offset++;
2249
2250 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2251 "leal 16(%rsp), %reg". */
2252 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2253 || buf[offset + 1] != 0x8d
2254 || buf[offset + 3] != 0x24
2255 || buf[offset + 4] != 0x10)
2256 return pc;
2257
2258 /* MOD must be binary 10 and R/M must be binary 100. */
2259 if ((buf[offset + 2] & 0xc7) != 0x44)
2260 return pc;
2261
2262 /* REG has register number. */
2263 r = (buf[offset + 2] >> 3) & 7;
2264
2265 /* Check the REX.R bit. */
2266 if ((buf[offset] & 0x4) != 0)
2267 r += 8;
2268
2269 /* Registers in pushq and leaq have to be the same. */
2270 if (reg != r)
2271 return pc;
2272
2273 offset += 5;
2274 }
2275
2276 /* Register can't be %rsp or %rbp. */
2277 if (reg == 4 || reg == 5)
2278 return pc;
2279
2280 /* The next instruction may be "andq $-XXX, %rsp" or
2281 "andl $-XXX, %esp". */
2282 if (buf[offset] != 0x48)
2283 offset--;
2284
2285 if (buf[offset + 2] != 0xe4
2286 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2287 return pc;
2288
2289 offset_and = offset;
2290 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2291
2292 /* Skip optional addr32 prefix. */
2293 if (buf[offset] == 0x67)
2294 offset++;
2295
2296 /* The next instruction has to be "pushq -8(%reg)". */
2297 r = 0;
2298 if (buf[offset] == 0xff)
2299 offset++;
2300 else if ((buf[offset] & 0xf6) == 0x40
2301 && buf[offset + 1] == 0xff)
2302 {
2303 /* Check the REX.B bit. */
2304 if ((buf[offset] & 0x1) != 0)
2305 r = 8;
2306 offset += 2;
2307 }
2308 else
2309 return pc;
2310
2311 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2312 01. */
2313 if (buf[offset + 1] != 0xf8
2314 || (buf[offset] & 0xf8) != 0x70)
2315 return pc;
2316
2317 /* R/M has register. */
2318 r += buf[offset] & 7;
2319
2320 /* Registers in leaq and pushq have to be the same. */
2321 if (reg != r)
2322 return pc;
2323
2324 if (current_pc > pc + offset_and)
2325 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2326
2327 return std::min (pc + offset + 2, current_pc);
2328 }
2329
2330 /* Do a limited analysis of the prologue at PC and update CACHE
2331 accordingly. Bail out early if CURRENT_PC is reached. Return the
2332 address where the analysis stopped.
2333
2334 We will handle only functions beginning with:
2335
2336 pushq %rbp 0x55
2337 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2338
2339 or (for the X32 ABI):
2340
2341 pushq %rbp 0x55
2342 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2343
2344 Any function that doesn't start with one of these sequences will be
2345 assumed to have no prologue and thus no valid frame pointer in
2346 %rbp. */
2347
2348 static CORE_ADDR
2349 amd64_analyze_prologue (struct gdbarch *gdbarch,
2350 CORE_ADDR pc, CORE_ADDR current_pc,
2351 struct amd64_frame_cache *cache)
2352 {
2353 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2354 /* There are two variations of movq %rsp, %rbp. */
2355 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2356 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2357 /* Ditto for movl %esp, %ebp. */
2358 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2359 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2360
2361 gdb_byte buf[3];
2362 gdb_byte op;
2363
2364 if (current_pc <= pc)
2365 return current_pc;
2366
2367 if (gdbarch_ptr_bit (gdbarch) == 32)
2368 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2369 else
2370 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2371
2372 op = read_code_unsigned_integer (pc, 1, byte_order);
2373
2374 if (op == 0x55) /* pushq %rbp */
2375 {
2376 /* Take into account that we've executed the `pushq %rbp' that
2377 starts this instruction sequence. */
2378 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2379 cache->sp_offset += 8;
2380
2381 /* If that's all, return now. */
2382 if (current_pc <= pc + 1)
2383 return current_pc;
2384
2385 read_code (pc + 1, buf, 3);
2386
2387 /* Check for `movq %rsp, %rbp'. */
2388 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2389 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2390 {
2391 /* OK, we actually have a frame. */
2392 cache->frameless_p = 0;
2393 return pc + 4;
2394 }
2395
2396 /* For X32, also check for `movl %esp, %ebp'. */
2397 if (gdbarch_ptr_bit (gdbarch) == 32)
2398 {
2399 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2400 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2401 {
2402 /* OK, we actually have a frame. */
2403 cache->frameless_p = 0;
2404 return pc + 3;
2405 }
2406 }
2407
2408 return pc + 1;
2409 }
2410
2411 return pc;
2412 }
2413
2414 /* Work around false termination of the prologue - GCC PR debug/48827.
2415
2416 START_PC is the first instruction of a function; PC is the tentative
2417 prologue end determined so far. The function returns PC if it has nothing to do.
2418
2419 84 c0 test %al,%al
2420 74 23 je after
2421 <-- here the line advance is 0 - the false prologue end marker.
2422 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2423 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2424 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2425 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2426 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2427 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2428 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2429 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2430 after: */
2431
2432 static CORE_ADDR
2433 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2434 {
2435 struct symtab_and_line start_pc_sal, next_sal;
2436 gdb_byte buf[4 + 8 * 7];
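/* Room for the test/je pair (4 bytes) plus eight movaps stores of at
   most 7 bytes each. */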
2437 int offset, xmmreg;
2438
2439 if (pc == start_pc)
2440 return pc;
2441
2442 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2443 if (start_pc_sal.symtab == NULL
2444 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2445 (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
2446 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2447 return pc;
2448
2449 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2450 if (next_sal.line != start_pc_sal.line)
2451 return pc;
2452
2453 /* START_PC can be from overlaid memory, ignored here. */
2454 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2455 return pc;
2456
2457 /* test %al,%al */
2458 if (buf[0] != 0x84 || buf[1] != 0xc0)
2459 return pc;
2460 /* je AFTER */
2461 if (buf[2] != 0x74)
2462 return pc;
2463
2464 offset = 4;
2465 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2466 {
2467 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2468 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2469 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2470 return pc;
2471
2472 /* 0b01?????? */
2473 if ((buf[offset + 2] & 0xc0) == 0x40)
2474 {
2475 /* 8-bit displacement. */
2476 offset += 4;
2477 }
2478 /* 0b10?????? */
2479 else if ((buf[offset + 2] & 0xc0) == 0x80)
2480 {
2481 /* 32-bit displacement. */
2482 offset += 7;
2483 }
2484 else
2485 return pc;
2486 }
2487
2488 /* Check that the je displacement lands exactly at AFTER. */
2489 if (offset - 4 != buf[3])
2490 return pc;
2491
2492 return next_sal.end;
2493 }
2494
2495 /* Return PC of first real instruction. */
2496
2497 static CORE_ADDR
2498 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2499 {
2500 struct amd64_frame_cache cache;
2501 CORE_ADDR pc;
2502 CORE_ADDR func_addr;
2503
2504 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2505 {
2506 CORE_ADDR post_prologue_pc
2507 = skip_prologue_using_sal (gdbarch, func_addr);
2508 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2509
2510 /* Clang always emits a line note before the prologue and another
2511 one after. We trust clang to emit usable line notes. */
2512 if (post_prologue_pc
2513 && (cust != NULL
2514 && COMPUNIT_PRODUCER (cust) != NULL
2515 && startswith (COMPUNIT_PRODUCER (cust), "clang ")))
2516 return std::max (start_pc, post_prologue_pc);
2517 }
2518
2519 amd64_init_frame_cache (&cache);
2520 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2521 &cache);
2522 if (cache.frameless_p)
2523 return start_pc;
2524
2525 return amd64_skip_xmm_prologue (pc, start_pc);
2526 }
2527 \f
2528
2529 /* Normal frames. */
2530
2531 static void
2532 amd64_frame_cache_1 (struct frame_info *this_frame,
2533 struct amd64_frame_cache *cache)
2534 {
2535 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2536 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2537 gdb_byte buf[8];
2538 int i;
2539
2540 cache->pc = get_frame_func (this_frame);
2541 if (cache->pc != 0)
2542 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2543 cache);
2544
2545 if (cache->frameless_p)
2546 {
2547 /* We didn't find a valid frame. If we're at the start of a
2548 function, or somewhere halfway through its prologue, the function's
2549 frame probably hasn't been fully set up yet. Try to
2550 reconstruct the base address for the stack frame by looking
2551 at the stack pointer. For truly "frameless" functions this
2552 might work too. */
2553
2554 if (cache->saved_sp_reg != -1)
2555 {
2556 /* Stack pointer has been saved. */
2557 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2558 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2559
2560 /* We're in the middle of aligning the stack. */
2561 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2562 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2563
2564 /* This will be added back below. */
2565 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2566 }
2567 else
2568 {
2569 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2570 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2571 + cache->sp_offset;
2572 }
2573 }
2574 else
2575 {
2576 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2577 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2578 }
2579
2580 /* Now that we have the base address for the stack frame we can
2581 calculate the value of %rsp in the calling frame. */
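/* The base points at the saved %rbp; the return address sits 8 bytes
   above it, and the caller's %rsp is 8 bytes above that. */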
2582 cache->saved_sp = cache->base + 16;
2583
2584 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2585 frame we find it at the same offset from the reconstructed base
2586 address. If we're in the middle of aligning the stack, %rip is handled
2587 differently (see above). */
2588 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2589 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2590
2591 /* Adjust all the saved registers such that they contain addresses
2592 instead of offsets. */
2593 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2594 if (cache->saved_regs[i] != -1)
2595 cache->saved_regs[i] += cache->base;
2596
2597 cache->base_p = 1;
2598 }
2599
2600 static struct amd64_frame_cache *
2601 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2602 {
2603 struct amd64_frame_cache *cache;
2604
2605 if (*this_cache)
2606 return (struct amd64_frame_cache *) *this_cache;
2607
2608 cache = amd64_alloc_frame_cache ();
2609 *this_cache = cache;
2610
2611 try
2612 {
2613 amd64_frame_cache_1 (this_frame, cache);
2614 }
2615 catch (const gdb_exception_error &ex)
2616 {
2617 if (ex.error != NOT_AVAILABLE_ERROR)
2618 throw;
2619 }
2620
2621 return cache;
2622 }
2623
2624 static enum unwind_stop_reason
2625 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2626 void **this_cache)
2627 {
2628 struct amd64_frame_cache *cache =
2629 amd64_frame_cache (this_frame, this_cache);
2630
2631 if (!cache->base_p)
2632 return UNWIND_UNAVAILABLE;
2633
2634 /* This marks the outermost frame. */
2635 if (cache->base == 0)
2636 return UNWIND_OUTERMOST;
2637
2638 return UNWIND_NO_REASON;
2639 }
2640
2641 static void
2642 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2643 struct frame_id *this_id)
2644 {
2645 struct amd64_frame_cache *cache =
2646 amd64_frame_cache (this_frame, this_cache);
2647
2648 if (!cache->base_p)
2649 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2650 else if (cache->base == 0)
2651 {
2652 /* This marks the outermost frame. */
2653 return;
2654 }
2655 else
2656 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2657 }
2658
2659 static struct value *
2660 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2661 int regnum)
2662 {
2663 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2664 struct amd64_frame_cache *cache =
2665 amd64_frame_cache (this_frame, this_cache);
2666
2667 gdb_assert (regnum >= 0);
2668
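/* %rsp is special: its value in the previous frame is not saved in
   memory anywhere, so it is reconstructed from the frame base
   instead. */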
2669 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2670 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2671
2672 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2673 return frame_unwind_got_memory (this_frame, regnum,
2674 cache->saved_regs[regnum]);
2675
2676 return frame_unwind_got_register (this_frame, regnum, regnum);
2677 }
2678
2679 static const struct frame_unwind amd64_frame_unwind =
2680 {
2681 NORMAL_FRAME,
2682 amd64_frame_unwind_stop_reason,
2683 amd64_frame_this_id,
2684 amd64_frame_prev_register,
2685 NULL,
2686 default_frame_sniffer
2687 };
2688 \f
2689 /* Generate a bytecode expression to get the value of the saved PC. */
2690
2691 static void
2692 amd64_gen_return_address (struct gdbarch *gdbarch,
2693 struct agent_expr *ax, struct axs_value *value,
2694 CORE_ADDR scope)
2695 {
2696 /* The following sequence assumes the traditional use of the base
2697 register. */
2698 ax_reg (ax, AMD64_RBP_REGNUM);
2699 ax_const_l (ax, 8);
2700 ax_simple (ax, aop_add);
2701 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2702 value->kind = axs_lvalue_memory;
2703 }
2704 \f
2705
2706 /* Signal trampolines. */
2707
2708 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2709 64-bit variants. This would require using identical frame caches
2710 on both platforms. */
2711
2712 static struct amd64_frame_cache *
2713 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2714 {
2715 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2716 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2717 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2718 struct amd64_frame_cache *cache;
2719 CORE_ADDR addr;
2720 gdb_byte buf[8];
2721 int i;
2722
2723 if (*this_cache)
2724 return (struct amd64_frame_cache *) *this_cache;
2725
2726 cache = amd64_alloc_frame_cache ();
2727
2728 try
2729 {
2730 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2731 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2732
2733 addr = tdep->sigcontext_addr (this_frame);
2734 gdb_assert (tdep->sc_reg_offset);
2735 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2736 for (i = 0; i < tdep->sc_num_regs; i++)
2737 if (tdep->sc_reg_offset[i] != -1)
2738 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2739
2740 cache->base_p = 1;
2741 }
2742 catch (const gdb_exception_error &ex)
2743 {
2744 if (ex.error != NOT_AVAILABLE_ERROR)
2745 throw;
2746 }
2747
2748 *this_cache = cache;
2749 return cache;
2750 }
2751
2752 static enum unwind_stop_reason
2753 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2754 void **this_cache)
2755 {
2756 struct amd64_frame_cache *cache =
2757 amd64_sigtramp_frame_cache (this_frame, this_cache);
2758
2759 if (!cache->base_p)
2760 return UNWIND_UNAVAILABLE;
2761
2762 return UNWIND_NO_REASON;
2763 }
2764
2765 static void
2766 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2767 void **this_cache, struct frame_id *this_id)
2768 {
2769 struct amd64_frame_cache *cache =
2770 amd64_sigtramp_frame_cache (this_frame, this_cache);
2771
2772 if (!cache->base_p)
2773 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2774 else if (cache->base == 0)
2775 {
2776 /* This marks the outermost frame. */
2777 return;
2778 }
2779 else
2780 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2781 }
2782
2783 static struct value *
2784 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2785 void **this_cache, int regnum)
2786 {
2787 /* Make sure we've initialized the cache. */
2788 amd64_sigtramp_frame_cache (this_frame, this_cache);
2789
2790 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2791 }
2792
2793 static int
2794 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2795 struct frame_info *this_frame,
2796 void **this_cache)
2797 {
2798 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2799
2800 /* We shouldn't even bother if we don't have a sigcontext_addr
2801 handler. */
2802 if (tdep->sigcontext_addr == NULL)
2803 return 0;
2804
2805 if (tdep->sigtramp_p != NULL)
2806 {
2807 if (tdep->sigtramp_p (this_frame))
2808 return 1;
2809 }
2810
2811 if (tdep->sigtramp_start != 0)
2812 {
2813 CORE_ADDR pc = get_frame_pc (this_frame);
2814
2815 gdb_assert (tdep->sigtramp_end != 0);
2816 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2817 return 1;
2818 }
2819
2820 return 0;
2821 }
2822
2823 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2824 {
2825 SIGTRAMP_FRAME,
2826 amd64_sigtramp_frame_unwind_stop_reason,
2827 amd64_sigtramp_frame_this_id,
2828 amd64_sigtramp_frame_prev_register,
2829 NULL,
2830 amd64_sigtramp_frame_sniffer
2831 };
2832 \f
2833
2834 static CORE_ADDR
2835 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2836 {
2837 struct amd64_frame_cache *cache =
2838 amd64_frame_cache (this_frame, this_cache);
2839
2840 return cache->base;
2841 }
2842
2843 static const struct frame_base amd64_frame_base =
2844 {
2845 &amd64_frame_unwind,
2846 amd64_frame_base_address,
2847 amd64_frame_base_address,
2848 amd64_frame_base_address
2849 };
2850
2851 /* Normal frames, but in a function epilogue. */
2852
2853 /* Implement the stack_frame_destroyed_p gdbarch method.
2854
2855 The epilogue is defined here as the 'ret' instruction, which will
2856 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2857 the function's stack frame. */
2858
2859 static int
2860 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2861 {
2862 gdb_byte insn;
2863 struct compunit_symtab *cust;
2864
2865 cust = find_pc_compunit_symtab (pc);
2866 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
2867 return 0;
2868
2869 if (target_read_memory (pc, &insn, 1))
2870 return 0; /* Can't read memory at pc. */
2871
2872 if (insn != 0xc3) /* 'ret' instruction. */
2873 return 0;
2874
2875 return 1;
2876 }
2877
2878 static int
2879 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2880 struct frame_info *this_frame,
2881 void **this_prologue_cache)
2882 {
2883 if (frame_relative_level (this_frame) == 0)
2884 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
2885 get_frame_pc (this_frame));
2886 else
2887 return 0;
2888 }
2889
2890 static struct amd64_frame_cache *
2891 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2892 {
2893 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2894 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2895 struct amd64_frame_cache *cache;
2896 gdb_byte buf[8];
2897
2898 if (*this_cache)
2899 return (struct amd64_frame_cache *) *this_cache;
2900
2901 cache = amd64_alloc_frame_cache ();
2902 *this_cache = cache;
2903
2904 try
2905 {
2906 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2907 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2908 cache->base = extract_unsigned_integer (buf, 8,
2909 byte_order) + cache->sp_offset;
2910
2911 /* Cache pc will be the frame's pc. */
2912 cache->pc = get_frame_pc (this_frame);
2913
2914 /* The saved %rsp will be at cache->base plus 16. */
2915 cache->saved_sp = cache->base + 16;
2916
2917 /* The saved %rip will be at cache->base plus 8. */
2918 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2919
2920 cache->base_p = 1;
2921 }
2922 catch (const gdb_exception_error &ex)
2923 {
2924 if (ex.error != NOT_AVAILABLE_ERROR)
2925 throw;
2926 }
2927
2928 return cache;
2929 }
2930
2931 static enum unwind_stop_reason
2932 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2933 void **this_cache)
2934 {
2935 struct amd64_frame_cache *cache
2936 = amd64_epilogue_frame_cache (this_frame, this_cache);
2937
2938 if (!cache->base_p)
2939 return UNWIND_UNAVAILABLE;
2940
2941 return UNWIND_NO_REASON;
2942 }
2943
2944 static void
2945 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2946 void **this_cache,
2947 struct frame_id *this_id)
2948 {
2949 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2950 this_cache);
2951
2952 if (!cache->base_p)
2953 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2954 else
2955 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2956 }
2957
2958 static const struct frame_unwind amd64_epilogue_frame_unwind =
2959 {
2960 NORMAL_FRAME,
2961 amd64_epilogue_frame_unwind_stop_reason,
2962 amd64_epilogue_frame_this_id,
2963 amd64_frame_prev_register,
2964 NULL,
2965 amd64_epilogue_frame_sniffer
2966 };
2967
2968 static struct frame_id
2969 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2970 {
2971 CORE_ADDR fp;
2972
2973 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2974
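/* As in amd64_frame_this_id, the frame ID's stack address is the
   caller's %rsp, 16 bytes above the frame pointer. */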
2975 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2976 }
2977
2978 /* 16-byte align the SP per frame requirements. */
2979
2980 static CORE_ADDR
2981 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2982 {
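/* AND-ing with -16 (...fff0 in two's complement) rounds SP down to a
   16-byte boundary. */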
2983 return sp & -(CORE_ADDR)16;
2984 }
2985 \f
2986
2987 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2988 in the floating-point register set REGSET to register cache
2989 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2990
2991 static void
2992 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2993 int regnum, const void *fpregs, size_t len)
2994 {
2995 struct gdbarch *gdbarch = regcache->arch ();
2996 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2997
2998 gdb_assert (len >= tdep->sizeof_fpregset);
2999 amd64_supply_fxsave (regcache, regnum, fpregs);
3000 }
3001
3002 /* Collect register REGNUM from the register cache REGCACHE and store
3003 it in the buffer specified by FPREGS and LEN as described by the
3004 floating-point register set REGSET. If REGNUM is -1, do this for
3005 all registers in REGSET. */
3006
3007 static void
3008 amd64_collect_fpregset (const struct regset *regset,
3009 const struct regcache *regcache,
3010 int regnum, void *fpregs, size_t len)
3011 {
3012 struct gdbarch *gdbarch = regcache->arch ();
3013 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3014
3015 gdb_assert (len >= tdep->sizeof_fpregset);
3016 amd64_collect_fxsave (regcache, regnum, fpregs);
3017 }
3018
3019 const struct regset amd64_fpregset =
3020 {
3021 NULL, amd64_supply_fpregset, amd64_collect_fpregset
3022 };
3023 \f
3024
3025 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
3026 %rdi. We expect its value to be a pointer to the jmp_buf structure
3027 from which we extract the address that we will land at. This
3028 address is copied into PC. This routine returns non-zero on
3029 success. */
3030
3031 static int
3032 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
3033 {
3034 gdb_byte buf[8];
3035 CORE_ADDR jb_addr;
3036 struct gdbarch *gdbarch = get_frame_arch (frame);
3037 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
3038 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
3039
3040 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3041 longjmp will land. */
3042 if (jb_pc_offset == -1)
3043 return 0;
3044
3045 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
3046 jb_addr = extract_typed_address
3047 (buf, builtin_type (gdbarch)->builtin_data_ptr);
3048 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
3049 return 0;
3050
3051 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
3052
3053 return 1;
3054 }
3055
3056 static const int amd64_record_regmap[] =
3057 {
3058 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
3059 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
3060 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
3061 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
3062 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
3063 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
3064 };
3065
3066 /* Implement the "in_indirect_branch_thunk" gdbarch function. */
3067
3068 static bool
3069 amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
3070 {
3071 return x86_in_indirect_branch_thunk (pc, amd64_register_names,
3072 AMD64_RAX_REGNUM,
3073 AMD64_RIP_REGNUM);
3074 }
3075
3076 void
3077 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3078 const target_desc *default_tdesc)
3079 {
3080 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3081 const struct target_desc *tdesc = info.target_desc;
3082 static const char *const stap_integer_prefixes[] = { "$", NULL };
3083 static const char *const stap_register_prefixes[] = { "%", NULL };
3084 static const char *const stap_register_indirection_prefixes[] = { "(",
3085 NULL };
3086 static const char *const stap_register_indirection_suffixes[] = { ")",
3087 NULL };
3088
3089 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3090 floating-point registers. */
3091 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3092 tdep->fpregset = &amd64_fpregset;
3093
3094 if (! tdesc_has_registers (tdesc))
3095 tdesc = default_tdesc;
3096 tdep->tdesc = tdesc;
3097
3098 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3099 tdep->register_names = amd64_register_names;
3100
3101 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3102 {
3103 tdep->zmmh_register_names = amd64_zmmh_names;
3104 tdep->k_register_names = amd64_k_names;
3105 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3106 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3107
3108 tdep->num_zmm_regs = 32;
3109 tdep->num_xmm_avx512_regs = 16;
3110 tdep->num_ymm_avx512_regs = 16;
3111
3112 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3113 tdep->k0_regnum = AMD64_K0_REGNUM;
3114 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3115 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3116 }
3117
3118 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3119 {
3120 tdep->ymmh_register_names = amd64_ymmh_names;
3121 tdep->num_ymm_regs = 16;
3122 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3123 }
3124
3125 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3126 {
3127 tdep->mpx_register_names = amd64_mpx_names;
3128 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3129 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3130 }
3131
3132 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3133 {
3134 tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
3135 }
3136
3137 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3138 {
3139 tdep->pkeys_register_names = amd64_pkeys_names;
3140 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3141 tdep->num_pkeys_regs = 1;
3142 }
3143
3144 tdep->num_byte_regs = 20;
3145 tdep->num_word_regs = 16;
3146 tdep->num_dword_regs = 16;
3147 /* Avoid wiring in the MMX registers for now. */
3148 tdep->num_mmx_regs = 0;
3149
3150 set_gdbarch_pseudo_register_read_value (gdbarch,
3151 amd64_pseudo_register_read_value);
3152 set_gdbarch_pseudo_register_write (gdbarch,
3153 amd64_pseudo_register_write);
3154 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3155 amd64_ax_pseudo_register_collect);
3156
3157 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3158
3159 /* AMD64 has an FPU and 16 SSE registers. */
3160 tdep->st0_regnum = AMD64_ST0_REGNUM;
3161 tdep->num_xmm_regs = 16;
3162
3163 /* This is what all the fuss is about. */
3164 set_gdbarch_long_bit (gdbarch, 64);
3165 set_gdbarch_long_long_bit (gdbarch, 64);
3166 set_gdbarch_ptr_bit (gdbarch, 64);
3167
3168 /* In contrast to the i386, on AMD64 a `long double' actually takes
3169 up 128 bits, even though it's still based on the i387 extended
3170 floating-point format which has only 80 significant bits. */
3171 set_gdbarch_long_double_bit (gdbarch, 128);
3172
3173 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3174
3175 /* Register numbers of various important registers. */
3176 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3177 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3178 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3179 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3180
3181 /* The "default" register numbering scheme for AMD64 is referred to
3182 as the "DWARF Register Number Mapping" in the System V psABI.
3183 The preferred debugging format for all known AMD64 targets is
3184 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3185 DWARF-1), but we provide the same mapping just in case. This
3186 mapping is also used for stabs, which GCC does support. */
3187 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3188 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3189
3190 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3191 be in use on any of the supported AMD64 targets. */
3192
3193 /* Call dummy code. */
3194 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3195 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3196 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3197
3198 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3199 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3200 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3201
3202 set_gdbarch_return_value (gdbarch, amd64_return_value);
3203
3204 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3205
3206 tdep->record_regmap = amd64_record_regmap;
3207
3208 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3209
3210 /* Hook the function epilogue frame unwinder. This unwinder is
3211 prepended to the list, so that it supersedes the other
3212 unwinders in function epilogues. */
3213 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3214
3215 /* Hook the prologue-based frame unwinders. */
3216 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3217 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3218 frame_base_set_default (gdbarch, &amd64_frame_base);
3219
3220 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3221
3222 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3223
3224 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3225
3226 /* SystemTap variables and functions. */
3227 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3228 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3229 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3230 stap_register_indirection_prefixes);
3231 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3232 stap_register_indirection_suffixes);
3233 set_gdbarch_stap_is_single_operand (gdbarch,
3234 i386_stap_is_single_operand);
3235 set_gdbarch_stap_parse_special_token (gdbarch,
3236 i386_stap_parse_special_token);
3237 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3238 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3239 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3240
3241 set_gdbarch_in_indirect_branch_thunk (gdbarch,
3242 amd64_in_indirect_branch_thunk);
3243 }
3244
3245 /* Initialize ARCH for x86-64, no osabi. */
3246
3247 static void
3248 amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
3249 {
3250 amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
3251 true));
3252 }
3253
3254 static struct type *
3255 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3256 {
3257 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3258
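/* The x32 dword pseudo registers mirror the raw register order
   (%eax ... %eip), so the offset from %eax identifies which register
   this is. */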
3259 switch (regnum - tdep->eax_regnum)
3260 {
3261 case AMD64_RBP_REGNUM: /* %ebp */
3262 case AMD64_RSP_REGNUM: /* %esp */
3263 return builtin_type (gdbarch)->builtin_data_ptr;
3264 case AMD64_RIP_REGNUM: /* %eip */
3265 return builtin_type (gdbarch)->builtin_func_ptr;
3266 }
3267
3268 return i386_pseudo_register_type (gdbarch, regnum);
3269 }
3270
3271 void
3272 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3273 const target_desc *default_tdesc)
3274 {
3275 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3276
3277 amd64_init_abi (info, gdbarch, default_tdesc);
3278
3279 tdep->num_dword_regs = 17;
3280 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3281
3282 set_gdbarch_long_bit (gdbarch, 32);
3283 set_gdbarch_ptr_bit (gdbarch, 32);
3284 }
3285
3286 /* Initialize ARCH for x64-32, no osabi. */
3287
3288 static void
3289 amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
3290 {
3291 amd64_x32_init_abi (info, arch,
3292 amd64_target_description (X86_XSTATE_SSE_MASK, true));
3293 }
3294
3295 /* Return the target description for a specified XSAVE feature mask. */
3296
3297 const struct target_desc *
3298 amd64_target_description (uint64_t xcr0, bool segments)
3299 {
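/* Lazily create and cache one description per combination of the
   XSAVE feature bits we distinguish, plus the segments flag. */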
3300 static target_desc *amd64_tdescs \
3301 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
3302 target_desc **tdesc;
3303
3304 tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
3305 [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
3306 [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
3307 [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
3308 [segments ? 1 : 0];
3309
3310 if (*tdesc == NULL)
3311 *tdesc = amd64_create_target_description (xcr0, false, false,
3312 segments);
3313
3314 return *tdesc;
3315 }
3316
3317 void
3318 _initialize_amd64_tdep (void)
3319 {
3320 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
3321 amd64_none_init_abi);
3322 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
3323 amd64_x32_none_init_abi);
3324 }
3325 \f
3326
3327 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3328 sense that the instruction pointer and data pointer are simply
3329 64-bit offsets into the code segment and the data segment instead
3330 of a selector offset pair. The functions below store the upper 32
3331 bits of these pointers (instead of just the 16 bits of the segment
3332 selector). */
3333
3334 /* Fill register REGNUM in REGCACHE with the appropriate
3335 floating-point or SSE register value from *FXSAVE. If REGNUM is
3336 -1, do this for all registers. This function masks off any of the
3337 reserved bits in *FXSAVE. */
3338
3339 void
3340 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3341 const void *fxsave)
3342 {
3343 struct gdbarch *gdbarch = regcache->arch ();
3344 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3345
3346 i387_supply_fxsave (regcache, regnum, fxsave);
3347
3348 if (fxsave
3349 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3350 {
3351 const gdb_byte *regs = (const gdb_byte *) fxsave;
3352
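/* In the 64-bit FXSAVE layout, FIP occupies bytes 8-15 and FDP
   bytes 16-23, so bytes 12 and 20 hold the upper 32 bits described
   above. */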
3353 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3354 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3355 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3356 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3357 }
3358 }
3359
3360 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3361
3362 void
3363 amd64_supply_xsave (struct regcache *regcache, int regnum,
3364 const void *xsave)
3365 {
3366 struct gdbarch *gdbarch = regcache->arch ();
3367 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3368
3369 i387_supply_xsave (regcache, regnum, xsave);
3370
3371 if (xsave
3372 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3373 {
3374 const gdb_byte *regs = (const gdb_byte *) xsave;
3375 ULONGEST clear_bv;
3376
3377 clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);
3378
3379 /* If the FISEG and FOSEG registers have not been initialised yet
3380 (their CLEAR_BV bit is set) then their default values of zero will
3381 have already been setup by I387_SUPPLY_XSAVE. */
3382 if (!(clear_bv & X86_XSTATE_X87))
3383 {
3384 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3385 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3386 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3387 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3388 }
3389 }
3390 }
3391
3392 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3393 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3394 all registers. This function doesn't touch any of the reserved
3395 bits in *FXSAVE. */
3396
3397 void
3398 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3399 void *fxsave)
3400 {
3401 struct gdbarch *gdbarch = regcache->arch ();
3402 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3403 gdb_byte *regs = (gdb_byte *) fxsave;
3404
3405 i387_collect_fxsave (regcache, regnum, fxsave);
3406
3407 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3408 {
3409 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3410 regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
3411 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3412 regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
3413 }
3414 }
3415
3416 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3417
3418 void
3419 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3420 void *xsave, int gcore)
3421 {
3422 struct gdbarch *gdbarch = regcache->arch ();
3423 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3424 gdb_byte *regs = (gdb_byte *) xsave;
3425
3426 i387_collect_xsave (regcache, regnum, xsave, gcore);
3427
3428 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3429 {
3430 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3431 regcache->raw_collect (I387_FISEG_REGNUM (tdep),
3432 regs + 12);
3433 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3434 regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
3435 regs + 20);
3436 }
3437 }