Add support for Intel PKRU register to GDB and GDBserver.
gdb/amd64-tdep.c
1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001-2017 Free Software Foundation, Inc.
4
5 Contributed by Jiri Smid, SuSE Labs.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "opcode/i386.h"
24 #include "dis-asm.h"
25 #include "arch-utils.h"
26 #include "block.h"
27 #include "dummy-frame.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "inferior.h"
32 #include "infrun.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39 #include "disasm.h"
40 #include "amd64-tdep.h"
41 #include "i387-tdep.h"
42 #include "x86-xstate.h"
43 #include <algorithm>
44
45 #include "features/i386/amd64.c"
46 #include "features/i386/amd64-avx.c"
47 #include "features/i386/amd64-mpx.c"
48 #include "features/i386/amd64-avx-mpx.c"
49 #include "features/i386/amd64-avx-avx512.c"
50 #include "features/i386/amd64-avx-mpx-avx512-pku.c"
51
52 #include "features/i386/x32.c"
53 #include "features/i386/x32-avx.c"
54 #include "features/i386/x32-avx-avx512.c"
55
56 #include "ax.h"
57 #include "ax-gdb.h"
58
59 /* Note that the AMD64 architecture was previously known as x86-64.
60 The latter is (forever) engraved into the canonical system name as
61 returned by config.guess, and used as the name for the AMD64 port
62 of GNU/Linux. The BSDs have renamed their ports to amd64; they
63 don't like to shout. For GDB we prefer the amd64_-prefix over the
64 x86_64_-prefix since it's so much easier to type. */
65
66 /* Register information. */
67
68 static const char *amd64_register_names[] =
69 {
70 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
71
72 /* %r8 is indeed register number 8. */
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
74 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
75
76 /* %st0 is register number 24. */
77 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
78 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
79
80 /* %xmm0 is register number 40. */
81 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
82 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
83 "mxcsr",
84 };
85
86 static const char *amd64_ymm_names[] =
87 {
88 "ymm0", "ymm1", "ymm2", "ymm3",
89 "ymm4", "ymm5", "ymm6", "ymm7",
90 "ymm8", "ymm9", "ymm10", "ymm11",
91 "ymm12", "ymm13", "ymm14", "ymm15"
92 };
93
94 static const char *amd64_ymm_avx512_names[] =
95 {
96 "ymm16", "ymm17", "ymm18", "ymm19",
97 "ymm20", "ymm21", "ymm22", "ymm23",
98 "ymm24", "ymm25", "ymm26", "ymm27",
99 "ymm28", "ymm29", "ymm30", "ymm31"
100 };
101
102 static const char *amd64_ymmh_names[] =
103 {
104 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
105 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
106 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
107 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
108 };
109
110 static const char *amd64_ymmh_avx512_names[] =
111 {
112 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
113 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
114 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
115 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
116 };
117
118 static const char *amd64_mpx_names[] =
119 {
120 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
121 };
122
123 static const char *amd64_k_names[] =
124 {
125 "k0", "k1", "k2", "k3",
126 "k4", "k5", "k6", "k7"
127 };
128
129 static const char *amd64_zmmh_names[] =
130 {
131 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
132 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
133 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
134 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
135 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
136 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
137 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
138 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
139 };
140
141 static const char *amd64_zmm_names[] =
142 {
143 "zmm0", "zmm1", "zmm2", "zmm3",
144 "zmm4", "zmm5", "zmm6", "zmm7",
145 "zmm8", "zmm9", "zmm10", "zmm11",
146 "zmm12", "zmm13", "zmm14", "zmm15",
147 "zmm16", "zmm17", "zmm18", "zmm19",
148 "zmm20", "zmm21", "zmm22", "zmm23",
149 "zmm24", "zmm25", "zmm26", "zmm27",
150 "zmm28", "zmm29", "zmm30", "zmm31"
151 };
152
153 static const char *amd64_xmm_avx512_names[] = {
154 "xmm16", "xmm17", "xmm18", "xmm19",
155 "xmm20", "xmm21", "xmm22", "xmm23",
156 "xmm24", "xmm25", "xmm26", "xmm27",
157 "xmm28", "xmm29", "xmm30", "xmm31"
158 };
159
160 static const char *amd64_pkeys_names[] = {
161 "pkru"
162 };
163
164 /* DWARF Register Number Mapping as defined in the System V psABI,
165 section 3.6. */
166
167 static int amd64_dwarf_regmap[] =
168 {
169 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
170 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
171 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
172 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
173
174 /* Frame Pointer Register RBP. */
175 AMD64_RBP_REGNUM,
176
177 /* Stack Pointer Register RSP. */
178 AMD64_RSP_REGNUM,
179
180 /* Extended Integer Registers 8 - 15. */
181 AMD64_R8_REGNUM, /* %r8 */
182 AMD64_R9_REGNUM, /* %r9 */
183 AMD64_R10_REGNUM, /* %r10 */
184 AMD64_R11_REGNUM, /* %r11 */
185 AMD64_R12_REGNUM, /* %r12 */
186 AMD64_R13_REGNUM, /* %r13 */
187 AMD64_R14_REGNUM, /* %r14 */
188 AMD64_R15_REGNUM, /* %r15 */
189
190 /* Return Address RA. Mapped to RIP. */
191 AMD64_RIP_REGNUM,
192
193 /* SSE Registers 0 - 7. */
194 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
195 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
196 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
197 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
198
199 /* Extended SSE Registers 8 - 15. */
200 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
201 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
202 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
203 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
204
205 /* Floating Point Registers 0-7. */
206 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
207 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
208 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
209 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
210
211 /* MMX Registers 0 - 7.
212 We have to handle those registers specifically, as their register
213 number within GDB depends on the target (or they may not even be
214 available at all). */
215 -1, -1, -1, -1, -1, -1, -1, -1,
216
217 /* Control and Status Flags Register. */
218 AMD64_EFLAGS_REGNUM,
219
220 /* Selector Registers. */
221 AMD64_ES_REGNUM,
222 AMD64_CS_REGNUM,
223 AMD64_SS_REGNUM,
224 AMD64_DS_REGNUM,
225 AMD64_FS_REGNUM,
226 AMD64_GS_REGNUM,
227 -1,
228 -1,
229
230 /* Segment Base Address Registers. */
231 -1,
232 -1,
233 -1,
234 -1,
235
236 /* Special Selector Registers. */
237 -1,
238 -1,
239
240 /* Floating Point Control Registers. */
241 AMD64_MXCSR_REGNUM,
242 AMD64_FCTRL_REGNUM,
243 AMD64_FSTAT_REGNUM
244 };
245
246 static const int amd64_dwarf_regmap_len =
247 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
248
249 /* Convert DWARF register number REG to the appropriate register
250 number used by GDB. */
251
252 static int
253 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
254 {
255 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
256 int ymm0_regnum = tdep->ymm0_regnum;
257 int regnum = -1;
258
259 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
260 regnum = amd64_dwarf_regmap[reg];
261
262 if (ymm0_regnum >= 0
263 && i386_xmm_regnum_p (gdbarch, regnum))
264 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
265
266 return regnum;
267 }
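/* Illustration only, not part of the original file: DWARF register 17
   is %xmm0 in the psABI numbering, so it maps to AMD64_XMM0_REGNUM.
   On a target description that provides the %ymm registers, the
   function above additionally rebases the result onto
   tdep->ymm0_regnum, so the DWARF reference resolves to the
   full-width register that actually backs %xmm0.  */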
268
269 /* Map architectural register numbers to gdb register numbers. */
270
271 static const int amd64_arch_regmap[16] =
272 {
273 AMD64_RAX_REGNUM, /* %rax */
274 AMD64_RCX_REGNUM, /* %rcx */
275 AMD64_RDX_REGNUM, /* %rdx */
276 AMD64_RBX_REGNUM, /* %rbx */
277 AMD64_RSP_REGNUM, /* %rsp */
278 AMD64_RBP_REGNUM, /* %rbp */
279 AMD64_RSI_REGNUM, /* %rsi */
280 AMD64_RDI_REGNUM, /* %rdi */
281 AMD64_R8_REGNUM, /* %r8 */
282 AMD64_R9_REGNUM, /* %r9 */
283 AMD64_R10_REGNUM, /* %r10 */
284 AMD64_R11_REGNUM, /* %r11 */
285 AMD64_R12_REGNUM, /* %r12 */
286 AMD64_R13_REGNUM, /* %r13 */
287 AMD64_R14_REGNUM, /* %r14 */
288 AMD64_R15_REGNUM /* %r15 */
289 };
290
291 static const int amd64_arch_regmap_len =
292 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
293
294 /* Convert architectural register number REG to the appropriate register
295 number used by GDB. */
296
297 static int
298 amd64_arch_reg_to_regnum (int reg)
299 {
300 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
301
302 return amd64_arch_regmap[reg];
303 }
304
305 /* Register names for byte pseudo-registers. */
306
307 static const char *amd64_byte_names[] =
308 {
309 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
310 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
311 "ah", "bh", "ch", "dh"
312 };
313
314 /* Number of lower byte registers. */
315 #define AMD64_NUM_LOWER_BYTE_REGS 16
316
317 /* Register names for word pseudo-registers. */
318
319 static const char *amd64_word_names[] =
320 {
321 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
322 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
323 };
324
325 /* Register names for dword pseudo-registers. */
326
327 static const char *amd64_dword_names[] =
328 {
329 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
330 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
331 "eip"
332 };
333
334 /* Return the name of register REGNUM. */
335
336 static const char *
337 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
338 {
339 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
340 if (i386_byte_regnum_p (gdbarch, regnum))
341 return amd64_byte_names[regnum - tdep->al_regnum];
342 else if (i386_zmm_regnum_p (gdbarch, regnum))
343 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
344 else if (i386_ymm_regnum_p (gdbarch, regnum))
345 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
346 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
347 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
348 else if (i386_word_regnum_p (gdbarch, regnum))
349 return amd64_word_names[regnum - tdep->ax_regnum];
350 else if (i386_dword_regnum_p (gdbarch, regnum))
351 return amd64_dword_names[regnum - tdep->eax_regnum];
352 else
353 return i386_pseudo_register_name (gdbarch, regnum);
354 }
355
356 static struct value *
357 amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
358 struct regcache *regcache,
359 int regnum)
360 {
361 gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum));
362 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
363 enum register_status status;
364 struct value *result_value;
365 gdb_byte *buf;
366
367 result_value = allocate_value (register_type (gdbarch, regnum));
368 VALUE_LVAL (result_value) = lval_register;
369 VALUE_REGNUM (result_value) = regnum;
370 buf = value_contents_raw (result_value);
371
372 if (i386_byte_regnum_p (gdbarch, regnum))
373 {
374 int gpnum = regnum - tdep->al_regnum;
375
376 /* Extract (always little endian). */
377 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
378 {
379 /* Special handling for AH, BH, CH, DH. */
380 status = regcache_raw_read (regcache,
381 gpnum - AMD64_NUM_LOWER_BYTE_REGS,
382 raw_buf);
383 if (status == REG_VALID)
384 memcpy (buf, raw_buf + 1, 1);
385 else
386 mark_value_bytes_unavailable (result_value, 0,
387 TYPE_LENGTH (value_type (result_value)));
388 }
389 else
390 {
391 status = regcache_raw_read (regcache, gpnum, raw_buf);
392 if (status == REG_VALID)
393 memcpy (buf, raw_buf, 1);
394 else
395 mark_value_bytes_unavailable (result_value, 0,
396 TYPE_LENGTH (value_type (result_value)));
397 }
398 }
399 else if (i386_dword_regnum_p (gdbarch, regnum))
400 {
401 int gpnum = regnum - tdep->eax_regnum;
402 /* Extract (always little endian). */
403 status = regcache_raw_read (regcache, gpnum, raw_buf);
404 if (status == REG_VALID)
405 memcpy (buf, raw_buf, 4);
406 else
407 mark_value_bytes_unavailable (result_value, 0,
408 TYPE_LENGTH (value_type (result_value)));
409 }
410 else
411 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
412 result_value);
413
414 return result_value;
415 }
416
417 static void
418 amd64_pseudo_register_write (struct gdbarch *gdbarch,
419 struct regcache *regcache,
420 int regnum, const gdb_byte *buf)
421 {
422 gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum));
423 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
424
425 if (i386_byte_regnum_p (gdbarch, regnum))
426 {
427 int gpnum = regnum - tdep->al_regnum;
428
429 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
430 {
431 /* Read ... AH, BH, CH, DH. */
432 regcache_raw_read (regcache,
433 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
434 /* ... Modify ... (always little endian). */
435 memcpy (raw_buf + 1, buf, 1);
436 /* ... Write. */
437 regcache_raw_write (regcache,
438 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
439 }
440 else
441 {
442 /* Read ... */
443 regcache_raw_read (regcache, gpnum, raw_buf);
444 /* ... Modify ... (always little endian). */
445 memcpy (raw_buf, buf, 1);
446 /* ... Write. */
447 regcache_raw_write (regcache, gpnum, raw_buf);
448 }
449 }
450 else if (i386_dword_regnum_p (gdbarch, regnum))
451 {
452 int gpnum = regnum - tdep->eax_regnum;
453
454 /* Read ... */
455 regcache_raw_read (regcache, gpnum, raw_buf);
456 /* ... Modify ... (always little endian). */
457 memcpy (raw_buf, buf, 4);
458 /* ... Write. */
459 regcache_raw_write (regcache, gpnum, raw_buf);
460 }
461 else
462 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
463 }
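/* Illustration only, not part of the original file: the byte-level
   effect of the read-modify-write sequence above, sketched on a plain
   64-bit value.  In little-endian raw-register order, %ah is byte 1 of
   %rax and %al is byte 0.  The helper name is made up for this
   sketch.  */

static ULONGEST
amd64_example_poke_ah (ULONGEST rax, gdb_byte ah)
{
  /* Replace byte 1, keeping the remaining seven bytes intact.  */
  return (rax & ~(ULONGEST) 0xff00) | ((ULONGEST) ah << 8);
}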
464
465 /* Implement the 'ax_pseudo_register_collect' gdbarch method. */
466
467 static int
468 amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
469 struct agent_expr *ax, int regnum)
470 {
471 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
472
473 if (i386_byte_regnum_p (gdbarch, regnum))
474 {
475 int gpnum = regnum - tdep->al_regnum;
476
477 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
478 ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
479 else
480 ax_reg_mask (ax, gpnum);
481 return 0;
482 }
483 else if (i386_dword_regnum_p (gdbarch, regnum))
484 {
485 int gpnum = regnum - tdep->eax_regnum;
486
487 ax_reg_mask (ax, gpnum);
488 return 0;
489 }
490 else
491 return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
492 }
493
494 \f
495
496 /* Register classes as defined in the psABI. */
497
498 enum amd64_reg_class
499 {
500 AMD64_INTEGER,
501 AMD64_SSE,
502 AMD64_SSEUP,
503 AMD64_X87,
504 AMD64_X87UP,
505 AMD64_COMPLEX_X87,
506 AMD64_NO_CLASS,
507 AMD64_MEMORY
508 };
509
510 /* Return the union class of CLASS1 and CLASS2. See the psABI for
511 details. */
512
513 static enum amd64_reg_class
514 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
515 {
516 /* Rule (a): If both classes are equal, this is the resulting class. */
517 if (class1 == class2)
518 return class1;
519
520 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
521 is the other class. */
522 if (class1 == AMD64_NO_CLASS)
523 return class2;
524 if (class2 == AMD64_NO_CLASS)
525 return class1;
526
527 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
528 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
529 return AMD64_MEMORY;
530
531 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
532 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
533 return AMD64_INTEGER;
534
535 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
536 MEMORY is used as class. */
537 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
538 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
539 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
540 return AMD64_MEMORY;
541
542 /* Rule (f): Otherwise class SSE is used. */
543 return AMD64_SSE;
544 }
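/* Illustration only, not part of the original file: a few concrete
   merges under the rules above.

     merge (NO_CLASS, INTEGER) -> INTEGER   (rule b)
     merge (INTEGER, SSE)      -> INTEGER   (rule d)
     merge (SSE, X87UP)        -> MEMORY    (rule e)
     merge (SSE, SSEUP)        -> SSE       (rule f)  */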
545
546 static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
547
548 /* Return non-zero if TYPE is a non-POD structure or union type. */
549
550 static int
551 amd64_non_pod_p (struct type *type)
552 {
553 /* ??? A class with a base class certainly isn't POD, but does this
554 catch all non-POD structure types? */
555 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
556 return 1;
557
558 return 0;
559 }
560
561 /* Classify TYPE according to the rules for aggregate (structures and
562 arrays) and union types, and store the result in CLASS. */
563
564 static void
565 amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
566 {
567 /* 1. If the size of an object is larger than two eightbytes, or in
568 C++, is a non-POD structure or union type, or contains
569 unaligned fields, it has class memory. */
570 if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
571 {
572 theclass[0] = theclass[1] = AMD64_MEMORY;
573 return;
574 }
575
576 /* 2. Both eightbytes get initialized to class NO_CLASS. */
577 theclass[0] = theclass[1] = AMD64_NO_CLASS;
578
579 /* 3. Each field of an object is classified recursively so that
580 always two fields are considered. The resulting class is
581 calculated according to the classes of the fields in the
582 eightbyte: */
583
584 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
585 {
586 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
587
588 /* All fields in an array have the same type. */
589 amd64_classify (subtype, theclass);
590 if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
591 theclass[1] = theclass[0];
592 }
593 else
594 {
595 int i;
596
597 /* Structure or union. */
598 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
599 || TYPE_CODE (type) == TYPE_CODE_UNION);
600
601 for (i = 0; i < TYPE_NFIELDS (type); i++)
602 {
603 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
604 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
605 enum amd64_reg_class subclass[2];
606 int bitsize = TYPE_FIELD_BITSIZE (type, i);
607 int endpos;
608
609 if (bitsize == 0)
610 bitsize = TYPE_LENGTH (subtype) * 8;
611 endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
612
613 /* Ignore static fields. */
614 if (field_is_static (&TYPE_FIELD (type, i)))
615 continue;
616
617 gdb_assert (pos == 0 || pos == 1);
618
619 amd64_classify (subtype, subclass);
620 theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
621 if (bitsize <= 64 && pos == 0 && endpos == 1)
622 /* This is a bit of an odd case: We have a field that would
623 normally fit in one of the two eightbytes, except that
624 it is placed in a way that this field straddles them.
625 This has been seen with a structure containing an array.
626
627 The ABI is a bit unclear in this case, but we assume that
628 this field's class (stored in subclass[0]) must also be merged
629 into class[1]. In other words, our field has a piece stored
630 in the second eight-byte, and thus its class applies to
631 the second eight-byte as well.
632
633 In the case where the field length exceeds 8 bytes,
634 it should not be necessary to merge the field class
635 into class[1]. As LEN > 8, subclass[1] is necessarily
636 different from AMD64_NO_CLASS. If subclass[1] is equal
637 to subclass[0], then the normal class[1]/subclass[1]
638 merging will take care of everything. For subclass[1]
639 to be different from subclass[0], I can only see the case
640 where we have a SSE/SSEUP or X87/X87UP pair, which both
641 use up all 16 bytes of the aggregate, and are already
642 handled just fine (because each portion sits on its own
643 8-byte). */
644 theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
645 if (pos == 0)
646 theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
647 }
648 }
649
650 /* 4. Then a post merger cleanup is done: */
651
652 /* Rule (a): If one of the classes is MEMORY, the whole argument is
653 passed in memory. */
654 if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
655 theclass[0] = theclass[1] = AMD64_MEMORY;
656
657 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
658 SSE. */
659 if (theclass[0] == AMD64_SSEUP)
660 theclass[0] = AMD64_SSE;
661 if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
662 theclass[1] = AMD64_SSE;
663 }
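/* Illustration only, using a hypothetical type that is not from the
   original file: in "struct { int a; int b[2]; int c; }" the 64-bit
   field B starts at bit offset 32, so for B we get POS == 0 but
   ENDPOS == 1 -- the straddling case handled above.  B's INTEGER
   subclass is merged into both eightbytes, the whole struct
   classifies as { INTEGER, INTEGER }, and it is passed in two
   integer registers.  */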
664
665 /* Classify TYPE, and store the result in CLASS. */
666
667 static void
668 amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
669 {
670 enum type_code code = TYPE_CODE (type);
671 int len = TYPE_LENGTH (type);
672
673 theclass[0] = theclass[1] = AMD64_NO_CLASS;
674
675 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
676 long, long long, and pointers are in the INTEGER class. Similarly,
677 range types, used by languages such as Ada, are also in the INTEGER
678 class. */
679 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
680 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
681 || code == TYPE_CODE_CHAR
682 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
683 && (len == 1 || len == 2 || len == 4 || len == 8))
684 theclass[0] = AMD64_INTEGER;
685
686 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
687 are in class SSE. */
688 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
689 && (len == 4 || len == 8))
690 /* FIXME: __m64. */
691 theclass[0] = AMD64_SSE;
692
693 /* Arguments of types __float128, _Decimal128 and __m128 are split into
694 two halves. The least significant ones belong to class SSE, the most
695 significant one to class SSEUP. */
696 else if (code == TYPE_CODE_DECFLOAT && len == 16)
697 /* FIXME: __float128, __m128. */
698 theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;
699
700 /* The 64-bit mantissa of arguments of type long double belongs to
701 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
702 class X87UP. */
703 else if (code == TYPE_CODE_FLT && len == 16)
704 /* Class X87 and X87UP. */
705 theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;
706
707 /* Arguments of complex T where T is one of the types float or
708 double get treated as if they are implemented as:
709
710 struct complexT {
711 T real;
712 T imag;
713 };
714
715 */
716 else if (code == TYPE_CODE_COMPLEX && len == 8)
717 theclass[0] = AMD64_SSE;
718 else if (code == TYPE_CODE_COMPLEX && len == 16)
719 theclass[0] = theclass[1] = AMD64_SSE;
720
721 /* A variable of type complex long double is classified as type
722 COMPLEX_X87. */
723 else if (code == TYPE_CODE_COMPLEX && len == 32)
724 theclass[0] = AMD64_COMPLEX_X87;
725
726 /* Aggregates. */
727 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
728 || code == TYPE_CODE_UNION)
729 amd64_classify_aggregate (type, theclass);
730 }
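/* Illustration only, not part of the original file: some sample
   classifications produced by the rules above.

     long                          -> { INTEGER, NO_CLASS }
     double                        -> { SSE, NO_CLASS }
     struct { long l; double d; }  -> { INTEGER, SSE }
     long double                   -> { X87, X87UP }
     struct { char c[24]; }        -> { MEMORY, MEMORY }  */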
731
732 static enum return_value_convention
733 amd64_return_value (struct gdbarch *gdbarch, struct value *function,
734 struct type *type, struct regcache *regcache,
735 gdb_byte *readbuf, const gdb_byte *writebuf)
736 {
737 enum amd64_reg_class theclass[2];
738 int len = TYPE_LENGTH (type);
739 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
740 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
741 int integer_reg = 0;
742 int sse_reg = 0;
743 int i;
744
745 gdb_assert (!(readbuf && writebuf));
746
747 /* 1. Classify the return type with the classification algorithm. */
748 amd64_classify (type, theclass);
749
750 /* 2. If the type has class MEMORY, then the caller provides space
751 for the return value and passes the address of this storage in
752 %rdi as if it were the first argument to the function. In effect,
753 this address becomes a hidden first argument.
754
755 On return %rax will contain the address that has been passed in
756 by the caller in %rdi. */
757 if (theclass[0] == AMD64_MEMORY)
758 {
759 /* As indicated by the comment above, the ABI guarantees that we
760 can always find the return value just after the function has
761 returned. */
762
763 if (readbuf)
764 {
765 ULONGEST addr;
766
767 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
768 read_memory (addr, readbuf, TYPE_LENGTH (type));
769 }
770
771 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
772 }
773
774 /* 8. If the class is COMPLEX_X87, the real part of the value is
775 returned in %st0 and the imaginary part in %st1. */
776 if (theclass[0] == AMD64_COMPLEX_X87)
777 {
778 if (readbuf)
779 {
780 regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
781 regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
782 }
783
784 if (writebuf)
785 {
786 i387_return_value (gdbarch, regcache);
787 regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
788 regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);
789
790 /* Fix up the tag word such that both %st(0) and %st(1) are
791 marked as valid. */
792 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
793 }
794
795 return RETURN_VALUE_REGISTER_CONVENTION;
796 }
797
798 gdb_assert (theclass[1] != AMD64_MEMORY);
799 gdb_assert (len <= 16);
800
801 for (i = 0; len > 0; i++, len -= 8)
802 {
803 int regnum = -1;
804 int offset = 0;
805
806 switch (theclass[i])
807 {
808 case AMD64_INTEGER:
809 /* 3. If the class is INTEGER, the next available register
810 of the sequence %rax, %rdx is used. */
811 regnum = integer_regnum[integer_reg++];
812 break;
813
814 case AMD64_SSE:
815 /* 4. If the class is SSE, the next available SSE register
816 of the sequence %xmm0, %xmm1 is used. */
817 regnum = sse_regnum[sse_reg++];
818 break;
819
820 case AMD64_SSEUP:
821 /* 5. If the class is SSEUP, the eightbyte is passed in the
822 upper half of the last used SSE register. */
823 gdb_assert (sse_reg > 0);
824 regnum = sse_regnum[sse_reg - 1];
825 offset = 8;
826 break;
827
828 case AMD64_X87:
829 /* 6. If the class is X87, the value is returned on the X87
830 stack in %st0 as 80-bit x87 number. */
831 regnum = AMD64_ST0_REGNUM;
832 if (writebuf)
833 i387_return_value (gdbarch, regcache);
834 break;
835
836 case AMD64_X87UP:
837 /* 7. If the class is X87UP, the value is returned together
838 with the previous X87 value in %st0. */
839 gdb_assert (i > 0 && theclass[0] == AMD64_X87);
840 regnum = AMD64_ST0_REGNUM;
841 offset = 8;
842 len = 2;
843 break;
844
845 case AMD64_NO_CLASS:
846 continue;
847
848 default:
849 gdb_assert (!"Unexpected register class.");
850 }
851
852 gdb_assert (regnum != -1);
853
854 if (readbuf)
855 regcache_raw_read_part (regcache, regnum, offset, std::min (len, 8),
856 readbuf + i * 8);
857 if (writebuf)
858 regcache_raw_write_part (regcache, regnum, offset, std::min (len, 8),
859 writebuf + i * 8);
860 }
861
862 return RETURN_VALUE_REGISTER_CONVENTION;
863 }
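/* Illustration only, not part of the original file: for a function
   returning "struct { long l; double d; }" the classification is
   { INTEGER, SSE }, so the loop above reconstructs the value by
   reading bytes 0-7 from %rax and bytes 8-15 from %xmm0.  */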
864 \f
865
866 static CORE_ADDR
867 amd64_push_arguments (struct regcache *regcache, int nargs,
868 struct value **args, CORE_ADDR sp, int struct_return)
869 {
870 static int integer_regnum[] =
871 {
872 AMD64_RDI_REGNUM, /* %rdi */
873 AMD64_RSI_REGNUM, /* %rsi */
874 AMD64_RDX_REGNUM, /* %rdx */
875 AMD64_RCX_REGNUM, /* %rcx */
876 AMD64_R8_REGNUM, /* %r8 */
877 AMD64_R9_REGNUM /* %r9 */
878 };
879 static int sse_regnum[] =
880 {
881 /* %xmm0 ... %xmm7 */
882 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
883 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
884 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
885 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
886 };
887 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
888 int num_stack_args = 0;
889 int num_elements = 0;
890 int element = 0;
891 int integer_reg = 0;
892 int sse_reg = 0;
893 int i;
894
895 /* Reserve a register for the "hidden" argument. */
896 if (struct_return)
897 integer_reg++;
898
899 for (i = 0; i < nargs; i++)
900 {
901 struct type *type = value_type (args[i]);
902 int len = TYPE_LENGTH (type);
903 enum amd64_reg_class theclass[2];
904 int needed_integer_regs = 0;
905 int needed_sse_regs = 0;
906 int j;
907
908 /* Classify argument. */
909 amd64_classify (type, theclass);
910
911 /* Calculate the number of integer and SSE registers needed for
912 this argument. */
913 for (j = 0; j < 2; j++)
914 {
915 if (theclass[j] == AMD64_INTEGER)
916 needed_integer_regs++;
917 else if (theclass[j] == AMD64_SSE)
918 needed_sse_regs++;
919 }
920
921 /* Check whether enough registers are available, and if the
922 argument should be passed in registers at all. */
923 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
924 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
925 || (needed_integer_regs == 0 && needed_sse_regs == 0))
926 {
927 /* The argument will be passed on the stack. */
928 num_elements += ((len + 7) / 8);
929 stack_args[num_stack_args++] = args[i];
930 }
931 else
932 {
933 /* The argument will be passed in registers. */
934 const gdb_byte *valbuf = value_contents (args[i]);
935 gdb_byte buf[8];
936
937 gdb_assert (len <= 16);
938
939 for (j = 0; len > 0; j++, len -= 8)
940 {
941 int regnum = -1;
942 int offset = 0;
943
944 switch (theclass[j])
945 {
946 case AMD64_INTEGER:
947 regnum = integer_regnum[integer_reg++];
948 break;
949
950 case AMD64_SSE:
951 regnum = sse_regnum[sse_reg++];
952 break;
953
954 case AMD64_SSEUP:
955 gdb_assert (sse_reg > 0);
956 regnum = sse_regnum[sse_reg - 1];
957 offset = 8;
958 break;
959
960 default:
961 gdb_assert (!"Unexpected register class.");
962 }
963
964 gdb_assert (regnum != -1);
965 memset (buf, 0, sizeof buf);
966 memcpy (buf, valbuf + j * 8, std::min (len, 8));
967 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
968 }
969 }
970 }
971
972 /* Allocate space for the arguments on the stack. */
973 sp -= num_elements * 8;
974
975 /* The psABI says that "The end of the input argument area shall be
976 aligned on a 16 byte boundary." */
977 sp &= ~0xf;
978
979 /* Write out the arguments to the stack. */
980 for (i = 0; i < num_stack_args; i++)
981 {
982 struct type *type = value_type (stack_args[i]);
983 const gdb_byte *valbuf = value_contents (stack_args[i]);
984 int len = TYPE_LENGTH (type);
985
986 write_memory (sp + element * 8, valbuf, len);
987 element += ((len + 7) / 8);
988 }
989
990 /* The psABI says that "For calls that may call functions that use
991 varargs or stdargs (prototype-less calls or calls to functions
992 containing ellipsis (...) in the declaration) %al is used as
993 hidden argument to specify the number of SSE registers used". */
994 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
995 return sp;
996 }
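/* Illustration only, not part of the original file: for a call such
   as f (1L, 2.0, s), where S is a 24-byte struct, the long goes in
   %rdi, the double in %xmm0, and S occupies three eightbytes on the
   stack; %al is then set to 1, the number of SSE registers used.  If
   the call also returns a big struct, %rdi is reserved above for the
   hidden return-value pointer and the long moves to %rsi.  */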
997
998 static CORE_ADDR
999 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1000 struct regcache *regcache, CORE_ADDR bp_addr,
1001 int nargs, struct value **args, CORE_ADDR sp,
1002 int struct_return, CORE_ADDR struct_addr)
1003 {
1004 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1005 gdb_byte buf[8];
1006
1007 /* Pass arguments. */
1008 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
1009
1010 /* Pass "hidden" argument. */
1011 if (struct_return)
1012 {
1013 store_unsigned_integer (buf, 8, byte_order, struct_addr);
1014 regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
1015 }
1016
1017 /* Store return address. */
1018 sp -= 8;
1019 store_unsigned_integer (buf, 8, byte_order, bp_addr);
1020 write_memory (sp, buf, 8);
1021
1022 /* Finally, update the stack pointer... */
1023 store_unsigned_integer (buf, 8, byte_order, sp);
1024 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
1025
1026 /* ...and fake a frame pointer. */
1027 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
1028
1029 return sp + 16;
1030 }
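/* Illustration only, not part of the original file: the resulting
   stack, with SP being the value written to %rsp above.

     sp + 16  the frame base returned by this function; assuming the
              amd64 unwinder's convention of fake %rbp (== sp) + 16
              for the CFA, the dummy frame's ID matches
     sp + 8   start of the stack-passed arguments, 16-byte aligned
     sp       BP_ADDR, the fake return address  */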
1031 \f
1032 /* Displaced instruction handling. */
1033
1034 /* A partially decoded instruction.
1035 This contains enough details for displaced stepping purposes. */
1036
1037 struct amd64_insn
1038 {
1039 /* The number of opcode bytes. */
1040 int opcode_len;
1041 /* The offset of the rex prefix or -1 if not present. */
1042 int rex_offset;
1043 /* The offset to the first opcode byte. */
1044 int opcode_offset;
1045 /* The offset to the modrm byte or -1 if not present. */
1046 int modrm_offset;
1047
1048 /* The raw instruction. */
1049 gdb_byte *raw_insn;
1050 };
1051
1052 struct displaced_step_closure
1053 {
1054 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1055 int tmp_used;
1056 int tmp_regno;
1057 ULONGEST tmp_save;
1058
1059 /* Details of the instruction. */
1060 struct amd64_insn insn_details;
1061
1062 /* Amount of space allocated to insn_buf. */
1063 int max_len;
1064
1065 /* The possibly modified insn.
1066 This is a variable-length field. */
1067 gdb_byte insn_buf[1];
1068 };
1069
1070 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1071 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1072 at which point delete these in favor of libopcodes' versions). */
1073
1074 static const unsigned char onebyte_has_modrm[256] = {
1075 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1076 /* ------------------------------- */
1077 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
1078 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
1079 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
1080 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
1081 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
1082 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
1083 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
1084 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
1085 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
1086 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
1087 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
1088 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
1089 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
1090 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
1091 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
1092 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
1093 /* ------------------------------- */
1094 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1095 };
1096
1097 static const unsigned char twobyte_has_modrm[256] = {
1098 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1099 /* ------------------------------- */
1100 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
1101 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
1102 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
1103 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
1104 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
1105 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
1106 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
1107 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
1108 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
1109 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
1110 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
1111 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
1112 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
1113 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
1114 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
1115 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
1116 /* ------------------------------- */
1117 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1118 };
1119
1120 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1121
1122 static int
1123 rex_prefix_p (gdb_byte pfx)
1124 {
1125 return REX_PREFIX_P (pfx);
1126 }
1127
1128 /* Skip the legacy instruction prefixes in INSN.
1129 We assume INSN is properly sentineled so we don't have to worry
1130 about falling off the end of the buffer. */
1131
1132 static gdb_byte *
1133 amd64_skip_prefixes (gdb_byte *insn)
1134 {
1135 while (1)
1136 {
1137 switch (*insn)
1138 {
1139 case DATA_PREFIX_OPCODE:
1140 case ADDR_PREFIX_OPCODE:
1141 case CS_PREFIX_OPCODE:
1142 case DS_PREFIX_OPCODE:
1143 case ES_PREFIX_OPCODE:
1144 case FS_PREFIX_OPCODE:
1145 case GS_PREFIX_OPCODE:
1146 case SS_PREFIX_OPCODE:
1147 case LOCK_PREFIX_OPCODE:
1148 case REPE_PREFIX_OPCODE:
1149 case REPNE_PREFIX_OPCODE:
1150 ++insn;
1151 continue;
1152 default:
1153 break;
1154 }
1155 break;
1156 }
1157
1158 return insn;
1159 }
1160
1161 /* Return an integer register (other than RSP) that is unused as an input
1162 operand in INSN.
1163 In order to not require adding a rex prefix if the insn doesn't already
1164 have one, the result is restricted to RAX ... RDI, sans RSP.
1165 The register numbering of the result follows architecture ordering,
1166 e.g. RDI = 7. */
1167
1168 static int
1169 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1170 {
1171 /* 1 bit for each reg */
1172 int used_regs_mask = 0;
1173
1174 /* There can be at most 3 int regs used as inputs in an insn, and we have
1175 7 to choose from (RAX ... RDI, sans RSP).
1176 This allows us to take a conservative approach and keep things simple.
1177 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1178 that implicitly specify RAX. */
1179
1180 /* Avoid RAX. */
1181 used_regs_mask |= 1 << EAX_REG_NUM;
1182 /* Similarly avoid RDX, an implicit operand in divides. */
1183 used_regs_mask |= 1 << EDX_REG_NUM;
1184 /* Avoid RSP. */
1185 used_regs_mask |= 1 << ESP_REG_NUM;
1186
1187 /* If the opcode is one byte long and there's no ModRM byte,
1188 assume the opcode specifies a register. */
1189 if (details->opcode_len == 1 && details->modrm_offset == -1)
1190 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1191
1192 /* Mark used regs in the modrm/sib bytes. */
1193 if (details->modrm_offset != -1)
1194 {
1195 int modrm = details->raw_insn[details->modrm_offset];
1196 int mod = MODRM_MOD_FIELD (modrm);
1197 int reg = MODRM_REG_FIELD (modrm);
1198 int rm = MODRM_RM_FIELD (modrm);
1199 int have_sib = mod != 3 && rm == 4;
1200
1201 /* Assume the reg field of the modrm byte specifies a register. */
1202 used_regs_mask |= 1 << reg;
1203
1204 if (have_sib)
1205 {
1206 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1207 int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1208 used_regs_mask |= 1 << base;
1209 used_regs_mask |= 1 << idx;
1210 }
1211 else
1212 {
1213 used_regs_mask |= 1 << rm;
1214 }
1215 }
1216
1217 gdb_assert (used_regs_mask < 256);
1218 gdb_assert (used_regs_mask != 255);
1219
1220 /* Finally, find a free reg. */
1221 {
1222 int i;
1223
1224 for (i = 0; i < 8; ++i)
1225 {
1226 if (! (used_regs_mask & (1 << i)))
1227 return i;
1228 }
1229
1230 /* We shouldn't get here. */
1231 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1232 }
1233 }
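/* Illustration only, not part of the original file: for the insn
   "mov (%rbx),%rcx" (48 8b 0b), the pre-seeded mask covers RAX, RDX
   and RSP; the ModRM reg field adds RCX and the rm field adds RBX,
   so the first free register found is %rbp (architecture number 5).  */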
1234
1235 /* Extract the details of INSN that we need. */
1236
1237 static void
1238 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1239 {
1240 gdb_byte *start = insn;
1241 int need_modrm;
1242
1243 details->raw_insn = insn;
1244
1245 details->opcode_len = -1;
1246 details->rex_offset = -1;
1247 details->opcode_offset = -1;
1248 details->modrm_offset = -1;
1249
1250 /* Skip legacy instruction prefixes. */
1251 insn = amd64_skip_prefixes (insn);
1252
1253 /* Skip REX instruction prefix. */
1254 if (rex_prefix_p (*insn))
1255 {
1256 details->rex_offset = insn - start;
1257 ++insn;
1258 }
1259
1260 details->opcode_offset = insn - start;
1261
1262 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1263 {
1264 /* Two or three-byte opcode. */
1265 ++insn;
1266 need_modrm = twobyte_has_modrm[*insn];
1267
1268 /* Check for three-byte opcode. */
1269 switch (*insn)
1270 {
1271 case 0x24:
1272 case 0x25:
1273 case 0x38:
1274 case 0x3a:
1275 case 0x7a:
1276 case 0x7b:
1277 ++insn;
1278 details->opcode_len = 3;
1279 break;
1280 default:
1281 details->opcode_len = 2;
1282 break;
1283 }
1284 }
1285 else
1286 {
1287 /* One-byte opcode. */
1288 need_modrm = onebyte_has_modrm[*insn];
1289 details->opcode_len = 1;
1290 }
1291
1292 if (need_modrm)
1293 {
1294 ++insn;
1295 details->modrm_offset = insn - start;
1296 }
1297 }
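/* Illustration only, not part of the original file: for the 7-byte
   insn "48 8b 05 d5 03 20 00" (mov 0x2003d5(%rip),%rax), the fields
   come out as rex_offset == 0, opcode_offset == 1, opcode_len == 1
   and modrm_offset == 2; the ModRM byte 0x05 is the %rip-relative
   encoding that fixup_riprel below rewrites.  */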
1298
1299 /* Update %rip-relative addressing in INSN.
1300
1301 %rip-relative addressing only uses a 32-bit displacement.
1302 32 bits is not enough to be guaranteed to cover the distance between where
1303 the real instruction is and where its copy is.
1304 Convert the insn to use base+disp addressing.
1305 We set base = pc + insn_length so we can leave disp unchanged. */
1306
1307 static void
1308 fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1309 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1310 {
1311 const struct amd64_insn *insn_details = &dsc->insn_details;
1312 int modrm_offset = insn_details->modrm_offset;
1313 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1314 CORE_ADDR rip_base;
1315 int insn_length;
1316 int arch_tmp_regno, tmp_regno;
1317 ULONGEST orig_value;
1318
1319 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1320 ++insn;
1321
1322 /* Compute the rip-relative address. */
1323 insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
1324 dsc->max_len, from);
1325 rip_base = from + insn_length;
1326
1327 /* We need a register to hold the address.
1328 Pick one not used in the insn.
1329 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1330 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1331 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1332
1333 /* REX.B should be unset as we were using rip-relative addressing,
1334 but ensure it's unset anyway; tmp_regno is not r8-r15. */
1335 if (insn_details->rex_offset != -1)
1336 dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1337
1338 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1339 dsc->tmp_regno = tmp_regno;
1340 dsc->tmp_save = orig_value;
1341 dsc->tmp_used = 1;
1342
1343 /* Convert the ModRM field to be base+disp. */
1344 dsc->insn_buf[modrm_offset] &= ~0xc7;
1345 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1346
1347 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1348
1349 if (debug_displaced)
1350 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1351 "displaced: using temp reg %d, old value %s, new value %s\n",
1352 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1353 paddress (gdbarch, rip_base));
1354 }
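/* Illustration only, not part of the original file: the ModRM rewrite
   performed above.  Mask 0xc7 covers the mod and rm fields; replacing
   them with mod == 10 and rm == the chosen temporary turns
   "disp32(%rip)" into "disp32(%tmp)" while leaving the reg field and
   the displacement bytes untouched.  The helper name is made up for
   this sketch.  */

static gdb_byte
amd64_example_rewrite_modrm (gdb_byte modrm, int arch_tmp_regno)
{
  return (modrm & ~0xc7) | (0x80 + arch_tmp_regno);
}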
1355
1356 static void
1357 fixup_displaced_copy (struct gdbarch *gdbarch,
1358 struct displaced_step_closure *dsc,
1359 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1360 {
1361 const struct amd64_insn *details = &dsc->insn_details;
1362
1363 if (details->modrm_offset != -1)
1364 {
1365 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1366
1367 if ((modrm & 0xc7) == 0x05)
1368 {
1369 /* The insn uses rip-relative addressing.
1370 Deal with it. */
1371 fixup_riprel (gdbarch, dsc, from, to, regs);
1372 }
1373 }
1374 }
1375
1376 struct displaced_step_closure *
1377 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1378 CORE_ADDR from, CORE_ADDR to,
1379 struct regcache *regs)
1380 {
1381 int len = gdbarch_max_insn_length (gdbarch);
1382 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1383 continually watch for running off the end of the buffer. */
1384 int fixup_sentinel_space = len;
1385 struct displaced_step_closure *dsc
1386 = ((struct displaced_step_closure *)
1387 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space));
1388 gdb_byte *buf = &dsc->insn_buf[0];
1389 struct amd64_insn *details = &dsc->insn_details;
1390
1391 dsc->tmp_used = 0;
1392 dsc->max_len = len + fixup_sentinel_space;
1393
1394 read_memory (from, buf, len);
1395
1396 /* Set up the sentinel space so we don't have to worry about running
1397 off the end of the buffer. An excessive number of leading prefixes
1398 could otherwise cause this. */
1399 memset (buf + len, 0, fixup_sentinel_space);
1400
1401 amd64_get_insn_details (buf, details);
1402
1403 /* GDB may get control back after the insn after the syscall.
1404 Presumably this is a kernel bug.
1405 If this is a syscall, make sure there's a nop afterwards. */
1406 {
1407 int syscall_length;
1408
1409 if (amd64_syscall_p (details, &syscall_length))
1410 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1411 }
1412
1413 /* Modify the insn to cope with the address where it will be executed from.
1414 In particular, handle any rip-relative addressing. */
1415 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1416
1417 write_memory (to, buf, len);
1418
1419 if (debug_displaced)
1420 {
1421 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1422 paddress (gdbarch, from), paddress (gdbarch, to));
1423 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1424 }
1425
1426 return dsc;
1427 }
1428
1429 static int
1430 amd64_absolute_jmp_p (const struct amd64_insn *details)
1431 {
1432 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1433
1434 if (insn[0] == 0xff)
1435 {
1436 /* jump near, absolute indirect (/4) */
1437 if ((insn[1] & 0x38) == 0x20)
1438 return 1;
1439
1440 /* jump far, absolute indirect (/5) */
1441 if ((insn[1] & 0x38) == 0x28)
1442 return 1;
1443 }
1444
1445 return 0;
1446 }
1447
1448 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1449
1450 static int
1451 amd64_jmp_p (const struct amd64_insn *details)
1452 {
1453 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1454
1455 /* jump short, relative. */
1456 if (insn[0] == 0xeb)
1457 return 1;
1458
1459 /* jump near, relative. */
1460 if (insn[0] == 0xe9)
1461 return 1;
1462
1463 return amd64_absolute_jmp_p (details);
1464 }
1465
1466 static int
1467 amd64_absolute_call_p (const struct amd64_insn *details)
1468 {
1469 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1470
1471 if (insn[0] == 0xff)
1472 {
1473 /* Call near, absolute indirect (/2) */
1474 if ((insn[1] & 0x38) == 0x10)
1475 return 1;
1476
1477 /* Call far, absolute indirect (/3) */
1478 if ((insn[1] & 0x38) == 0x18)
1479 return 1;
1480 }
1481
1482 return 0;
1483 }
1484
1485 static int
1486 amd64_ret_p (const struct amd64_insn *details)
1487 {
1488 /* NOTE: gcc can emit "repz ; ret". */
1489 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1490
1491 switch (insn[0])
1492 {
1493 case 0xc2: /* ret near, pop N bytes */
1494 case 0xc3: /* ret near */
1495 case 0xca: /* ret far, pop N bytes */
1496 case 0xcb: /* ret far */
1497 case 0xcf: /* iret */
1498 return 1;
1499
1500 default:
1501 return 0;
1502 }
1503 }
1504
1505 static int
1506 amd64_call_p (const struct amd64_insn *details)
1507 {
1508 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1509
1510 if (amd64_absolute_call_p (details))
1511 return 1;
1512
1513 /* call near, relative */
1514 if (insn[0] == 0xe8)
1515 return 1;
1516
1517 return 0;
1518 }
1519
1520 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1521 length in bytes. Otherwise, return zero. */
1522
1523 static int
1524 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1525 {
1526 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1527
1528 if (insn[0] == 0x0f && insn[1] == 0x05)
1529 {
1530 *lengthp = 2;
1531 return 1;
1532 }
1533
1534 return 0;
1535 }
1536
1537 /* Classify the instruction at ADDR using PRED.
1538 Throw an error if the memory can't be read. */
1539
1540 static int
1541 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1542 int (*pred) (const struct amd64_insn *))
1543 {
1544 struct amd64_insn details;
1545 gdb_byte *buf;
1546 int len, classification;
1547
1548 len = gdbarch_max_insn_length (gdbarch);
1549 buf = (gdb_byte *) alloca (len);
1550
1551 read_code (addr, buf, len);
1552 amd64_get_insn_details (buf, &details);
1553
1554 classification = pred (&details);
1555
1556 return classification;
1557 }
1558
1559 /* The gdbarch insn_is_call method. */
1560
1561 static int
1562 amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
1563 {
1564 return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
1565 }
1566
1567 /* The gdbarch insn_is_ret method. */
1568
1569 static int
1570 amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
1571 {
1572 return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
1573 }
1574
1575 /* The gdbarch insn_is_jump method. */
1576
1577 static int
1578 amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
1579 {
1580 return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
1581 }
1582
1583 /* Fix up the state of registers and memory after having single-stepped
1584 a displaced instruction. */
1585
1586 void
1587 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1588 struct displaced_step_closure *dsc,
1589 CORE_ADDR from, CORE_ADDR to,
1590 struct regcache *regs)
1591 {
1592 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1593 /* The offset we applied to the instruction's address. */
1594 ULONGEST insn_offset = to - from;
1595 gdb_byte *insn = dsc->insn_buf;
1596 const struct amd64_insn *insn_details = &dsc->insn_details;
1597
1598 if (debug_displaced)
1599 fprintf_unfiltered (gdb_stdlog,
1600 "displaced: fixup (%s, %s), "
1601 "insn = 0x%02x 0x%02x ...\n",
1602 paddress (gdbarch, from), paddress (gdbarch, to),
1603 insn[0], insn[1]);
1604
1605 /* If we used a tmp reg, restore it. */
1606
1607 if (dsc->tmp_used)
1608 {
1609 if (debug_displaced)
1610 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1611 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1612 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1613 }
1614
1615 /* The list of issues to contend with here is taken from
1616 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1617 Yay for Free Software! */
1618
1619 /* Relocate the %rip back to the program's instruction stream,
1620 if necessary. */
1621
1622 /* Except in the case of absolute or indirect jump or call
1623 instructions, or a return instruction, the new rip is relative to
1624 the displaced instruction; make it relative to the original insn.
1625 Well, signal handler returns don't need relocation either, but we use the
1626 value of %rip to recognize those; see below. */
1627 if (! amd64_absolute_jmp_p (insn_details)
1628 && ! amd64_absolute_call_p (insn_details)
1629 && ! amd64_ret_p (insn_details))
1630 {
1631 ULONGEST orig_rip;
1632 int insn_len;
1633
1634 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1635
1636 /* A signal trampoline system call changes the %rip, resuming
1637 execution of the main program after the signal handler has
1638 returned. That makes them like 'return' instructions; we
1639 shouldn't relocate %rip.
1640
1641 But most system calls don't, and we do need to relocate %rip.
1642
1643 Our heuristic for distinguishing these cases: if stepping
1644 over the system call instruction left control directly after
1645 the instruction, then we relocate --- control almost certainly
1646 doesn't belong in the displaced copy. Otherwise, we assume
1647 the instruction has put control where it belongs, and leave
1648 it unrelocated. Goodness help us if there are PC-relative
1649 system calls. */
1650 if (amd64_syscall_p (insn_details, &insn_len)
1651 && orig_rip != to + insn_len
1652 /* GDB can get control back after the insn after the syscall.
1653 Presumably this is a kernel bug.
1654 Fixup ensures it's a nop; we add one to the length for it. */
1655 && orig_rip != to + insn_len + 1)
1656 {
1657 if (debug_displaced)
1658 fprintf_unfiltered (gdb_stdlog,
1659 "displaced: syscall changed %%rip; "
1660 "not relocating\n");
1661 }
1662 else
1663 {
1664 ULONGEST rip = orig_rip - insn_offset;
1665
1666 /* If we just stepped over a breakpoint insn, we don't back up
1667 the pc on purpose; this is to match behaviour without
1668 stepping. */
1669
1670 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1671
1672 if (debug_displaced)
1673 fprintf_unfiltered (gdb_stdlog,
1674 "displaced: "
1675 "relocated %%rip from %s to %s\n",
1676 paddress (gdbarch, orig_rip),
1677 paddress (gdbarch, rip));
1678 }
1679 }
1680
1681 /* If the instruction was PUSHFL, then the TF bit will be set in the
1682 pushed value, and should be cleared. We'll leave this for later,
1683 since GDB already messes up the TF flag when stepping over a
1684 pushfl. */
1685
1686 /* If the instruction was a call, the return address now atop the
1687 stack is the address following the copied instruction. We need
1688 to make it the address following the original instruction. */
1689 if (amd64_call_p (insn_details))
1690 {
1691 ULONGEST rsp;
1692 ULONGEST retaddr;
1693 const ULONGEST retaddr_len = 8;
1694
1695 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1696 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1697 retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
1698 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1699
1700 if (debug_displaced)
1701 fprintf_unfiltered (gdb_stdlog,
1702 "displaced: relocated return addr at %s "
1703 "to %s\n",
1704 paddress (gdbarch, rsp),
1705 paddress (gdbarch, retaddr));
1706 }
1707 }
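/* Illustration only, not part of the original file: suppose the
   original insn is at FROM == 0x400000 and its copy at TO == 0x600000.
   After single-stepping an ordinary 5-byte insn, %rip is 0x600005;
   subtracting INSN_OFFSET (0x200000) relocates it to 0x400005, back
   in the program's real instruction stream.  For a call, the pushed
   return address is adjusted by the same amount.  */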
1708
1709 /* If the instruction INSN uses RIP-relative addressing, return the
1710 offset into the raw INSN where the displacement to be adjusted is
1711 found. Returns 0 if the instruction doesn't use RIP-relative
1712 addressing. */
1713
1714 static int
1715 rip_relative_offset (struct amd64_insn *insn)
1716 {
1717 if (insn->modrm_offset != -1)
1718 {
1719 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1720
1721 if ((modrm & 0xc7) == 0x05)
1722 {
1723 /* The displacement is found right after the ModRM byte. */
1724 return insn->modrm_offset + 1;
1725 }
1726 }
1727
1728 return 0;
1729 }
1730
1731 static void
1732 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1733 {
1734 target_write_memory (*to, buf, len);
1735 *to += len;
1736 }
1737
1738 static void
1739 amd64_relocate_instruction (struct gdbarch *gdbarch,
1740 CORE_ADDR *to, CORE_ADDR oldloc)
1741 {
1742 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1743 int len = gdbarch_max_insn_length (gdbarch);
1744 /* Extra space for sentinels. */
1745 int fixup_sentinel_space = len;
1746 gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
1747 struct amd64_insn insn_details;
1748 int offset = 0;
1749 LONGEST rel32, newrel;
1750 gdb_byte *insn;
1751 int insn_length;
1752
1753 read_memory (oldloc, buf, len);
1754
1755 /* Set up the sentinel space so we don't have to worry about running
1756 off the end of the buffer. An excessive number of leading prefixes
1757 could otherwise cause this. */
1758 memset (buf + len, 0, fixup_sentinel_space);
1759
1760 insn = buf;
1761 amd64_get_insn_details (insn, &insn_details);
1762
1763 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1764
1765 /* Skip legacy instruction prefixes. */
1766 insn = amd64_skip_prefixes (insn);
1767
1768 /* Adjust calls with 32-bit relative addresses as push/jump, with
1769 the address pushed being the location where the original call in
1770 the user program would return to. */
1771 if (insn[0] == 0xe8)
1772 {
1773 gdb_byte push_buf[32];
1774 CORE_ADDR ret_addr;
1775 int i = 0;
1776
1777 /* Where "ret" in the original code will return to. */
1778 ret_addr = oldloc + insn_length;
1779
1780 /* If pushing an address higher than or equal to 0x80000000,
1781 avoid 'pushq', as that sign extends its 32-bit operand, which
1782 would be incorrect. */
1783 if (ret_addr <= 0x7fffffff)
1784 {
1785 push_buf[0] = 0x68; /* pushq $... */
1786 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1787 i = 5;
1788 }
1789 else
1790 {
1791 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1792 push_buf[i++] = 0x83;
1793 push_buf[i++] = 0xec;
1794 push_buf[i++] = 0x08;
1795
1796 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1797 push_buf[i++] = 0x04;
1798 push_buf[i++] = 0x24;
1799 store_unsigned_integer (&push_buf[i], 4, byte_order,
1800 ret_addr & 0xffffffff);
1801 i += 4;
1802
1803 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1804 push_buf[i++] = 0x44;
1805 push_buf[i++] = 0x24;
1806 push_buf[i++] = 0x04;
1807 store_unsigned_integer (&push_buf[i], 4, byte_order,
1808 ret_addr >> 32);
1809 i += 4;
1810 }
1811 gdb_assert (i <= sizeof (push_buf));
1812 /* Push the push. */
1813 append_insns (to, i, push_buf);
1814
1815 /* Convert the relative call to a relative jump. */
1816 insn[0] = 0xe9;
1817
1818 /* Adjust the destination offset. */
1819 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1820 newrel = (oldloc - *to) + rel32;
1821 store_signed_integer (insn + 1, 4, byte_order, newrel);
1822
1823 if (debug_displaced)
1824 fprintf_unfiltered (gdb_stdlog,
1825 "Adjusted insn rel32=%s at %s to"
1826 " rel32=%s at %s\n",
1827 hex_string (rel32), paddress (gdbarch, oldloc),
1828 hex_string (newrel), paddress (gdbarch, *to));
1829
1830 /* Write the adjusted jump into its displaced location. */
1831 append_insns (to, 5, insn);
1832 return;
1833 }
1834
1835 offset = rip_relative_offset (&insn_details);
1836 if (!offset)
1837 {
1838 /* Adjust jumps with 32-bit relative addresses. Calls are
1839 already handled above. */
1840 if (insn[0] == 0xe9)
1841 offset = 1;
1842 /* Adjust conditional jumps. */
1843 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1844 offset = 2;
1845 }
1846
1847 if (offset)
1848 {
1849 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1850 newrel = (oldloc - *to) + rel32;
1851 store_signed_integer (insn + offset, 4, byte_order, newrel);
1852 if (debug_displaced)
1853 fprintf_unfiltered (gdb_stdlog,
1854 "Adjusted insn rel32=%s at %s to"
1855 " rel32=%s at %s\n",
1856 hex_string (rel32), paddress (gdbarch, oldloc),
1857 hex_string (newrel), paddress (gdbarch, *to));
1858 }
1859
1860 /* Write the adjusted instruction into its displaced location. */
1861 append_insns (to, insn_length, buf);
1862 }
1863
1864 \f
1865 /* The maximum number of saved registers. This should include %rip. */
1866 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1867
1868 struct amd64_frame_cache
1869 {
1870 /* Base address. */
1871 CORE_ADDR base;
1872 int base_p;
1873 CORE_ADDR sp_offset;
1874 CORE_ADDR pc;
1875
1876 /* Saved registers. */
1877 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1878 CORE_ADDR saved_sp;
1879 int saved_sp_reg;
1880
1881 /* Do we have a frame? */
1882 int frameless_p;
1883 };
1884
1885 /* Initialize a frame cache. */
1886
1887 static void
1888 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1889 {
1890 int i;
1891
1892 /* Base address. */
1893 cache->base = 0;
1894 cache->base_p = 0;
1895 cache->sp_offset = -8;
1896 cache->pc = 0;
1897
1898 /* Saved registers. We initialize these to -1 since zero is a valid
1899 offset (that's where %rbp is supposed to be stored).
1900 The values start out as being offsets, and are later converted to
1901 addresses (at which point -1 is interpreted as an address, still meaning
1902 "invalid"). */
1903 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1904 cache->saved_regs[i] = -1;
1905 cache->saved_sp = 0;
1906 cache->saved_sp_reg = -1;
1907
1908 /* Frameless until proven otherwise. */
1909 cache->frameless_p = 1;
1910 }
1911
1912 /* Allocate and initialize a frame cache. */
1913
1914 static struct amd64_frame_cache *
1915 amd64_alloc_frame_cache (void)
1916 {
1917 struct amd64_frame_cache *cache;
1918
1919 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1920 amd64_init_frame_cache (cache);
1921 return cache;
1922 }
1923
1924 /* GCC 4.4 and later can put code in the prologue to realign the
1925 stack pointer. Check whether PC points to such code, and update
1926 CACHE accordingly. Return the first instruction after the code
1927 sequence or CURRENT_PC, whichever is smaller. If we don't
1928 recognize the code, return PC. */
1929
1930 static CORE_ADDR
1931 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1932 struct amd64_frame_cache *cache)
1933 {
1934 /* There are 2 code sequences to re-align the stack before the frame
1935 gets set up:
1936
1937 1. Use a caller-saved register:
1938
1939 leaq 8(%rsp), %reg
1940 andq $-XXX, %rsp
1941 pushq -8(%reg)
1942
1943 2. Use a callee-saved register:
1944
1945 pushq %reg
1946 leaq 16(%rsp), %reg
1947 andq $-XXX, %rsp
1948 pushq -8(%reg)
1949
1950 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1951
1952 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1953 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1954 */
1955
1956 gdb_byte buf[18];
1957 int reg, r;
1958 int offset, offset_and;
1959
1960 if (target_read_code (pc, buf, sizeof buf))
1961 return pc;
1962
1963 /* Check for a caller-saved register. The first instruction has
1964 to be "leaq 8(%rsp), %reg". */
1965 if ((buf[0] & 0xfb) == 0x48
1966 && buf[1] == 0x8d
1967 && buf[3] == 0x24
1968 && buf[4] == 0x8)
1969 {
1970 /* MOD must be binary 10 and R/M must be binary 100. */
1971 if ((buf[2] & 0xc7) != 0x44)
1972 return pc;
1973
1974 /* REG has register number. */
1975 reg = (buf[2] >> 3) & 7;
1976
1977 /* Check the REX.R bit. */
1978 if (buf[0] == 0x4c)
1979 reg += 8;
1980
1981 offset = 5;
1982 }
1983 else
1984 {
1985 /* Check for a callee-saved register. The first instruction
1986 has to be "pushq %reg". */
1987 reg = 0;
1988 if ((buf[0] & 0xf8) == 0x50)
1989 offset = 0;
1990 else if ((buf[0] & 0xf6) == 0x40
1991 && (buf[1] & 0xf8) == 0x50)
1992 {
1993 /* Check the REX.B bit. */
1994 if ((buf[0] & 1) != 0)
1995 reg = 8;
1996
1997 offset = 1;
1998 }
1999 else
2000 return pc;
2001
2002 /* Get register. */
2003 reg += buf[offset] & 0x7;
2004
2005 offset++;
2006
2007 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2008 if ((buf[offset] & 0xfb) != 0x48
2009 || buf[offset + 1] != 0x8d
2010 || buf[offset + 3] != 0x24
2011 || buf[offset + 4] != 0x10)
2012 return pc;
2013
2014 /* MOD must be binary 10 and R/M must be binary 100. */
2015 if ((buf[offset + 2] & 0xc7) != 0x44)
2016 return pc;
2017
2018 /* REG has register number. */
2019 r = (buf[offset + 2] >> 3) & 7;
2020
2021 /* Check the REX.R bit. */
2022 if (buf[offset] == 0x4c)
2023 r += 8;
2024
2025 /* Registers in pushq and leaq have to be the same. */
2026 if (reg != r)
2027 return pc;
2028
2029 offset += 5;
2030 }
2031
2032 /* Register can't be %rsp or %rbp. */
2033 if (reg == 4 || reg == 5)
2034 return pc;
2035
2036 /* The next instruction has to be "andq $-XXX, %rsp". */
2037 if (buf[offset] != 0x48
2038 || buf[offset + 2] != 0xe4
2039 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2040 return pc;
2041
2042 offset_and = offset;
2043 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2044
2045 /* The next instruction has to be "pushq -8(%reg)". */
2046 r = 0;
2047 if (buf[offset] == 0xff)
2048 offset++;
2049 else if ((buf[offset] & 0xf6) == 0x40
2050 && buf[offset + 1] == 0xff)
2051 {
2052 /* Check the REX.B bit. */
2053 if ((buf[offset] & 0x1) != 0)
2054 r = 8;
2055 offset += 2;
2056 }
2057 else
2058 return pc;
2059
2060 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2061 01. */
2062 if (buf[offset + 1] != 0xf8
2063 || (buf[offset] & 0xf8) != 0x70)
2064 return pc;
2065
2066 /* R/M has register. */
2067 r += buf[offset] & 7;
2068
2069 /* Registers in leaq and pushq have to be the same. */
2070 if (reg != r)
2071 return pc;
2072
2073 if (current_pc > pc + offset_and)
2074 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2075
2076 return std::min (pc + offset + 2, current_pc);
2077 }
2078
2079 /* Similar to amd64_analyze_stack_align for x32. */
2080
2081 static CORE_ADDR
2082 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2083 struct amd64_frame_cache *cache)
2084 {
2085 /* There are 2 code sequences to re-align the stack before the frame
2086 gets set up:
2087
2088 1. Use a caller-saved register:
2089
2090 leaq 8(%rsp), %reg
2091 andq $-XXX, %rsp
2092 pushq -8(%reg)
2093
2094 or
2095
2096 [addr32] leal 8(%rsp), %reg
2097 andl $-XXX, %esp
2098 [addr32] pushq -8(%reg)
2099
2100 2. Use a callee-saved register:
2101
2102 pushq %reg
2103 leaq 16(%rsp), %reg
2104 andq $-XXX, %rsp
2105 pushq -8(%reg)
2106
2107 or
2108
2109 pushq %reg
2110 [addr32] leal 16(%rsp), %reg
2111 andl $-XXX, %esp
2112 [addr32] pushq -8(%reg)
2113
2114 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2115
2116 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2117 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2118
2119 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2120
2121 0x83 0xe4 0xf0 andl $-16, %esp
2122 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2123 */
2124
2125 gdb_byte buf[19];
2126 int reg, r;
2127 int offset, offset_and;
2128
2129 if (target_read_memory (pc, buf, sizeof buf))
2130 return pc;
2131
2132 /* Skip optional addr32 prefix. */
2133 offset = buf[0] == 0x67 ? 1 : 0;
2134
2135 /* Check for a caller-saved register. The first instruction has
2136 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2137 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2138 && buf[offset + 1] == 0x8d
2139 && buf[offset + 3] == 0x24
2140 && buf[offset + 4] == 0x8)
2141 {
2142 /* MOD must be binary 10 and R/M must be binary 100. */
2143 if ((buf[offset + 2] & 0xc7) != 0x44)
2144 return pc;
2145
2146 /* REG has register number. */
2147 reg = (buf[offset + 2] >> 3) & 7;
2148
2149 /* Check the REX.R bit. */
2150 if ((buf[offset] & 0x4) != 0)
2151 reg += 8;
2152
2153 offset += 5;
2154 }
2155 else
2156 {
2157 /* Check for a callee-saved register. The first instruction
2158 has to be "pushq %reg". */
2159 reg = 0;
2160 if ((buf[offset] & 0xf6) == 0x40
2161 && (buf[offset + 1] & 0xf8) == 0x50)
2162 {
2163 /* Check the REX.B bit. */
2164 if ((buf[offset] & 1) != 0)
2165 reg = 8;
2166
2167 offset += 1;
2168 }
2169 else if ((buf[offset] & 0xf8) != 0x50)
2170 return pc;
2171
2172 /* Get register. */
2173 reg += buf[offset] & 0x7;
2174
2175 offset++;
2176
2177 /* Skip optional addr32 prefix. */
2178 if (buf[offset] == 0x67)
2179 offset++;
2180
2181 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2182 "leal 16(%rsp), %reg". */
2183 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2184 || buf[offset + 1] != 0x8d
2185 || buf[offset + 3] != 0x24
2186 || buf[offset + 4] != 0x10)
2187 return pc;
2188
2189 /* MOD must be binary 10 and R/M must be binary 100. */
2190 if ((buf[offset + 2] & 0xc7) != 0x44)
2191 return pc;
2192
2193 /* REG has register number. */
2194 r = (buf[offset + 2] >> 3) & 7;
2195
2196 /* Check the REX.R bit. */
2197 if ((buf[offset] & 0x4) != 0)
2198 r += 8;
2199
2200 /* Registers in pushq and leaq have to be the same. */
2201 if (reg != r)
2202 return pc;
2203
2204 offset += 5;
2205 }
2206
2207 /* Register can't be %rsp or %rbp. */
2208 if (reg == 4 || reg == 5)
2209 return pc;
2210
2211 /* The next instruction may be "andq $-XXX, %rsp" or
2212 "andl $-XXX, %esp". */
2213 if (buf[offset] != 0x48)
2214 offset--;
2215
2216 if (buf[offset + 2] != 0xe4
2217 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2218 return pc;
2219
2220 offset_and = offset;
2221 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2222
2223 /* Skip optional addr32 prefix. */
2224 if (buf[offset] == 0x67)
2225 offset++;
2226
2227 /* The next instruction has to be "pushq -8(%reg)". */
2228 r = 0;
2229 if (buf[offset] == 0xff)
2230 offset++;
2231 else if ((buf[offset] & 0xf6) == 0x40
2232 && buf[offset + 1] == 0xff)
2233 {
2234 /* Check the REX.B bit. */
2235 if ((buf[offset] & 0x1) != 0)
2236 r = 8;
2237 offset += 2;
2238 }
2239 else
2240 return pc;
2241
2242 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2243 01. */
2244 if (buf[offset + 1] != 0xf8
2245 || (buf[offset] & 0xf8) != 0x70)
2246 return pc;
2247
2248 /* R/M has register. */
2249 r += buf[offset] & 7;
2250
2251 /* Registers in leaq and pushq have to be the same. */
2252 if (reg != r)
2253 return pc;
2254
2255 if (current_pc > pc + offset_and)
2256 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2257
2258 return std::min (pc + offset + 2, current_pc);
2259 }
2260
2261 /* Do a limited analysis of the prologue at PC and update CACHE
2262 accordingly. Bail out early if CURRENT_PC is reached. Return the
2263 address where the analysis stopped.
2264
2265 We will handle only functions beginning with:
2266
2267 pushq %rbp 0x55
2268 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2269
2270 or (for the X32 ABI):
2271
2272 pushq %rbp 0x55
2273 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2274
2275 Any function that doesn't start with one of these sequences will be
2276 assumed to have no prologue and thus no valid frame pointer in
2277 %rbp. */
2278
2279 static CORE_ADDR
2280 amd64_analyze_prologue (struct gdbarch *gdbarch,
2281 CORE_ADDR pc, CORE_ADDR current_pc,
2282 struct amd64_frame_cache *cache)
2283 {
2284 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2285 /* There are two variations of movq %rsp, %rbp. */
2286 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2287 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2288 /* Ditto for movl %esp, %ebp. */
2289 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2290 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2291
2292 gdb_byte buf[3];
2293 gdb_byte op;
2294
2295 if (current_pc <= pc)
2296 return current_pc;
2297
2298 if (gdbarch_ptr_bit (gdbarch) == 32)
2299 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2300 else
2301 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2302
2303 op = read_code_unsigned_integer (pc, 1, byte_order);
2304
2305 if (op == 0x55) /* pushq %rbp */
2306 {
2307 /* Take into account that we've executed the `pushq %rbp' that
2308 starts this instruction sequence. */
2309 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2310 cache->sp_offset += 8;
2311
2312 /* If that's all, return now. */
2313 if (current_pc <= pc + 1)
2314 return current_pc;
2315
2316 read_code (pc + 1, buf, 3);
2317
2318 /* Check for `movq %rsp, %rbp'. */
2319 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2320 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2321 {
2322 /* OK, we actually have a frame. */
2323 cache->frameless_p = 0;
2324 return pc + 4;
2325 }
2326
2327 /* For X32, also check for `movl %esp, %ebp'. */
2328 if (gdbarch_ptr_bit (gdbarch) == 32)
2329 {
2330 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2331 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2332 {
2333 /* OK, we actually have a frame. */
2334 cache->frameless_p = 0;
2335 return pc + 3;
2336 }
2337 }
2338
2339 return pc + 1;
2340 }
2341
2342 return pc;
2343 }
2344
2345 /* Work around false termination of prologue - GCC PR debug/48827.
2346
2347 START_PC is the address of the function's first instruction; PC is the
2348 prologue end determined so far. Return PC if there is nothing to do.
2349
2350 84 c0 test %al,%al
2351 74 23 je after
2352 <-- here the line info advances by 0 lines - the false prologue end marker.
2353 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2354 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2355 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2356 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2357 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2358 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2359 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2360 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2361 after: */
2362
2363 static CORE_ADDR
2364 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2365 {
2366 struct symtab_and_line start_pc_sal, next_sal;
2367 gdb_byte buf[4 + 8 * 7];
2368 int offset, xmmreg;
2369
2370 if (pc == start_pc)
2371 return pc;
2372
2373 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2374 if (start_pc_sal.symtab == NULL
2375 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2376 (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
2377 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2378 return pc;
2379
2380 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2381 if (next_sal.line != start_pc_sal.line)
2382 return pc;
2383
2384 /* START_PC may be in overlaid memory, which is ignored here. */
2385 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2386 return pc;
2387
2388 /* test %al,%al */
2389 if (buf[0] != 0x84 || buf[1] != 0xc0)
2390 return pc;
2391 /* je AFTER */
2392 if (buf[2] != 0x74)
2393 return pc;
2394
2395 offset = 4;
2396 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2397 {
2398 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2399 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2400 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2401 return pc;
2402
2403 /* 0b01?????? */
2404 if ((buf[offset + 2] & 0xc0) == 0x40)
2405 {
2406 /* 8-bit displacement. */
2407 offset += 4;
2408 }
2409 /* 0b10?????? */
2410 else if ((buf[offset + 2] & 0xc0) == 0x80)
2411 {
2412 /* 32-bit displacement. */
2413 offset += 7;
2414 }
2415 else
2416 return pc;
2417 }
2418
2419 /* je AFTER */
2420 if (offset - 4 != buf[3])
2421 return pc;
2422
2423 return next_sal.end;
2424 }
2425
2426 /* Return PC of first real instruction. */
2427
2428 static CORE_ADDR
2429 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2430 {
2431 struct amd64_frame_cache cache;
2432 CORE_ADDR pc;
2433 CORE_ADDR func_addr;
2434
2435 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2436 {
2437 CORE_ADDR post_prologue_pc
2438 = skip_prologue_using_sal (gdbarch, func_addr);
2439 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2440
2441 /* Clang always emits a line note before the prologue and another
2442 one after. We trust clang to emit usable line notes. */
2443 if (post_prologue_pc
2444 && (cust != NULL
2445 && COMPUNIT_PRODUCER (cust) != NULL
2446 && startswith (COMPUNIT_PRODUCER (cust), "clang ")))
2447 return std::max (start_pc, post_prologue_pc);
2448 }
2449
2450 amd64_init_frame_cache (&cache);
2451 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2452 &cache);
2453 if (cache.frameless_p)
2454 return start_pc;
2455
2456 return amd64_skip_xmm_prologue (pc, start_pc);
2457 }
2458 \f
2459
2460 /* Normal frames. */
2461
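/* Fill CACHE for THIS_FRAME: analyze the prologue, reconstruct the
frame base, and record where the caller's registers were saved. */
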
2462 static void
2463 amd64_frame_cache_1 (struct frame_info *this_frame,
2464 struct amd64_frame_cache *cache)
2465 {
2466 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2467 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2468 gdb_byte buf[8];
2469 int i;
2470
2471 cache->pc = get_frame_func (this_frame);
2472 if (cache->pc != 0)
2473 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2474 cache);
2475
2476 if (cache->frameless_p)
2477 {
2478 /* We didn't find a valid frame. If we're at the start of a
2479 function, or somewhere halfway through its prologue, the function's
2480 frame probably hasn't been fully set up yet. Try to
2481 reconstruct the base address for the stack frame by looking
2482 at the stack pointer. For truly "frameless" functions this
2483 might work too. */
2484
2485 if (cache->saved_sp_reg != -1)
2486 {
2487 /* Stack pointer has been saved. */
2488 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2489 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2490
2491 /* We're halfway through aligning the stack. */
2492 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2493 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2494
2495 /* This will be added back below. */
2496 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2497 }
2498 else
2499 {
2500 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2501 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2502 + cache->sp_offset;
2503 }
2504 }
2505 else
2506 {
2507 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2508 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2509 }
2510
2511 /* Now that we have the base address for the stack frame we can
2512 calculate the value of %rsp in the calling frame. */
2513 cache->saved_sp = cache->base + 16;
2514
2515 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2516 frame we find it at the same offset from the reconstructed base
2517 address. If we're halfway through aligning the stack, %rip is handled
2518 differently (see above). */
2519 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2520 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2521
2522 /* Adjust all the saved registers such that they contain addresses
2523 instead of offsets. */
2524 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2525 if (cache->saved_regs[i] != -1)
2526 cache->saved_regs[i] += cache->base;
2527
2528 cache->base_p = 1;
2529 }
2530
2531 static struct amd64_frame_cache *
2532 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2533 {
2534 struct amd64_frame_cache *cache;
2535
2536 if (*this_cache)
2537 return (struct amd64_frame_cache *) *this_cache;
2538
2539 cache = amd64_alloc_frame_cache ();
2540 *this_cache = cache;
2541
2542 TRY
2543 {
2544 amd64_frame_cache_1 (this_frame, cache);
2545 }
2546 CATCH (ex, RETURN_MASK_ERROR)
2547 {
2548 if (ex.error != NOT_AVAILABLE_ERROR)
2549 throw_exception (ex);
2550 }
2551 END_CATCH
2552
2553 return cache;
2554 }
2555
2556 static enum unwind_stop_reason
2557 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2558 void **this_cache)
2559 {
2560 struct amd64_frame_cache *cache =
2561 amd64_frame_cache (this_frame, this_cache);
2562
2563 if (!cache->base_p)
2564 return UNWIND_UNAVAILABLE;
2565
2566 /* This marks the outermost frame. */
2567 if (cache->base == 0)
2568 return UNWIND_OUTERMOST;
2569
2570 return UNWIND_NO_REASON;
2571 }
2572
2573 static void
2574 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2575 struct frame_id *this_id)
2576 {
2577 struct amd64_frame_cache *cache =
2578 amd64_frame_cache (this_frame, this_cache);
2579
2580 if (!cache->base_p)
2581 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2582 else if (cache->base == 0)
2583 {
2584 /* This marks the outermost frame. */
2585 return;
2586 }
2587 else
2588 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2589 }
2590
2591 static struct value *
2592 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2593 int regnum)
2594 {
2595 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2596 struct amd64_frame_cache *cache =
2597 amd64_frame_cache (this_frame, this_cache);
2598
2599 gdb_assert (regnum >= 0);
2600
2601 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2602 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2603
2604 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2605 return frame_unwind_got_memory (this_frame, regnum,
2606 cache->saved_regs[regnum]);
2607
2608 return frame_unwind_got_register (this_frame, regnum, regnum);
2609 }
2610
2611 static const struct frame_unwind amd64_frame_unwind =
2612 {
2613 NORMAL_FRAME,
2614 amd64_frame_unwind_stop_reason,
2615 amd64_frame_this_id,
2616 amd64_frame_prev_register,
2617 NULL,
2618 default_frame_sniffer
2619 };
2620 \f
2621 /* Generate a bytecode expression to get the value of the saved PC. */
2622
2623 static void
2624 amd64_gen_return_address (struct gdbarch *gdbarch,
2625 struct agent_expr *ax, struct axs_value *value,
2626 CORE_ADDR scope)
2627 {
2628 /* The following sequence assumes the traditional use of the base
2629 register. */
2630 ax_reg (ax, AMD64_RBP_REGNUM);
2631 ax_const_l (ax, 8);
2632 ax_simple (ax, aop_add);
2633 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2634 value->kind = axs_lvalue_memory;
2635 }
2636 \f
2637
2638 /* Signal trampolines. */
2639
2640 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2641 64-bit variants. This would require using identical frame caches
2642 on both platforms. */
2643
2644 static struct amd64_frame_cache *
2645 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2646 {
2647 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2648 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2649 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2650 struct amd64_frame_cache *cache;
2651 CORE_ADDR addr;
2652 gdb_byte buf[8];
2653 int i;
2654
2655 if (*this_cache)
2656 return (struct amd64_frame_cache *) *this_cache;
2657
2658 cache = amd64_alloc_frame_cache ();
2659
2660 TRY
2661 {
2662 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2663 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2664
2665 addr = tdep->sigcontext_addr (this_frame);
2666 gdb_assert (tdep->sc_reg_offset);
2667 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2668 for (i = 0; i < tdep->sc_num_regs; i++)
2669 if (tdep->sc_reg_offset[i] != -1)
2670 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2671
2672 cache->base_p = 1;
2673 }
2674 CATCH (ex, RETURN_MASK_ERROR)
2675 {
2676 if (ex.error != NOT_AVAILABLE_ERROR)
2677 throw_exception (ex);
2678 }
2679 END_CATCH
2680
2681 *this_cache = cache;
2682 return cache;
2683 }
2684
2685 static enum unwind_stop_reason
2686 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2687 void **this_cache)
2688 {
2689 struct amd64_frame_cache *cache =
2690 amd64_sigtramp_frame_cache (this_frame, this_cache);
2691
2692 if (!cache->base_p)
2693 return UNWIND_UNAVAILABLE;
2694
2695 return UNWIND_NO_REASON;
2696 }
2697
2698 static void
2699 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2700 void **this_cache, struct frame_id *this_id)
2701 {
2702 struct amd64_frame_cache *cache =
2703 amd64_sigtramp_frame_cache (this_frame, this_cache);
2704
2705 if (!cache->base_p)
2706 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2707 else if (cache->base == 0)
2708 {
2709 /* This marks the outermost frame. */
2710 return;
2711 }
2712 else
2713 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2714 }
2715
2716 static struct value *
2717 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2718 void **this_cache, int regnum)
2719 {
2720 /* Make sure we've initialized the cache. */
2721 amd64_sigtramp_frame_cache (this_frame, this_cache);
2722
2723 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2724 }
2725
2726 static int
2727 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2728 struct frame_info *this_frame,
2729 void **this_cache)
2730 {
2731 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2732
2733 /* We shouldn't even bother if we don't have a sigcontext_addr
2734 handler. */
2735 if (tdep->sigcontext_addr == NULL)
2736 return 0;
2737
2738 if (tdep->sigtramp_p != NULL)
2739 {
2740 if (tdep->sigtramp_p (this_frame))
2741 return 1;
2742 }
2743
2744 if (tdep->sigtramp_start != 0)
2745 {
2746 CORE_ADDR pc = get_frame_pc (this_frame);
2747
2748 gdb_assert (tdep->sigtramp_end != 0);
2749 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2750 return 1;
2751 }
2752
2753 return 0;
2754 }
2755
2756 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2757 {
2758 SIGTRAMP_FRAME,
2759 amd64_sigtramp_frame_unwind_stop_reason,
2760 amd64_sigtramp_frame_this_id,
2761 amd64_sigtramp_frame_prev_register,
2762 NULL,
2763 amd64_sigtramp_frame_sniffer
2764 };
2765 \f
2766
2767 static CORE_ADDR
2768 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2769 {
2770 struct amd64_frame_cache *cache =
2771 amd64_frame_cache (this_frame, this_cache);
2772
2773 return cache->base;
2774 }
2775
2776 static const struct frame_base amd64_frame_base =
2777 {
2778 &amd64_frame_unwind,
2779 amd64_frame_base_address,
2780 amd64_frame_base_address,
2781 amd64_frame_base_address
2782 };
2783
2784 /* Normal frames, but in a function epilogue. */
2785
2786 /* Implement the stack_frame_destroyed_p gdbarch method.
2787
2788 The epilogue is defined here as the 'ret' instruction, which will
2789 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2790 the function's stack frame. */
2791
2792 static int
2793 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2794 {
2795 gdb_byte insn;
2796 struct compunit_symtab *cust;
2797
2798 cust = find_pc_compunit_symtab (pc);
2799 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
2800 return 0;
2801
2802 if (target_read_memory (pc, &insn, 1))
2803 return 0; /* Can't read memory at pc. */
2804
2805 if (insn != 0xc3) /* 'ret' instruction. */
2806 return 0;
2807
2808 return 1;
2809 }
2810
2811 static int
2812 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2813 struct frame_info *this_frame,
2814 void **this_prologue_cache)
2815 {
2816 if (frame_relative_level (this_frame) == 0)
2817 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
2818 get_frame_pc (this_frame));
2819 else
2820 return 0;
2821 }
2822
2823 static struct amd64_frame_cache *
2824 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2825 {
2826 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2827 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2828 struct amd64_frame_cache *cache;
2829 gdb_byte buf[8];
2830
2831 if (*this_cache)
2832 return (struct amd64_frame_cache *) *this_cache;
2833
2834 cache = amd64_alloc_frame_cache ();
2835 *this_cache = cache;
2836
2837 TRY
2838 {
2839 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2840 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2841 cache->base = extract_unsigned_integer (buf, 8,
2842 byte_order) + cache->sp_offset;
2843
2844 /* Cache pc will be the frame's current PC. */
2845 cache->pc = get_frame_pc (this_frame);
2846
2847 /* The saved %rsp will be at cache->base plus 16. */
2848 cache->saved_sp = cache->base + 16;
2849
2850 /* The saved %rip will be at cache->base plus 8. */
2851 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2852
2853 cache->base_p = 1;
2854 }
2855 CATCH (ex, RETURN_MASK_ERROR)
2856 {
2857 if (ex.error != NOT_AVAILABLE_ERROR)
2858 throw_exception (ex);
2859 }
2860 END_CATCH
2861
2862 return cache;
2863 }
2864
2865 static enum unwind_stop_reason
2866 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2867 void **this_cache)
2868 {
2869 struct amd64_frame_cache *cache
2870 = amd64_epilogue_frame_cache (this_frame, this_cache);
2871
2872 if (!cache->base_p)
2873 return UNWIND_UNAVAILABLE;
2874
2875 return UNWIND_NO_REASON;
2876 }
2877
2878 static void
2879 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2880 void **this_cache,
2881 struct frame_id *this_id)
2882 {
2883 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2884 this_cache);
2885
2886 if (!cache->base_p)
2887 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2888 else
2889 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2890 }
2891
2892 static const struct frame_unwind amd64_epilogue_frame_unwind =
2893 {
2894 NORMAL_FRAME,
2895 amd64_epilogue_frame_unwind_stop_reason,
2896 amd64_epilogue_frame_this_id,
2897 amd64_frame_prev_register,
2898 NULL,
2899 amd64_epilogue_frame_sniffer
2900 };
2901
2902 static struct frame_id
2903 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2904 {
2905 CORE_ADDR fp;
2906
2907 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2908
2909 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2910 }
2911
2912 /* 16-byte align the SP per frame requirements. */
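/* For example, an incoming SP of 0x7fffffffe468 is aligned down to
0x7fffffffe460. */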
2913
2914 static CORE_ADDR
2915 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2916 {
2917 return sp & -(CORE_ADDR)16;
2918 }
2919 \f
2920
2921 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2922 in the floating-point register set REGSET to register cache
2923 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2924
2925 static void
2926 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2927 int regnum, const void *fpregs, size_t len)
2928 {
2929 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2930 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2931
2932 gdb_assert (len >= tdep->sizeof_fpregset);
2933 amd64_supply_fxsave (regcache, regnum, fpregs);
2934 }
2935
2936 /* Collect register REGNUM from the register cache REGCACHE and store
2937 it in the buffer specified by FPREGS and LEN as described by the
2938 floating-point register set REGSET. If REGNUM is -1, do this for
2939 all registers in REGSET. */
2940
2941 static void
2942 amd64_collect_fpregset (const struct regset *regset,
2943 const struct regcache *regcache,
2944 int regnum, void *fpregs, size_t len)
2945 {
2946 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2947 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2948
2949 gdb_assert (len >= tdep->sizeof_fpregset);
2950 amd64_collect_fxsave (regcache, regnum, fpregs);
2951 }
2952
2953 const struct regset amd64_fpregset =
2954 {
2955 NULL, amd64_supply_fpregset, amd64_collect_fpregset
2956 };
2957 \f
2958
2959 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2960 %rdi. We expect its value to be a pointer to the jmp_buf structure
2961 from which we extract the address that we will land at. This
2962 address is copied into PC. This routine returns non-zero on
2963 success. */
2964
2965 static int
2966 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2967 {
2968 gdb_byte buf[8];
2969 CORE_ADDR jb_addr;
2970 struct gdbarch *gdbarch = get_frame_arch (frame);
2971 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2972 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2973
2974 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2975 longjmp will land. */
2976 if (jb_pc_offset == -1)
2977 return 0;
2978
2979 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2980 jb_addr = extract_typed_address
2981 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2982 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2983 return 0;
2984
2985 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2986
2987 return 1;
2988 }
2989
2990 static const int amd64_record_regmap[] =
2991 {
2992 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2993 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2994 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2995 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2996 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2997 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2998 };
2999
3000 void
3001 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
3002 {
3003 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3004 const struct target_desc *tdesc = info.target_desc;
3005 static const char *const stap_integer_prefixes[] = { "$", NULL };
3006 static const char *const stap_register_prefixes[] = { "%", NULL };
3007 static const char *const stap_register_indirection_prefixes[] = { "(",
3008 NULL };
3009 static const char *const stap_register_indirection_suffixes[] = { ")",
3010 NULL };
3011
3012 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3013 floating-point registers. */
3014 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3015 tdep->fpregset = &amd64_fpregset;
3016
3017 if (! tdesc_has_registers (tdesc))
3018 tdesc = tdesc_amd64;
3019 tdep->tdesc = tdesc;
3020
3021 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3022 tdep->register_names = amd64_register_names;
3023
3024 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3025 {
3026 tdep->zmmh_register_names = amd64_zmmh_names;
3027 tdep->k_register_names = amd64_k_names;
3028 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3029 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3030
3031 tdep->num_zmm_regs = 32;
3032 tdep->num_xmm_avx512_regs = 16;
3033 tdep->num_ymm_avx512_regs = 16;
3034
3035 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3036 tdep->k0_regnum = AMD64_K0_REGNUM;
3037 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3038 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3039 }
3040
3041 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3042 {
3043 tdep->ymmh_register_names = amd64_ymmh_names;
3044 tdep->num_ymm_regs = 16;
3045 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3046 }
3047
3048 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3049 {
3050 tdep->mpx_register_names = amd64_mpx_names;
3051 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3052 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3053 }
3054
3055 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3056 {
3057 const struct tdesc_feature *feature =
3058 tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments");
3059 struct tdesc_arch_data *tdesc_data_segments =
3060 (struct tdesc_arch_data *) info.tdep_info;
3061
3062 tdesc_numbered_register (feature, tdesc_data_segments,
3063 AMD64_FSBASE_REGNUM, "fs_base");
3064 tdesc_numbered_register (feature, tdesc_data_segments,
3065 AMD64_GSBASE_REGNUM, "gs_base");
3066 }
3067
3068 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3069 {
3070 tdep->pkeys_register_names = amd64_pkeys_names;
3071 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3072 tdep->num_pkeys_regs = 1;
3073 }
3074
3075 tdep->num_byte_regs = 20;
3076 tdep->num_word_regs = 16;
3077 tdep->num_dword_regs = 16;
3078 /* Avoid wiring in the MMX registers for now. */
3079 tdep->num_mmx_regs = 0;
3080
3081 set_gdbarch_pseudo_register_read_value (gdbarch,
3082 amd64_pseudo_register_read_value);
3083 set_gdbarch_pseudo_register_write (gdbarch,
3084 amd64_pseudo_register_write);
3085 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3086 amd64_ax_pseudo_register_collect);
3087
3088 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3089
3090 /* AMD64 has an FPU and 16 SSE registers. */
3091 tdep->st0_regnum = AMD64_ST0_REGNUM;
3092 tdep->num_xmm_regs = 16;
3093
3094 /* This is what all the fuss is about. */
3095 set_gdbarch_long_bit (gdbarch, 64);
3096 set_gdbarch_long_long_bit (gdbarch, 64);
3097 set_gdbarch_ptr_bit (gdbarch, 64);
3098
3099 /* In contrast to the i386, on AMD64 a `long double' actually takes
3100 up 128 bits, even though it's still based on the i387 extended
3101 floating-point format which has only 80 significant bits. */
3102 set_gdbarch_long_double_bit (gdbarch, 128);
3103
3104 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3105
3106 /* Register numbers of various important registers. */
3107 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3108 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3109 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3110 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3111
3112 /* The "default" register numbering scheme for AMD64 is referred to
3113 as the "DWARF Register Number Mapping" in the System V psABI.
3114 The preferred debugging format for all known AMD64 targets is
3115 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3116 DWARF-1), but we provide the same mapping just in case. This
3117 mapping is also used for stabs, which GCC does support. */
3118 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3119 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3120
3121 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3122 be in use on any of the supported AMD64 targets. */
3123
3124 /* Call dummy code. */
3125 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3126 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3127 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3128
3129 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3130 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3131 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3132
3133 set_gdbarch_return_value (gdbarch, amd64_return_value);
3134
3135 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3136
3137 tdep->record_regmap = amd64_record_regmap;
3138
3139 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3140
3141 /* Hook the function epilogue frame unwinder. This unwinder is
3142 prepended to the list, so that it supersedes the other
3143 unwinders in function epilogues. */
3144 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3145
3146 /* Hook the prologue-based frame unwinders. */
3147 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3148 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3149 frame_base_set_default (gdbarch, &amd64_frame_base);
3150
3151 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3152
3153 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3154
3155 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3156
3157 /* SystemTap variables and functions. */
3158 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3159 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3160 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3161 stap_register_indirection_prefixes);
3162 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3163 stap_register_indirection_suffixes);
3164 set_gdbarch_stap_is_single_operand (gdbarch,
3165 i386_stap_is_single_operand);
3166 set_gdbarch_stap_parse_special_token (gdbarch,
3167 i386_stap_parse_special_token);
3168 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3169 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3170 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3171 }
3172 \f
3173
3174 static struct type *
3175 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3176 {
3177 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3178
3179 switch (regnum - tdep->eax_regnum)
3180 {
3181 case AMD64_RBP_REGNUM: /* %ebp */
3182 case AMD64_RSP_REGNUM: /* %esp */
3183 return builtin_type (gdbarch)->builtin_data_ptr;
3184 case AMD64_RIP_REGNUM: /* %eip */
3185 return builtin_type (gdbarch)->builtin_func_ptr;
3186 }
3187
3188 return i386_pseudo_register_type (gdbarch, regnum);
3189 }
3190
3191 void
3192 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
3193 {
3194 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3195 const struct target_desc *tdesc = info.target_desc;
3196
3197 amd64_init_abi (info, gdbarch);
3198
3199 if (! tdesc_has_registers (tdesc))
3200 tdesc = tdesc_x32;
3201 tdep->tdesc = tdesc;
3202
3203 tdep->num_dword_regs = 17;
3204 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3205
3206 set_gdbarch_long_bit (gdbarch, 32);
3207 set_gdbarch_ptr_bit (gdbarch, 32);
3208 }
3209
3210 /* Return the target description for a specified XSAVE feature mask. */
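
/* A minimal usage sketch (XCR0 as read from the target):

const struct target_desc *tdesc = amd64_target_description (xcr0);

Feature combinations without an exact matching description fall back
to the plain tdesc_amd64. */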
3211
3212 const struct target_desc *
3213 amd64_target_description (uint64_t xcr0)
3214 {
3215 switch (xcr0 & X86_XSTATE_ALL_MASK)
3216 {
3217 case X86_XSTATE_AVX_MPX_AVX512_PKU_MASK:
3218 return tdesc_amd64_avx_mpx_avx512_pku;
3219 case X86_XSTATE_AVX_AVX512_MASK:
3220 return tdesc_amd64_avx_avx512;
3221 case X86_XSTATE_MPX_MASK:
3222 return tdesc_amd64_mpx;
3223 case X86_XSTATE_AVX_MPX_MASK:
3224 return tdesc_amd64_avx_mpx;
3225 case X86_XSTATE_AVX_MASK:
3226 return tdesc_amd64_avx;
3227 default:
3228 return tdesc_amd64;
3229 }
3230 }
3231
3232 /* Provide a prototype to silence -Wmissing-prototypes. */
3233 void _initialize_amd64_tdep (void);
3234
3235 void
3236 _initialize_amd64_tdep (void)
3237 {
3238 initialize_tdesc_amd64 ();
3239 initialize_tdesc_amd64_avx ();
3240 initialize_tdesc_amd64_mpx ();
3241 initialize_tdesc_amd64_avx_mpx ();
3242 initialize_tdesc_amd64_avx_avx512 ();
3243 initialize_tdesc_amd64_avx_mpx_avx512_pku ();
3244
3245 initialize_tdesc_x32 ();
3246 initialize_tdesc_x32_avx ();
3247 initialize_tdesc_x32_avx_avx512 ();
3248 }
3249 \f
3250
3251 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3252 sense that the instruction pointer and data pointer are simply
3253 64-bit offsets into the code segment and the data segment instead
3254 of a selector offset pair. The functions below store the upper 32
3255 bits of these pointers (instead of just the 16 bits of the segment
3256 selector). */
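
/* In the 64-bit layout, bytes 8-15 of the FXSAVE area hold the
64-bit FIP and bytes 16-23 the 64-bit FDP, so their upper halves
live at offsets 12 and 20 - the offsets used below. */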
3257
3258 /* Fill register REGNUM in REGCACHE with the appropriate
3259 floating-point or SSE register value from *FXSAVE. If REGNUM is
3260 -1, do this for all registers. This function masks off any of the
3261 reserved bits in *FXSAVE. */
3262
3263 void
3264 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3265 const void *fxsave)
3266 {
3267 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3268 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3269
3270 i387_supply_fxsave (regcache, regnum, fxsave);
3271
3272 if (fxsave
3273 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3274 {
3275 const gdb_byte *regs = (const gdb_byte *) fxsave;
3276
3277 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3278 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3279 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3280 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3281 }
3282 }
3283
3284 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3285
3286 void
3287 amd64_supply_xsave (struct regcache *regcache, int regnum,
3288 const void *xsave)
3289 {
3290 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3291 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3292
3293 i387_supply_xsave (regcache, regnum, xsave);
3294
3295 if (xsave
3296 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3297 {
3298 const gdb_byte *regs = (const gdb_byte *) xsave;
3299
3300 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3301 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3302 regs + 12);
3303 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3304 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3305 regs + 20);
3306 }
3307 }
3308
3309 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3310 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3311 all registers. This function doesn't touch any of the reserved
3312 bits in *FXSAVE. */
3313
3314 void
3315 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3316 void *fxsave)
3317 {
3318 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3319 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3320 gdb_byte *regs = (gdb_byte *) fxsave;
3321
3322 i387_collect_fxsave (regcache, regnum, fxsave);
3323
3324 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3325 {
3326 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3327 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3328 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3329 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3330 }
3331 }
3332
3333 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3334
3335 void
3336 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3337 void *xsave, int gcore)
3338 {
3339 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3340 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3341 gdb_byte *regs = (gdb_byte *) xsave;
3342
3343 i387_collect_xsave (regcache, regnum, xsave, gcore);
3344
3345 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3346 {
3347 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3348 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3349 regs + 12);
3350 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3351 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3352 regs + 20);
3353 }
3354 }