gdbarch: add instruction predicate methods
gdb/amd64-tdep.c
/* Target-dependent code for AMD64.

   Copyright (C) 2001-2014 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "opcode/i386.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"
#include "disasm.h"
#include "gdb_assert.h"
#include "exceptions.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"

#include "features/i386/amd64.c"
#include "features/i386/amd64-avx.c"
#include "features/i386/amd64-mpx.c"
#include "features/i386/x32.c"
#include "features/i386/x32-avx.c"

#include "ax.h"
#include "ax-gdb.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning (_("Unmapped DWARF Register #%d encountered."), reg);
  else if (ymm0_regnum >= 0
	   && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}
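
/* For illustration (not from the psABI text itself): DWARF register
   17 is the first "SSE Registers" entry above and so maps to
   AMD64_XMM0_REGNUM; on a target with AVX (ymm0_regnum >= 0) the
   result is then shifted to the corresponding %ymm pseudo-register,
   whose low 16 bytes alias %xmm0.  */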

/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}

/* Register names for byte pseudo-registers.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};

/* Return the name of register REGNUM.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}

static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
				  struct regcache *regcache,
				  int regnum)
{
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum register_status status;
  struct value *result_value;
  gdb_byte *buf;

  result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  buf = value_contents_raw (result_value);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Special handling for AH, BH, CH, DH.  */
	  status = regcache_raw_read (regcache,
				      gpnum - AMD64_NUM_LOWER_BYTE_REGS,
				      raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf + 1, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
      else
	{
	  status = regcache_raw_read (regcache, gpnum, raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      /* Extract (always little endian).  */
      status = regcache_raw_read (regcache, gpnum, raw_buf);
      if (status == REG_VALID)
	memcpy (buf, raw_buf, 4);
      else
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
    }
  else
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
					  result_value);

  return result_value;
}

static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
			     struct regcache *regcache,
			     int regnum, const gdb_byte *buf)
{
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Read ... AH, BH, CH, DH.  */
	  regcache_raw_read (regcache,
			     gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf + 1, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache,
			      gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	}
      else
	{
	  /* Read ...  */
	  regcache_raw_read (regcache, gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache, gpnum, raw_buf);
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      /* Read ...  */
      regcache_raw_read (regcache, gpnum, raw_buf);
      /* ... Modify ... (always little endian).  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache_raw_write (regcache, gpnum, raw_buf);
    }
  else
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}

\f

/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
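
/* A few illustrative merges: INTEGER combined with SSE yields INTEGER
   (rule (d)); SSE combined with NO_CLASS stays SSE (rule (b)); X87
   combined with SSE yields MEMORY (rule (e)).  */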

static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);

/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    class[1] = amd64_merge_classes (class[1], subclass[0]);
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64.  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Arguments of complex T where T is one of the types float or
     double get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };  */
  else if (code == TYPE_CODE_COMPLEX && len == 8)
    class[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    class[0] = class[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    class[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
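
/* Worked example (illustrative): for "struct { int i; float f; }"
   both fields land in the first eightbyte, so the aggregate merge
   computes class[0] = amd64_merge_classes (AMD64_INTEGER, AMD64_SSE)
   = AMD64_INTEGER, and the 8-byte struct travels in a single integer
   register.  */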

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
     returned in %st0 and the imaginary part in %st1.  */
  if (class[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
	  regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
	  regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
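
/* For instance (illustrative), a 16-byte "struct { long l; double d; }"
   classifies as { INTEGER, SSE }, so the loop above moves l through
   %rax (eightbyte 0) and d through %xmm0 (eightbyte 1).  */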
\f

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
	 argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used".  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
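
/* E.g. (illustrative): for a prototype-less call such as
   printf ("%g", 0.5), the double lands in %xmm0, sse_reg ends up as
   1, and %al therefore tells the callee that one vector register is
   live.  */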

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass the "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
\f
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};

/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
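
/* Usage note: the tables are indexed by opcode byte.  E.g.
   onebyte_has_modrm[0xff] is 1 (the inc/dec/call/jmp/push group
   always carries a ModRM byte), while onebyte_has_modrm[0x90] is 0
   since "nop" stands alone.  */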

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}
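
/* E.g. (illustrative): given the bytes f0 48 87 0e
   ("lock xchg %rcx,(%rsi)"), the 0xf0 lock prefix is stepped over
   and a pointer to the 0x48 REX byte is returned; REX itself is
   left for the caller to handle.  */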

/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
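
/* Worked example (illustrative): for "add %rbx,(%rcx)" the ModRM
   byte names %rbx (reg) and %rcx (r/m), and %rax, %rdx and %rsp are
   always avoided, so the mask covers architecture regs {0,1,2,3,4}
   and the first free register returned is 5, i.e. %rbp.  */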

/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
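
/* E.g. (illustrative): for 48 8b 05 00 00 00 00
   ("mov 0x0(%rip),%rax") this yields rex_offset = 0,
   opcode_offset = 1, opcode_len = 1 and modrm_offset = 2.  */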

/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
					  dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway; tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
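
/* Concretely (illustrative): if the copied insn is
   "mov 0x0(%rip),%rax" (48 8b 05 00 00 00 00) and %rcx is the free
   register picked above, the ModRM byte 0x05 is rewritten to 0x81,
   giving "mov 0x0(%rcx),%rax", with %rcx preloaded with
   FROM + insn_length.  */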

static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      struct displaced_step_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}

static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}

/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}

static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;
  gdb_byte *buf;
  int len, classification;

  len = gdbarch_max_insn_length (gdbarch);
  buf = alloca (len);

  read_code (addr, buf, len);
  amd64_get_insn_details (buf, &details);

  classification = pred (&details);

  return classification;
}

/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}

/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_closure *dsc,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf;
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: fixup (%s, %s), "
			"insn = 0x%02x 0x%02x ...\n",
			paddress (gdbarch, from), paddress (gdbarch, to),
			insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
			    dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures it's a nop; we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: syscall changed %%rip; "
				"not relocating\n");
	}
      else
	{
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: "
				"relocated %%rip from %s to %s\n",
				paddress (gdbarch, orig_rip),
				paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      /* Mask to the full 64 bits; a 32-bit mask would corrupt return
	 addresses above 4GB.  */
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: relocated return addr at %s "
			    "to %s\n",
			    paddress (gdbarch, rsp),
			    paddress (gdbarch, retaddr));
    }
}

/* If the instruction INSN uses RIP-relative addressing, return the
   offset into the raw INSN where the displacement to be adjusted is
   found.  Returns 0 if the instruction doesn't use RIP-relative
   addressing.  */

static int
rip_relative_offset (struct amd64_insn *insn)
{
  if (insn->modrm_offset != -1)
    {
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The displacement is found right after the ModRM byte.  */
	  return insn->modrm_offset + 1;
	}
    }

  return 0;
}
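
/* E.g. (illustrative): for "lea 0x0(%rip),%rdi"
   (48 8d 3d 00 00 00 00) the ModRM byte 0x3d has mod = 00 and
   r/m = 101, so the 32-bit displacement starts at offset 3, which is
   what gets returned.  */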

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static void
amd64_relocate_instruction (struct gdbarch *gdbarch,
			    CORE_ADDR *to, CORE_ADDR oldloc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels.  */
  int fixup_sentinel_space = len;
  gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
  struct amd64_insn insn_details;
  int offset = 0;
  LONGEST rel32, newrel;
  gdb_byte *insn;
  int insn_length;

  read_memory (oldloc, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  insn = buf;
  amd64_get_insn_details (insn, &insn_details);

  insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Adjust calls with 32-bit relative addresses as push/jump, with
     the address pushed being the location where the original call in
     the user program would return to.  */
  if (insn[0] == 0xe8)
    {
      gdb_byte push_buf[16];
      unsigned int ret_addr;

      /* Where "ret" in the original code will return to.  */
      ret_addr = oldloc + insn_length;
      push_buf[0] = 0x68; /* pushq $...  */
      store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
      /* Push the push.  */
      append_insns (to, 5, push_buf);

      /* Convert the relative call to a relative jump.  */
      insn[0] = 0xe9;

      /* Adjust the destination offset.  */
      rel32 = extract_signed_integer (insn + 1, 4, byte_order);
      newrel = (oldloc - *to) + rel32;
      store_signed_integer (insn + 1, 4, byte_order, newrel);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "Adjusted insn rel32=%s at %s to"
			    " rel32=%s at %s\n",
			    hex_string (rel32), paddress (gdbarch, oldloc),
			    hex_string (newrel), paddress (gdbarch, *to));

      /* Write the adjusted jump into its displaced location.  */
      append_insns (to, 5, insn);
      return;
    }

  offset = rip_relative_offset (&insn_details);
  if (!offset)
    {
      /* Adjust jumps with 32-bit relative addresses.  Calls are
	 already handled above.  */
      if (insn[0] == 0xe9)
	offset = 1;
      /* Adjust conditional jumps.  */
      else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
	offset = 2;
    }

  if (offset)
    {
      rel32 = extract_signed_integer (insn + offset, 4, byte_order);
      newrel = (oldloc - *to) + rel32;
      store_signed_integer (insn + offset, 4, byte_order, newrel);
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "Adjusted insn rel32=%s at %s to"
			    " rel32=%s at %s\n",
			    hex_string (rel32), paddress (gdbarch, oldloc),
			    hex_string (newrel), paddress (gdbarch, *to));
    }

  /* Write the adjusted instruction into its displaced location.  */
  append_insns (to, insn_length, buf);
}
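
/* For example (illustrative): a 5-byte "call foo" (e8 rel32) at
   OLDLOC becomes "pushq $(OLDLOC + 5)" (68 imm32) followed by
   "jmp foo" (e9 rel32'), where rel32' = rel32 + (OLDLOC - *TO) so
   the jump still reaches foo from its displaced location.  */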

\f
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  int base_p;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Initialize a frame cache.  */

static void
amd64_init_frame_cache (struct amd64_frame_cache *cache)
{
  int i;

  /* Base address.  */
  cache->base = 0;
  cache->base_p = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).
     The values start out as being offsets, and are later converted to
     addresses (at which point -1 is interpreted as an address, still meaning
     "invalid").  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;
}

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
  amd64_init_frame_cache (cache);
  return cache;
}
/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align the stack before the frame
     gets set up:

     1. Use a caller-saved register:

	leaq 8(%rsp), %reg
	andq $-XXX, %rsp
	pushq -8(%reg)

     2. Use a callee-saved register:

	pushq %reg
	leaq 16(%rsp), %reg
	andq $-XXX, %rsp
	pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check the caller-saved register case.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
1833 if ((buf[0] & 0xfb) == 0x48
1834 && buf[1] == 0x8d
1835 && buf[3] == 0x24
1836 && buf[4] == 0x8)
1837 {
1838       /* MOD must be binary 01 and R/M must be binary 100.  */
1839 if ((buf[2] & 0xc7) != 0x44)
1840 return pc;
1841
1842 /* REG has register number. */
1843 reg = (buf[2] >> 3) & 7;
1844
1845 /* Check the REX.R bit. */
1846 if (buf[0] == 0x4c)
1847 reg += 8;
1848
1849 offset = 5;
1850 }
1851 else
1852 {
1853       /* Check callee-saved register.  The first instruction
1854 has to be "pushq %reg". */
1855 reg = 0;
1856 if ((buf[0] & 0xf8) == 0x50)
1857 offset = 0;
1858 else if ((buf[0] & 0xf6) == 0x40
1859 && (buf[1] & 0xf8) == 0x50)
1860 {
1861 /* Check the REX.B bit. */
1862 if ((buf[0] & 1) != 0)
1863 reg = 8;
1864
1865 offset = 1;
1866 }
1867 else
1868 return pc;
1869
1870 /* Get register. */
1871 reg += buf[offset] & 0x7;
1872
1873 offset++;
1874
1875 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1876 if ((buf[offset] & 0xfb) != 0x48
1877 || buf[offset + 1] != 0x8d
1878 || buf[offset + 3] != 0x24
1879 || buf[offset + 4] != 0x10)
1880 return pc;
1881
1882       /* MOD must be binary 01 and R/M must be binary 100.  */
1883 if ((buf[offset + 2] & 0xc7) != 0x44)
1884 return pc;
1885
1886 /* REG has register number. */
1887 r = (buf[offset + 2] >> 3) & 7;
1888
1889 /* Check the REX.R bit. */
1890 if (buf[offset] == 0x4c)
1891 r += 8;
1892
1893 /* Registers in pushq and leaq have to be the same. */
1894 if (reg != r)
1895 return pc;
1896
1897 offset += 5;
1898 }
1899
1900   /* Register can't be %rsp or %rbp.  */
1901 if (reg == 4 || reg == 5)
1902 return pc;
1903
1904 /* The next instruction has to be "andq $-XXX, %rsp". */
1905 if (buf[offset] != 0x48
1906 || buf[offset + 2] != 0xe4
1907 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1908 return pc;
1909
1910 offset_and = offset;
1911 offset += buf[offset + 1] == 0x81 ? 7 : 4;
1912
1913 /* The next instruction has to be "pushq -8(%reg)". */
1914 r = 0;
1915 if (buf[offset] == 0xff)
1916 offset++;
1917 else if ((buf[offset] & 0xf6) == 0x40
1918 && buf[offset + 1] == 0xff)
1919 {
1920 /* Check the REX.B bit. */
1921 if ((buf[offset] & 0x1) != 0)
1922 r = 8;
1923 offset += 2;
1924 }
1925 else
1926 return pc;
1927
1928   /* 8-bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
1929 01. */
1930 if (buf[offset + 1] != 0xf8
1931 || (buf[offset] & 0xf8) != 0x70)
1932 return pc;
1933
1934 /* R/M has register. */
1935 r += buf[offset] & 7;
1936
1937 /* Registers in leaq and pushq have to be the same. */
1938 if (reg != r)
1939 return pc;
1940
1941 if (current_pc > pc + offset_and)
1942 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
1943
1944 return min (pc + offset + 2, current_pc);
1945 }
1946
1947 /* Similar to amd64_analyze_stack_align for x32. */
1948
1949 static CORE_ADDR
1950 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1951 struct amd64_frame_cache *cache)
1952 {
1953   /* There are two code sequences to re-align the stack before the frame
1954 gets set up:
1955
1956      1. Use a caller-saved register:
1957
1958 leaq 8(%rsp), %reg
1959 andq $-XXX, %rsp
1960 pushq -8(%reg)
1961
1962 or
1963
1964 [addr32] leal 8(%rsp), %reg
1965 andl $-XXX, %esp
1966 [addr32] pushq -8(%reg)
1967
1968      2. Use a callee-saved register:
1969
1970 pushq %reg
1971 leaq 16(%rsp), %reg
1972 andq $-XXX, %rsp
1973 pushq -8(%reg)
1974
1975 or
1976
1977 pushq %reg
1978 [addr32] leal 16(%rsp), %reg
1979 andl $-XXX, %esp
1980 [addr32] pushq -8(%reg)
1981
1982 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1983
1984 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1985 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1986
1987 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
1988
1989 0x83 0xe4 0xf0 andl $-16, %esp
1990 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
1991 */
1992
1993 gdb_byte buf[19];
1994 int reg, r;
1995 int offset, offset_and;
1996
1997 if (target_read_memory (pc, buf, sizeof buf))
1998 return pc;
1999
2000 /* Skip optional addr32 prefix. */
2001 offset = buf[0] == 0x67 ? 1 : 0;
2002
2003   /* Check caller-saved register.  The first instruction has
2004 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2005 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2006 && buf[offset + 1] == 0x8d
2007 && buf[offset + 3] == 0x24
2008 && buf[offset + 4] == 0x8)
2009 {
2010       /* MOD must be binary 01 and R/M must be binary 100.  */
2011 if ((buf[offset + 2] & 0xc7) != 0x44)
2012 return pc;
2013
2014 /* REG has register number. */
2015 reg = (buf[offset + 2] >> 3) & 7;
2016
2017 /* Check the REX.R bit. */
2018 if ((buf[offset] & 0x4) != 0)
2019 reg += 8;
2020
2021 offset += 5;
2022 }
2023 else
2024 {
2025       /* Check callee-saved register.  The first instruction
2026 has to be "pushq %reg". */
2027 reg = 0;
2028 if ((buf[offset] & 0xf6) == 0x40
2029 && (buf[offset + 1] & 0xf8) == 0x50)
2030 {
2031 /* Check the REX.B bit. */
2032 if ((buf[offset] & 1) != 0)
2033 reg = 8;
2034
2035 offset += 1;
2036 }
2037 else if ((buf[offset] & 0xf8) != 0x50)
2038 return pc;
2039
2040 /* Get register. */
2041 reg += buf[offset] & 0x7;
2042
2043 offset++;
2044
2045 /* Skip optional addr32 prefix. */
2046 if (buf[offset] == 0x67)
2047 offset++;
2048
2049 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2050 "leal 16(%rsp), %reg". */
2051 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2052 || buf[offset + 1] != 0x8d
2053 || buf[offset + 3] != 0x24
2054 || buf[offset + 4] != 0x10)
2055 return pc;
2056
2057       /* MOD must be binary 01 and R/M must be binary 100.  */
2058 if ((buf[offset + 2] & 0xc7) != 0x44)
2059 return pc;
2060
2061 /* REG has register number. */
2062 r = (buf[offset + 2] >> 3) & 7;
2063
2064 /* Check the REX.R bit. */
2065 if ((buf[offset] & 0x4) != 0)
2066 r += 8;
2067
2068 /* Registers in pushq and leaq have to be the same. */
2069 if (reg != r)
2070 return pc;
2071
2072 offset += 5;
2073 }
2074
2075   /* Register can't be %rsp or %rbp.  */
2076 if (reg == 4 || reg == 5)
2077 return pc;
2078
2079 /* The next instruction may be "andq $-XXX, %rsp" or
2080 "andl $-XXX, %esp". */
2081 if (buf[offset] != 0x48)
2082 offset--;
2083
2084 if (buf[offset + 2] != 0xe4
2085 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2086 return pc;
2087
2088 offset_and = offset;
2089 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2090
2091 /* Skip optional addr32 prefix. */
2092 if (buf[offset] == 0x67)
2093 offset++;
2094
2095 /* The next instruction has to be "pushq -8(%reg)". */
2096 r = 0;
2097 if (buf[offset] == 0xff)
2098 offset++;
2099 else if ((buf[offset] & 0xf6) == 0x40
2100 && buf[offset + 1] == 0xff)
2101 {
2102 /* Check the REX.B bit. */
2103 if ((buf[offset] & 0x1) != 0)
2104 r = 8;
2105 offset += 2;
2106 }
2107 else
2108 return pc;
2109
2110   /* 8-bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
2111 01. */
2112 if (buf[offset + 1] != 0xf8
2113 || (buf[offset] & 0xf8) != 0x70)
2114 return pc;
2115
2116 /* R/M has register. */
2117 r += buf[offset] & 7;
2118
2119 /* Registers in leaq and pushq have to be the same. */
2120 if (reg != r)
2121 return pc;
2122
2123 if (current_pc > pc + offset_and)
2124 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2125
2126 return min (pc + offset + 2, current_pc);
2127 }
2128
2129 /* Do a limited analysis of the prologue at PC and update CACHE
2130 accordingly. Bail out early if CURRENT_PC is reached. Return the
2131 address where the analysis stopped.
2132
2133 We will handle only functions beginning with:
2134
2135 pushq %rbp 0x55
2136 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2137
2138 or (for the X32 ABI):
2139
2140 pushq %rbp 0x55
2141 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2142
2143 Any function that doesn't start with one of these sequences will be
2144 assumed to have no prologue and thus no valid frame pointer in
2145 %rbp. */
2146
2147 static CORE_ADDR
2148 amd64_analyze_prologue (struct gdbarch *gdbarch,
2149 CORE_ADDR pc, CORE_ADDR current_pc,
2150 struct amd64_frame_cache *cache)
2151 {
2152 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2153 /* There are two variations of movq %rsp, %rbp. */
2154 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2155 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2156 /* Ditto for movl %esp, %ebp. */
2157 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2158 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2159
2160 gdb_byte buf[3];
2161 gdb_byte op;
2162
2163 if (current_pc <= pc)
2164 return current_pc;
2165
2166 if (gdbarch_ptr_bit (gdbarch) == 32)
2167 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2168 else
2169 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2170
2171 op = read_code_unsigned_integer (pc, 1, byte_order);
2172
2173 if (op == 0x55) /* pushq %rbp */
2174 {
2175 /* Take into account that we've executed the `pushq %rbp' that
2176 starts this instruction sequence. */
2177 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2178 cache->sp_offset += 8;
2179
2180 /* If that's all, return now. */
2181 if (current_pc <= pc + 1)
2182 return current_pc;
2183
2184 read_code (pc + 1, buf, 3);
2185
2186 /* Check for `movq %rsp, %rbp'. */
2187 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2188 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2189 {
2190 /* OK, we actually have a frame. */
2191 cache->frameless_p = 0;
2192 return pc + 4;
2193 }
2194
2195       /* For X32, also check for `movl %esp, %ebp'.  */
2196 if (gdbarch_ptr_bit (gdbarch) == 32)
2197 {
2198 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2199 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2200 {
2201 /* OK, we actually have a frame. */
2202 cache->frameless_p = 0;
2203 return pc + 3;
2204 }
2205 }
2206
2207 return pc + 1;
2208 }
2209
2210 return pc;
2211 }
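/* To make the bookkeeping above concrete (an illustrative trace, not
   an extra code path): for a function opening with "pushq %rbp;
   movq %rsp, %rbp", the analysis records the saved %rbp at offset 0
   from the frame base, bumps sp_offset by 8 for the push, clears
   frameless_p, and returns pc + 4 (1 byte of push plus 3 bytes of
   mov) as the first address past the recognized prologue.  */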
2212
2213 /* Work around false termination of prologue - GCC PR debug/48827.
2214
2215    START_PC is the first instruction of a function; PC is the address up to
2216    which prologue analysis has already advanced.  Return PC if there is nothing to do.
2217
2218 84 c0 test %al,%al
2219 74 23 je after
2220 <-- here is 0 lines advance - the false prologue end marker.
2221 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2222 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2223 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2224 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2225 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2226 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2227 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2228 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2229 after: */
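/* A hedged note on the scan below: the ModRM byte of each "movaps"
   determines the instruction length.  In the trace above, 0x4d
   (0b01001101, MOD = 01) implies an 8-bit displacement and a 4-byte
   instruction, while 0x85 (0b10000101, MOD = 10) implies a 32-bit
   displacement and a 7-byte instruction, matching the two
   "offset +=" branches in the loop.  */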
2230
2231 static CORE_ADDR
2232 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2233 {
2234 struct symtab_and_line start_pc_sal, next_sal;
2235 gdb_byte buf[4 + 8 * 7];
2236 int offset, xmmreg;
2237
2238 if (pc == start_pc)
2239 return pc;
2240
2241 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2242 if (start_pc_sal.symtab == NULL
2243 || producer_is_gcc_ge_4 (start_pc_sal.symtab->producer) < 6
2244 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2245 return pc;
2246
2247 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2248 if (next_sal.line != start_pc_sal.line)
2249 return pc;
2250
2251   /* START_PC can be from overlaid memory; we ignore that here.  */
2252 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2253 return pc;
2254
2255 /* test %al,%al */
2256 if (buf[0] != 0x84 || buf[1] != 0xc0)
2257 return pc;
2258 /* je AFTER */
2259 if (buf[2] != 0x74)
2260 return pc;
2261
2262 offset = 4;
2263 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2264 {
2265       /* 0x0f 0x29 0b??rrr101 movaps %xmm<rrr>,-0x??(%rbp) */
2266 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2267 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2268 return pc;
2269
2270 /* 0b01?????? */
2271 if ((buf[offset + 2] & 0xc0) == 0x40)
2272 {
2273 /* 8-bit displacement. */
2274 offset += 4;
2275 }
2276 /* 0b10?????? */
2277 else if ((buf[offset + 2] & 0xc0) == 0x80)
2278 {
2279 /* 32-bit displacement. */
2280 offset += 7;
2281 }
2282 else
2283 return pc;
2284 }
2285
2286 /* je AFTER */
2287 if (offset - 4 != buf[3])
2288 return pc;
2289
2290 return next_sal.end;
2291 }
2292
2293 /* Return PC of first real instruction. */
2294
2295 static CORE_ADDR
2296 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2297 {
2298 struct amd64_frame_cache cache;
2299 CORE_ADDR pc;
2300 CORE_ADDR func_addr;
2301
2302 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2303 {
2304 CORE_ADDR post_prologue_pc
2305 = skip_prologue_using_sal (gdbarch, func_addr);
2306 struct symtab *s = find_pc_symtab (func_addr);
2307
2308 /* Clang always emits a line note before the prologue and another
2309 one after. We trust clang to emit usable line notes. */
2310 if (post_prologue_pc
2311 && (s != NULL
2312 && s->producer != NULL
2313 && strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
2314 return max (start_pc, post_prologue_pc);
2315 }
2316
2317 amd64_init_frame_cache (&cache);
2318 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2319 &cache);
2320 if (cache.frameless_p)
2321 return start_pc;
2322
2323 return amd64_skip_xmm_prologue (pc, start_pc);
2324 }
2325 \f
2326
2327 /* Normal frames. */
2328
2329 static void
2330 amd64_frame_cache_1 (struct frame_info *this_frame,
2331 struct amd64_frame_cache *cache)
2332 {
2333 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2334 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2335 gdb_byte buf[8];
2336 int i;
2337
2338 cache->pc = get_frame_func (this_frame);
2339 if (cache->pc != 0)
2340 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2341 cache);
2342
2343 if (cache->frameless_p)
2344 {
2345 /* We didn't find a valid frame. If we're at the start of a
2346          function, or somewhere halfway through its prologue, the function's
2347          frame probably hasn't been fully set up yet.  Try to
2348 reconstruct the base address for the stack frame by looking
2349 at the stack pointer. For truly "frameless" functions this
2350 might work too. */
2351
2352 if (cache->saved_sp_reg != -1)
2353 {
2354 /* Stack pointer has been saved. */
2355 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2356 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2357
2358 	  /* We're in the middle of aligning the stack.  */
2359 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2360 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2361
2362 /* This will be added back below. */
2363 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2364 }
2365 else
2366 {
2367 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2368 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2369 + cache->sp_offset;
2370 }
2371 }
2372 else
2373 {
2374 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2375 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2376 }
2377
2378 /* Now that we have the base address for the stack frame we can
2379 calculate the value of %rsp in the calling frame. */
2380 cache->saved_sp = cache->base + 16;
2381
2382 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2383 frame we find it at the same offset from the reconstructed base
2384      address.  If we're in the middle of aligning the stack, %rip is handled
2385 differently (see above). */
2386 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2387 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2388
2389 /* Adjust all the saved registers such that they contain addresses
2390 instead of offsets. */
2391 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2392 if (cache->saved_regs[i] != -1)
2393 cache->saved_regs[i] += cache->base;
2394
2395 cache->base_p = 1;
2396 }
2397
2398 static struct amd64_frame_cache *
2399 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2400 {
2401 volatile struct gdb_exception ex;
2402 struct amd64_frame_cache *cache;
2403
2404 if (*this_cache)
2405 return *this_cache;
2406
2407 cache = amd64_alloc_frame_cache ();
2408 *this_cache = cache;
2409
2410 TRY_CATCH (ex, RETURN_MASK_ERROR)
2411 {
2412 amd64_frame_cache_1 (this_frame, cache);
2413 }
2414 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2415 throw_exception (ex);
2416
2417 return cache;
2418 }
2419
2420 static enum unwind_stop_reason
2421 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2422 void **this_cache)
2423 {
2424 struct amd64_frame_cache *cache =
2425 amd64_frame_cache (this_frame, this_cache);
2426
2427 if (!cache->base_p)
2428 return UNWIND_UNAVAILABLE;
2429
2430 /* This marks the outermost frame. */
2431 if (cache->base == 0)
2432 return UNWIND_OUTERMOST;
2433
2434 return UNWIND_NO_REASON;
2435 }
2436
2437 static void
2438 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2439 struct frame_id *this_id)
2440 {
2441 struct amd64_frame_cache *cache =
2442 amd64_frame_cache (this_frame, this_cache);
2443
2444 if (!cache->base_p)
2445 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2446 else if (cache->base == 0)
2447 {
2448 /* This marks the outermost frame. */
2449 return;
2450 }
2451 else
2452 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2453 }
2454
2455 static struct value *
2456 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2457 int regnum)
2458 {
2459 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2460 struct amd64_frame_cache *cache =
2461 amd64_frame_cache (this_frame, this_cache);
2462
2463 gdb_assert (regnum >= 0);
2464
2465 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2466 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2467
2468 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2469 return frame_unwind_got_memory (this_frame, regnum,
2470 cache->saved_regs[regnum]);
2471
2472 return frame_unwind_got_register (this_frame, regnum, regnum);
2473 }
2474
2475 static const struct frame_unwind amd64_frame_unwind =
2476 {
2477 NORMAL_FRAME,
2478 amd64_frame_unwind_stop_reason,
2479 amd64_frame_this_id,
2480 amd64_frame_prev_register,
2481 NULL,
2482 default_frame_sniffer
2483 };
2484 \f
2485 /* Generate a bytecode expression to get the value of the saved PC. */
2486
2487 static void
2488 amd64_gen_return_address (struct gdbarch *gdbarch,
2489 struct agent_expr *ax, struct axs_value *value,
2490 CORE_ADDR scope)
2491 {
2492 /* The following sequence assumes the traditional use of the base
2493 register. */
2494 ax_reg (ax, AMD64_RBP_REGNUM);
2495 ax_const_l (ax, 8);
2496 ax_simple (ax, aop_add);
2497 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2498 value->kind = axs_lvalue_memory;
2499 }
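/* Roughly, the generated agent expression behaves like the C
   expression *(CORE_ADDR *) ($rbp + 8): the register and the
   constant are pushed and added, and the result is flagged as an
   lvalue in memory so the consumer reads the saved %rip from that
   address (an informal paraphrase, not additional code).  */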
2500 \f
2501
2502 /* Signal trampolines. */
2503
2504 /* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
2505 64-bit variants. This would require using identical frame caches
2506 on both platforms. */
2507
2508 static struct amd64_frame_cache *
2509 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2510 {
2511 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2512 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2513 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2514 volatile struct gdb_exception ex;
2515 struct amd64_frame_cache *cache;
2516 CORE_ADDR addr;
2517 gdb_byte buf[8];
2518 int i;
2519
2520 if (*this_cache)
2521 return *this_cache;
2522
2523 cache = amd64_alloc_frame_cache ();
2524
2525 TRY_CATCH (ex, RETURN_MASK_ERROR)
2526 {
2527 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2528 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2529
2530 addr = tdep->sigcontext_addr (this_frame);
2531 gdb_assert (tdep->sc_reg_offset);
2532 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2533 for (i = 0; i < tdep->sc_num_regs; i++)
2534 if (tdep->sc_reg_offset[i] != -1)
2535 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2536
2537 cache->base_p = 1;
2538 }
2539 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2540 throw_exception (ex);
2541
2542 *this_cache = cache;
2543 return cache;
2544 }
2545
2546 static enum unwind_stop_reason
2547 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2548 void **this_cache)
2549 {
2550 struct amd64_frame_cache *cache =
2551 amd64_sigtramp_frame_cache (this_frame, this_cache);
2552
2553 if (!cache->base_p)
2554 return UNWIND_UNAVAILABLE;
2555
2556 return UNWIND_NO_REASON;
2557 }
2558
2559 static void
2560 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2561 void **this_cache, struct frame_id *this_id)
2562 {
2563 struct amd64_frame_cache *cache =
2564 amd64_sigtramp_frame_cache (this_frame, this_cache);
2565
2566 if (!cache->base_p)
2567 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2568 else if (cache->base == 0)
2569 {
2570 /* This marks the outermost frame. */
2571 return;
2572 }
2573 else
2574 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2575 }
2576
2577 static struct value *
2578 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2579 void **this_cache, int regnum)
2580 {
2581 /* Make sure we've initialized the cache. */
2582 amd64_sigtramp_frame_cache (this_frame, this_cache);
2583
2584 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2585 }
2586
2587 static int
2588 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2589 struct frame_info *this_frame,
2590 void **this_cache)
2591 {
2592 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2593
2594 /* We shouldn't even bother if we don't have a sigcontext_addr
2595 handler. */
2596 if (tdep->sigcontext_addr == NULL)
2597 return 0;
2598
2599 if (tdep->sigtramp_p != NULL)
2600 {
2601 if (tdep->sigtramp_p (this_frame))
2602 return 1;
2603 }
2604
2605 if (tdep->sigtramp_start != 0)
2606 {
2607 CORE_ADDR pc = get_frame_pc (this_frame);
2608
2609 gdb_assert (tdep->sigtramp_end != 0);
2610 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2611 return 1;
2612 }
2613
2614 return 0;
2615 }
2616
2617 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2618 {
2619 SIGTRAMP_FRAME,
2620 amd64_sigtramp_frame_unwind_stop_reason,
2621 amd64_sigtramp_frame_this_id,
2622 amd64_sigtramp_frame_prev_register,
2623 NULL,
2624 amd64_sigtramp_frame_sniffer
2625 };
2626 \f
2627
2628 static CORE_ADDR
2629 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2630 {
2631 struct amd64_frame_cache *cache =
2632 amd64_frame_cache (this_frame, this_cache);
2633
2634 return cache->base;
2635 }
2636
2637 static const struct frame_base amd64_frame_base =
2638 {
2639 &amd64_frame_unwind,
2640 amd64_frame_base_address,
2641 amd64_frame_base_address,
2642 amd64_frame_base_address
2643 };
2644
2645 /* Normal frames, but in a function epilogue. */
2646
2647 /* The epilogue is defined here as the 'ret' instruction, which will
2648    follow any instruction such as 'leave' or 'pop %rbp' that destroys
2649 the function's stack frame. */
2650
2651 static int
2652 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2653 {
2654 gdb_byte insn;
2655 struct symtab *symtab;
2656
2657 symtab = find_pc_symtab (pc);
2658 if (symtab && symtab->epilogue_unwind_valid)
2659 return 0;
2660
2661 if (target_read_memory (pc, &insn, 1))
2662 return 0; /* Can't read memory at pc. */
2663
2664 if (insn != 0xc3) /* 'ret' instruction. */
2665 return 0;
2666
2667 return 1;
2668 }
2669
2670 static int
2671 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2672 struct frame_info *this_frame,
2673 void **this_prologue_cache)
2674 {
2675 if (frame_relative_level (this_frame) == 0)
2676 return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
2677 get_frame_pc (this_frame));
2678 else
2679 return 0;
2680 }
2681
2682 static struct amd64_frame_cache *
2683 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2684 {
2685 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2686 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2687 volatile struct gdb_exception ex;
2688 struct amd64_frame_cache *cache;
2689 gdb_byte buf[8];
2690
2691 if (*this_cache)
2692 return *this_cache;
2693
2694 cache = amd64_alloc_frame_cache ();
2695 *this_cache = cache;
2696
2697 TRY_CATCH (ex, RETURN_MASK_ERROR)
2698 {
2699       /* Cache base will be %rsp plus cache->sp_offset (-8).  */
2700 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2701 cache->base = extract_unsigned_integer (buf, 8,
2702 byte_order) + cache->sp_offset;
2703
2704 /* Cache pc will be the frame func. */
2705 cache->pc = get_frame_pc (this_frame);
2706
2707       /* The saved %rsp will be at cache->base plus 16.  */
2708 cache->saved_sp = cache->base + 16;
2709
2710       /* The saved %rip will be at cache->base plus 8.  */
2711 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2712
2713 cache->base_p = 1;
2714 }
2715 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2716 throw_exception (ex);
2717
2718 return cache;
2719 }
2720
2721 static enum unwind_stop_reason
2722 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2723 void **this_cache)
2724 {
2725 struct amd64_frame_cache *cache
2726 = amd64_epilogue_frame_cache (this_frame, this_cache);
2727
2728 if (!cache->base_p)
2729 return UNWIND_UNAVAILABLE;
2730
2731 return UNWIND_NO_REASON;
2732 }
2733
2734 static void
2735 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2736 void **this_cache,
2737 struct frame_id *this_id)
2738 {
2739 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2740 this_cache);
2741
2742 if (!cache->base_p)
2743 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2744 else
2745 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2746 }
2747
2748 static const struct frame_unwind amd64_epilogue_frame_unwind =
2749 {
2750 NORMAL_FRAME,
2751 amd64_epilogue_frame_unwind_stop_reason,
2752 amd64_epilogue_frame_this_id,
2753 amd64_frame_prev_register,
2754 NULL,
2755 amd64_epilogue_frame_sniffer
2756 };
2757
2758 static struct frame_id
2759 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2760 {
2761 CORE_ADDR fp;
2762
2763 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2764
2765 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2766 }
2767
2768 /* 16-byte align the SP per frame requirements.  */
2769
2770 static CORE_ADDR
2771 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2772 {
2773 return sp & -(CORE_ADDR)16;
2774 }
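/* A quick, hedged sanity check of the mask arithmetic above; the
   helper below is illustrative only (hence #if 0) and not used
   anywhere.  sp & -(CORE_ADDR) 16 clears the low four bits, so an
   unaligned stack pointer always rounds *down*.  */

#if 0 /* Example only.  */
static void
amd64_frame_align_example (struct gdbarch *gdbarch)
{
  /* An unaligned value rounds down to the enclosing 16-byte
     boundary; an already aligned value is unchanged.  */
  gdb_assert (amd64_frame_align (gdbarch, 0x7fffffffe458ULL)
	      == 0x7fffffffe450ULL);
  gdb_assert (amd64_frame_align (gdbarch, 0x7fffffffe450ULL)
	      == 0x7fffffffe450ULL);
}
#endif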
2775 \f
2776
2777 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2778 in the floating-point register set REGSET to register cache
2779 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2780
2781 static void
2782 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2783 int regnum, const void *fpregs, size_t len)
2784 {
2785 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2786
2787 gdb_assert (len == tdep->sizeof_fpregset);
2788 amd64_supply_fxsave (regcache, regnum, fpregs);
2789 }
2790
2791 /* Collect register REGNUM from the register cache REGCACHE and store
2792 it in the buffer specified by FPREGS and LEN as described by the
2793 floating-point register set REGSET. If REGNUM is -1, do this for
2794 all registers in REGSET. */
2795
2796 static void
2797 amd64_collect_fpregset (const struct regset *regset,
2798 const struct regcache *regcache,
2799 int regnum, void *fpregs, size_t len)
2800 {
2801 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2802
2803 gdb_assert (len == tdep->sizeof_fpregset);
2804 amd64_collect_fxsave (regcache, regnum, fpregs);
2805 }
2806
2807 /* Similar to amd64_supply_fpregset, but use XSAVE extended state. */
2808
2809 static void
2810 amd64_supply_xstateregset (const struct regset *regset,
2811 struct regcache *regcache, int regnum,
2812 const void *xstateregs, size_t len)
2813 {
2814 amd64_supply_xsave (regcache, regnum, xstateregs);
2815 }
2816
2817 /* Similar to amd64_collect_fpregset, but use XSAVE extended state. */
2818
2819 static void
2820 amd64_collect_xstateregset (const struct regset *regset,
2821 const struct regcache *regcache,
2822 int regnum, void *xstateregs, size_t len)
2823 {
2824 amd64_collect_xsave (regcache, regnum, xstateregs, 1);
2825 }
2826
2827 /* Return the appropriate register set for the core section identified
2828 by SECT_NAME and SECT_SIZE. */
2829
2830 static const struct regset *
2831 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2832 const char *sect_name, size_t sect_size)
2833 {
2834 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2835
2836 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2837 {
2838 if (tdep->fpregset == NULL)
2839 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2840 amd64_collect_fpregset);
2841
2842 return tdep->fpregset;
2843 }
2844
2845 if (strcmp (sect_name, ".reg-xstate") == 0)
2846 {
2847 if (tdep->xstateregset == NULL)
2848 tdep->xstateregset = regset_alloc (gdbarch,
2849 amd64_supply_xstateregset,
2850 amd64_collect_xstateregset);
2851
2852 return tdep->xstateregset;
2853 }
2854
2855 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2856 }
2857 \f
2858
2859 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2860 %rdi. We expect its value to be a pointer to the jmp_buf structure
2861 from which we extract the address that we will land at. This
2862 address is copied into PC. This routine returns non-zero on
2863 success. */
2864
2865 static int
2866 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2867 {
2868 gdb_byte buf[8];
2869 CORE_ADDR jb_addr;
2870 struct gdbarch *gdbarch = get_frame_arch (frame);
2871 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2872 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2873
2874 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2875 longjmp will land. */
2876 if (jb_pc_offset == -1)
2877 return 0;
2878
2879 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2880   jb_addr = extract_typed_address
2881 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2882 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2883 return 0;
2884
2885 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2886
2887 return 1;
2888 }
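/* Illustrative flow with made-up numbers: if jb_pc_offset is 0x38 and
   %rdi holds 0x601000, the saved PC is read as an 8-byte function
   pointer from 0x601038; a jb_pc_offset of -1 instead makes the
   function report failure, since the jmp_buf layout is then
   unknown.  */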
2889
2890 static const int amd64_record_regmap[] =
2891 {
2892 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2893 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2894 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2895 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2896 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2897 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2898 };
2899
2900 void
2901 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
2902 {
2903 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2904 const struct target_desc *tdesc = info.target_desc;
2905 static const char *const stap_integer_prefixes[] = { "$", NULL };
2906 static const char *const stap_register_prefixes[] = { "%", NULL };
2907 static const char *const stap_register_indirection_prefixes[] = { "(",
2908 NULL };
2909 static const char *const stap_register_indirection_suffixes[] = { ")",
2910 NULL };
2911
2912 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2913 floating-point registers. */
2914 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2915
2916 if (! tdesc_has_registers (tdesc))
2917 tdesc = tdesc_amd64;
2918 tdep->tdesc = tdesc;
2919
2920 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
2921 tdep->register_names = amd64_register_names;
2922
2923 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
2924 {
2925 tdep->ymmh_register_names = amd64_ymmh_names;
2926 tdep->num_ymm_regs = 16;
2927 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
2928 }
2929
2930 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
2931 {
2932 tdep->mpx_register_names = amd64_mpx_names;
2933 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
2934 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
2935 }
2936
2937 tdep->num_byte_regs = 20;
2938 tdep->num_word_regs = 16;
2939 tdep->num_dword_regs = 16;
2940 /* Avoid wiring in the MMX registers for now. */
2941 tdep->num_mmx_regs = 0;
2942
2943 set_gdbarch_pseudo_register_read_value (gdbarch,
2944 amd64_pseudo_register_read_value);
2945 set_gdbarch_pseudo_register_write (gdbarch,
2946 amd64_pseudo_register_write);
2947
2948 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
2949
2950 /* AMD64 has an FPU and 16 SSE registers. */
2951 tdep->st0_regnum = AMD64_ST0_REGNUM;
2952 tdep->num_xmm_regs = 16;
2953
2954 /* This is what all the fuss is about. */
2955 set_gdbarch_long_bit (gdbarch, 64);
2956 set_gdbarch_long_long_bit (gdbarch, 64);
2957 set_gdbarch_ptr_bit (gdbarch, 64);
2958
2959 /* In contrast to the i386, on AMD64 a `long double' actually takes
2960 up 128 bits, even though it's still based on the i387 extended
2961 floating-point format which has only 80 significant bits. */
2962 set_gdbarch_long_double_bit (gdbarch, 128);
2963
2964 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
2965
2966 /* Register numbers of various important registers. */
2967 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
2968 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
2969 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
2970 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
2971
2972 /* The "default" register numbering scheme for AMD64 is referred to
2973 as the "DWARF Register Number Mapping" in the System V psABI.
2974 The preferred debugging format for all known AMD64 targets is
2975 actually DWARF2, and GCC doesn't seem to support DWARF (that is
2976 DWARF-1), but we provide the same mapping just in case. This
2977 mapping is also used for stabs, which GCC does support. */
2978 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2979 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2980
2981 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
2982 be in use on any of the supported AMD64 targets. */
2983
2984 /* Call dummy code. */
2985 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
2986 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
2987 set_gdbarch_frame_red_zone_size (gdbarch, 128);
2988
2989 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
2990 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
2991 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
2992
2993 set_gdbarch_return_value (gdbarch, amd64_return_value);
2994
2995 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
2996
2997 tdep->record_regmap = amd64_record_regmap;
2998
2999 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3000
3001 /* Hook the function epilogue frame unwinder. This unwinder is
3002      prepended to the list, so that it supersedes the other
3003 unwinders in function epilogues. */
3004 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3005
3006 /* Hook the prologue-based frame unwinders. */
3007 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3008 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3009 frame_base_set_default (gdbarch, &amd64_frame_base);
3010
3011 /* If we have a register mapping, enable the generic core file support. */
3012 if (tdep->gregset_reg_offset)
3013 set_gdbarch_regset_from_core_section (gdbarch,
3014 amd64_regset_from_core_section);
3015
3016 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3017
3018 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3019
3020 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3021
3022 /* SystemTap variables and functions. */
3023 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3024 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3025 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3026 stap_register_indirection_prefixes);
3027 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3028 stap_register_indirection_suffixes);
3029 set_gdbarch_stap_is_single_operand (gdbarch,
3030 i386_stap_is_single_operand);
3031 set_gdbarch_stap_parse_special_token (gdbarch,
3032 i386_stap_parse_special_token);
3033 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3034 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3035 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3036 }
3037 \f
3038
3039 static struct type *
3040 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3041 {
3042 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3043
3044 switch (regnum - tdep->eax_regnum)
3045 {
3046 case AMD64_RBP_REGNUM: /* %ebp */
3047 case AMD64_RSP_REGNUM: /* %esp */
3048 return builtin_type (gdbarch)->builtin_data_ptr;
3049 case AMD64_RIP_REGNUM: /* %eip */
3050 return builtin_type (gdbarch)->builtin_func_ptr;
3051 }
3052
3053 return i386_pseudo_register_type (gdbarch, regnum);
3054 }
3055
3056 void
3057 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
3058 {
3059 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3060 const struct target_desc *tdesc = info.target_desc;
3061
3062 amd64_init_abi (info, gdbarch);
3063
3064 if (! tdesc_has_registers (tdesc))
3065 tdesc = tdesc_x32;
3066 tdep->tdesc = tdesc;
3067
3068 tdep->num_dword_regs = 17;
3069 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3070
3071 set_gdbarch_long_bit (gdbarch, 32);
3072 set_gdbarch_ptr_bit (gdbarch, 32);
3073 }
3074
3075 /* Provide a prototype to silence -Wmissing-prototypes. */
3076 void _initialize_amd64_tdep (void);
3077
3078 void
3079 _initialize_amd64_tdep (void)
3080 {
3081 initialize_tdesc_amd64 ();
3082 initialize_tdesc_amd64_avx ();
3083 initialize_tdesc_amd64_mpx ();
3084 initialize_tdesc_x32 ();
3085 initialize_tdesc_x32_avx ();
3086 }
3087 \f
3088
3089 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3090 sense that the instruction pointer and data pointer are simply
3091 64-bit offsets into the code segment and the data segment instead
3092    of a selector/offset pair.  The functions below store the upper 32
3093    bits of these pointers (instead of just the 16 bits of the segment
3094 selector). */
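/* For orientation, a hedged sketch of the layout involved: in the
   64-bit FXSAVE format, bytes 8-15 hold the 64-bit FPU instruction
   pointer and bytes 16-23 the 64-bit operand pointer, so REGS + 12
   and REGS + 20 in the functions below address the upper halves that
   the 32-bit format would use for the FISEG and FOSEG selectors.  */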
3095
3096 /* Fill register REGNUM in REGCACHE with the appropriate
3097 floating-point or SSE register value from *FXSAVE. If REGNUM is
3098 -1, do this for all registers. This function masks off any of the
3099 reserved bits in *FXSAVE. */
3100
3101 void
3102 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3103 const void *fxsave)
3104 {
3105 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3106 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3107
3108 i387_supply_fxsave (regcache, regnum, fxsave);
3109
3110 if (fxsave
3111 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3112 {
3113 const gdb_byte *regs = fxsave;
3114
3115 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3116 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3117 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3118 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3119 }
3120 }
3121
3122 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3123
3124 void
3125 amd64_supply_xsave (struct regcache *regcache, int regnum,
3126 const void *xsave)
3127 {
3128 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3129 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3130
3131 i387_supply_xsave (regcache, regnum, xsave);
3132
3133 if (xsave
3134 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3135 {
3136 const gdb_byte *regs = xsave;
3137
3138 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3139 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3140 regs + 12);
3141 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3142 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3143 regs + 20);
3144 }
3145 }
3146
3147 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3148 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3149 all registers. This function doesn't touch any of the reserved
3150 bits in *FXSAVE. */
3151
3152 void
3153 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3154 void *fxsave)
3155 {
3156 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3157 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3158 gdb_byte *regs = fxsave;
3159
3160 i387_collect_fxsave (regcache, regnum, fxsave);
3161
3162 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3163 {
3164 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3165 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3166 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3167 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3168 }
3169 }
3170
3171 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3172
3173 void
3174 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3175 void *xsave, int gcore)
3176 {
3177 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3178 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3179 gdb_byte *regs = xsave;
3180
3181 i387_collect_xsave (regcache, regnum, xsave, gcore);
3182
3183 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3184 {
3185 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3186 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3187 regs + 12);
3188 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3189 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3190 regs + 20);
3191 }
3192 }