Restore sp for x86.
[deliverable/binutils-gdb.git] / gdb / amd64-tdep.c
1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 Contributed by Jiri Smid, SuSE Labs.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "opcode/i386.h"
25 #include "dis-asm.h"
26 #include "arch-utils.h"
27 #include "block.h"
28 #include "dummy-frame.h"
29 #include "frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
32 #include "inferior.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39
40 #include "gdb_assert.h"
41
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
44
45 #include "features/i386/amd64.c"
46
47 /* Note that the AMD64 architecture was previously known as x86-64.
48 The latter is (forever) engraved into the canonical system name as
49 returned by config.guess, and used as the name for the AMD64 port
50 of GNU/Linux. The BSD's have renamed their ports to amd64; they
51 don't like to shout. For GDB we prefer the amd64_-prefix over the
52 x86_64_-prefix since it's so much easier to type. */
53
54 /* Register information. */
55
/* Names of the raw AMD64 registers, indexed by GDB register number.
   The layout — 16 general-purpose registers, %rip, %eflags, 6 segment
   registers, the x87 stack and control registers, then the SSE
   registers and %mxcsr — must match the AMD64_*_REGNUM constants
   declared in amd64-tdep.h.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Total number of registers.  */
#define AMD64_NUM_REGS	ARRAY_SIZE (amd64_register_names)
76
77 /* The registers used to pass integer arguments during a function call. */
78 static int amd64_dummy_call_integer_regs[] =
79 {
80 AMD64_RDI_REGNUM, /* %rdi */
81 AMD64_RSI_REGNUM, /* %rsi */
82 AMD64_RDX_REGNUM, /* %rdx */
83 AMD64_RCX_REGNUM, /* %rcx */
84 8, /* %r8 */
85 9 /* %r9 */
86 };
87
88 /* DWARF Register Number Mapping as defined in the System V psABI,
89 section 3.6. */
90
91 static int amd64_dwarf_regmap[] =
92 {
93 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
94 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
95 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
96 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
97
98 /* Frame Pointer Register RBP. */
99 AMD64_RBP_REGNUM,
100
101 /* Stack Pointer Register RSP. */
102 AMD64_RSP_REGNUM,
103
104 /* Extended Integer Registers 8 - 15. */
105 8, 9, 10, 11, 12, 13, 14, 15,
106
107 /* Return Address RA. Mapped to RIP. */
108 AMD64_RIP_REGNUM,
109
110 /* SSE Registers 0 - 7. */
111 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
112 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
113 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
114 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
115
116 /* Extended SSE Registers 8 - 15. */
117 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
118 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
119 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
120 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
121
122 /* Floating Point Registers 0-7. */
123 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
124 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
125 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
126 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
127
128 /* Control and Status Flags Register. */
129 AMD64_EFLAGS_REGNUM,
130
131 /* Selector Registers. */
132 AMD64_ES_REGNUM,
133 AMD64_CS_REGNUM,
134 AMD64_SS_REGNUM,
135 AMD64_DS_REGNUM,
136 AMD64_FS_REGNUM,
137 AMD64_GS_REGNUM,
138 -1,
139 -1,
140
141 /* Segment Base Address Registers. */
142 -1,
143 -1,
144 -1,
145 -1,
146
147 /* Special Selector Registers. */
148 -1,
149 -1,
150
151 /* Floating Point Control Registers. */
152 AMD64_MXCSR_REGNUM,
153 AMD64_FCTRL_REGNUM,
154 AMD64_FSTAT_REGNUM
155 };
156
157 static const int amd64_dwarf_regmap_len =
158 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
159
160 /* Convert DWARF register number REG to the appropriate register
161 number used by GDB. */
162
163 static int
164 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
165 {
166 int regnum = -1;
167
168 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
169 regnum = amd64_dwarf_regmap[reg];
170
171 if (regnum == -1)
172 warning (_("Unmapped DWARF Register #%d encountered."), reg);
173
174 return regnum;
175 }
176
/* Map architectural register numbers (the encoding used in AMD64
   instructions, e.g. in the ModRM/REX fields) to GDB register
   numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};
198
199 static const int amd64_arch_regmap_len =
200 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
201
/* Convert architectural register number REG (0..15, instruction
   encoding order) to the appropriate register number used by GDB.
   Asserts that REG is in range.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
212
/* Register names for byte pseudo-registers, indexed relative to
   tdep->al_regnum; same order as the general-purpose registers.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
};
220
/* Register names for word pseudo-registers, indexed relative to
   tdep->ax_regnum.  NOTE: the slot corresponding to %rsp is the empty
   string, so no word-sized name is exposed for it here — presumably
   to avoid clashing with the i386 "sp" register; TODO confirm.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
228
/* Register names for dword pseudo-registers, indexed relative to
   tdep->eax_regnum; same order as the general-purpose registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
};
236
237 /* Return the name of register REGNUM. */
238
239 static const char *
240 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
241 {
242 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
243 if (i386_byte_regnum_p (gdbarch, regnum))
244 return amd64_byte_names[regnum - tdep->al_regnum];
245 else if (i386_word_regnum_p (gdbarch, regnum))
246 return amd64_word_names[regnum - tdep->ax_regnum];
247 else if (i386_dword_regnum_p (gdbarch, regnum))
248 return amd64_dword_names[regnum - tdep->eax_regnum];
249 else
250 return i386_pseudo_register_name (gdbarch, regnum);
251 }
252
253 static void
254 amd64_pseudo_register_read (struct gdbarch *gdbarch,
255 struct regcache *regcache,
256 int regnum, gdb_byte *buf)
257 {
258 gdb_byte raw_buf[MAX_REGISTER_SIZE];
259 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
260
261 if (i386_byte_regnum_p (gdbarch, regnum))
262 {
263 int gpnum = regnum - tdep->al_regnum;
264
265 /* Extract (always little endian). */
266 regcache_raw_read (regcache, gpnum, raw_buf);
267 memcpy (buf, raw_buf, 1);
268 }
269 else if (i386_dword_regnum_p (gdbarch, regnum))
270 {
271 int gpnum = regnum - tdep->eax_regnum;
272 /* Extract (always little endian). */
273 regcache_raw_read (regcache, gpnum, raw_buf);
274 memcpy (buf, raw_buf, 4);
275 }
276 else
277 i386_pseudo_register_read (gdbarch, regcache, regnum, buf);
278 }
279
280 static void
281 amd64_pseudo_register_write (struct gdbarch *gdbarch,
282 struct regcache *regcache,
283 int regnum, const gdb_byte *buf)
284 {
285 gdb_byte raw_buf[MAX_REGISTER_SIZE];
286 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
287
288 if (i386_byte_regnum_p (gdbarch, regnum))
289 {
290 int gpnum = regnum - tdep->al_regnum;
291
292 /* Read ... */
293 regcache_raw_read (regcache, gpnum, raw_buf);
294 /* ... Modify ... (always little endian). */
295 memcpy (raw_buf, buf, 1);
296 /* ... Write. */
297 regcache_raw_write (regcache, gpnum, raw_buf);
298 }
299 else if (i386_dword_regnum_p (gdbarch, regnum))
300 {
301 int gpnum = regnum - tdep->eax_regnum;
302
303 /* Read ... */
304 regcache_raw_read (regcache, gpnum, raw_buf);
305 /* ... Modify ... (always little endian). */
306 memcpy (raw_buf, buf, 4);
307 /* ... Write. */
308 regcache_raw_write (regcache, gpnum, raw_buf);
309 }
310 else
311 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
312 }
313
314 \f
315
316 /* Return the union class of CLASS1 and CLASS2. See the psABI for
317 details. */
318
319 static enum amd64_reg_class
320 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
321 {
322 /* Rule (a): If both classes are equal, this is the resulting class. */
323 if (class1 == class2)
324 return class1;
325
326 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
327 is the other class. */
328 if (class1 == AMD64_NO_CLASS)
329 return class2;
330 if (class2 == AMD64_NO_CLASS)
331 return class1;
332
333 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
334 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
335 return AMD64_MEMORY;
336
337 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
338 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
339 return AMD64_INTEGER;
340
341 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
342 MEMORY is used as class. */
343 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
344 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
345 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
346 return AMD64_MEMORY;
347
348 /* Rule (f): Otherwise class SSE is used. */
349 return AMD64_SSE;
350 }
351
352 /* Return non-zero if TYPE is a non-POD structure or union type. */
353
354 static int
355 amd64_non_pod_p (struct type *type)
356 {
357 /* ??? A class with a base class certainly isn't POD, but does this
358 catch all non-POD structure types? */
359 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
360 return 1;
361
362 return 0;
363 }
364
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  CLASS[0]
   describes the first eightbyte of the object, CLASS[1] the second.
   See the System V psABI, section 3.2.3.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  If the array
	 spills into the second eightbyte, the second eightbyte gets
	 the element class too (unless the recursion already set
	 it).  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* POS/ENDPOS: which eightbyte (0 or 1) the field starts and
	     ends in, derived from its bit position and size.  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  /* A zero bitsize means the field is not a bitfield; use the
	     full size of its type.  */
	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    class[1] = amd64_merge_classes (class[1], subclass[0]);
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
470
471 /* Classify TYPE, and store the result in CLASS. */
472
473 void
474 amd64_classify (struct type *type, enum amd64_reg_class class[2])
475 {
476 enum type_code code = TYPE_CODE (type);
477 int len = TYPE_LENGTH (type);
478
479 class[0] = class[1] = AMD64_NO_CLASS;
480
481 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
482 long, long long, and pointers are in the INTEGER class. Similarly,
483 range types, used by languages such as Ada, are also in the INTEGER
484 class. */
485 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
486 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
487 || code == TYPE_CODE_CHAR
488 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
489 && (len == 1 || len == 2 || len == 4 || len == 8))
490 class[0] = AMD64_INTEGER;
491
492 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
493 are in class SSE. */
494 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
495 && (len == 4 || len == 8))
496 /* FIXME: __m64 . */
497 class[0] = AMD64_SSE;
498
499 /* Arguments of types __float128, _Decimal128 and __m128 are split into
500 two halves. The least significant ones belong to class SSE, the most
501 significant one to class SSEUP. */
502 else if (code == TYPE_CODE_DECFLOAT && len == 16)
503 /* FIXME: __float128, __m128. */
504 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
505
506 /* The 64-bit mantissa of arguments of type long double belongs to
507 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
508 class X87UP. */
509 else if (code == TYPE_CODE_FLT && len == 16)
510 /* Class X87 and X87UP. */
511 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
512
513 /* Aggregates. */
514 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
515 || code == TYPE_CODE_UNION)
516 amd64_classify_aggregate (type, class);
517 }
518
/* Implement the "return_value" gdbarch method for AMD64.  Classify
   TYPE per the psABI and either read the return value into READBUF or
   write WRITEBUF into the return registers.  Exactly one of READBUF
   and WRITEBUF may be non-NULL.  Returns the return-value convention
   used for TYPE.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  /* Register sequences used for INTEGER and SSE class eightbytes.  */
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));
  gdb_assert (tdep->classify);

  /* 1. Classify the return type with the classification algorithm.  */
  tdep->classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function. In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Walk the value one eightbyte at a time, picking the register and
     intra-register offset dictated by each eightbyte's class.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  Only the 2-byte
             exponent of the second eightbyte is meaningful, hence
             LEN is clamped to 2 (which also terminates the loop).  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      /* Transfer at most 8 bytes of this eightbyte to/from the chosen
	 register.  */
      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
629 \f
630
/* Push the NARGS arguments in ARGS onto the inferior, distributing
   them between integer registers, SSE registers and the stack per the
   psABI classification.  SP is the current stack pointer;
   STRUCT_RETURN non-zero means the first integer register is reserved
   for the hidden return-value pointer.  Returns the adjusted
   (16-byte-aligned) stack pointer.  */

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  /* Integer argument registers come from the tdep so OS variants can
     override them.  */
  int *integer_regs = tdep->call_dummy_integer_regs;
  int num_integer_regs = tdep->call_dummy_num_integer_regs;

  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  /* An array that mirrors the stack_args array.  For all arguments
     that are passed by MEMORY, if that argument's address also needs
     to be stored in a register, the ARG_ADDR_REGNO array will contain
     that register number (or a negative value otherwise).  */
  int *arg_addr_regno = alloca (nargs * sizeof (int));
  int num_stack_args = 0;	/* Number of arguments going to the stack.  */
  int num_elements = 0;		/* Total eightbytes of stack space needed.  */
  int element = 0;		/* Current eightbyte while writing out.  */
  int integer_reg = 0;		/* Next free integer register index.  */
  int sse_reg = 0;		/* Next free SSE register index.  */
  int i;

  gdb_assert (tdep->classify);

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  /* First pass: classify each argument and either assign it to
     registers or queue it for the stack.  */
  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      tdep->classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > num_integer_regs
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args] = args[i];
	  /* If this is an AMD64_MEMORY argument whose address must also
	     be passed in one of the integer registers, reserve that
	     register and associate this value to that register so that
	     we can store the argument address as soon as we know it.  */
	  if (class[0] == AMD64_MEMORY
	      && tdep->memory_args_by_pointer
	      && integer_reg < tdep->call_dummy_num_integer_regs)
	    arg_addr_regno[num_stack_args] =
	      tdep->call_dummy_integer_regs[integer_reg++];
	  else
	    arg_addr_regno[num_stack_args] = -1;
	  num_stack_args++;
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  /* Write each eightbyte into the register dictated by its
	     class.  */
	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regs[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  /* Upper half of the previously used SSE register.  */
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      /* Zero-pad a final partial eightbyte.  */
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Second pass: write out the stack-resident arguments.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);
      CORE_ADDR arg_addr = sp + element * 8;

      write_memory (arg_addr, valbuf, len);
      if (arg_addr_regno[i] >= 0)
        {
          /* We also need to store the address of that argument in
             the given register.  */
          gdb_byte buf[8];
          enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

          store_unsigned_integer (buf, 8, byte_order, arg_addr);
          regcache_cooked_write (regcache, arg_addr_regno[i], buf);
        }
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
788
789 static CORE_ADDR
790 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
791 struct regcache *regcache, CORE_ADDR bp_addr,
792 int nargs, struct value **args, CORE_ADDR sp,
793 int struct_return, CORE_ADDR struct_addr)
794 {
795 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
796 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
797 gdb_byte buf[8];
798
799 /* Pass arguments. */
800 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
801
802 /* Pass "hidden" argument". */
803 if (struct_return)
804 {
805 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
806 /* The "hidden" argument is passed throught the first argument
807 register. */
808 const int arg_regnum = tdep->call_dummy_integer_regs[0];
809
810 store_unsigned_integer (buf, 8, byte_order, struct_addr);
811 regcache_cooked_write (regcache, arg_regnum, buf);
812 }
813
814 /* Reserve some memory on the stack for the integer-parameter registers,
815 if required by the ABI. */
816 if (tdep->integer_param_regs_saved_in_caller_frame)
817 sp -= tdep->call_dummy_num_integer_regs * 8;
818
819 /* Store return address. */
820 sp -= 8;
821 store_unsigned_integer (buf, 8, byte_order, bp_addr);
822 write_memory (sp, buf, 8);
823
824 /* Finally, update the stack pointer... */
825 store_unsigned_integer (buf, 8, byte_order, sp);
826 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
827
828 /* ...and fake a frame pointer. */
829 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
830
831 return sp + 16;
832 }
833 \f
834 /* Displaced instruction handling. */
835
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets are byte offsets into RAW_INSN.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  Not owned by this struct.  */
  gdb_byte *raw_insn;
};
853
/* State carried across a displaced-step operation.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.
     TMP_USED is non-zero when TMP_REGNO/TMP_SAVE are valid.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field (the struct is over-allocated so
     that INSN_BUF can hold up to MAX_LEN bytes).  */
  gdb_byte insn_buf[1];
};
871
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).
   Each entry is 1 when the one-byte opcode equal to its index is
   followed by a ModRM byte.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
898
/* Each entry is 1 when the two-byte (0x0f-prefixed) opcode whose
   second byte equals the index is followed by a ModRM byte.  Keep in
   sync with ../opcodes/i386-dis.c (see warning above).  */

static const unsigned char twobyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
921
/* Forward declaration; defined later in this file.  */
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
923
/* Return non-zero if PFX is a REX prefix byte (wraps the
   REX_PREFIX_P macro from opcode/i386.h).  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
929
930 /* Skip the legacy instruction prefixes in INSN.
931 We assume INSN is properly sentineled so we don't have to worry
932 about falling off the end of the buffer. */
933
934 static gdb_byte *
935 amd64_skip_prefixes (gdb_byte *insn)
936 {
937 while (1)
938 {
939 switch (*insn)
940 {
941 case DATA_PREFIX_OPCODE:
942 case ADDR_PREFIX_OPCODE:
943 case CS_PREFIX_OPCODE:
944 case DS_PREFIX_OPCODE:
945 case ES_PREFIX_OPCODE:
946 case FS_PREFIX_OPCODE:
947 case GS_PREFIX_OPCODE:
948 case SS_PREFIX_OPCODE:
949 case LOCK_PREFIX_OPCODE:
950 case REPE_PREFIX_OPCODE:
951 case REPNE_PREFIX_OPCODE:
952 ++insn;
953 continue;
954 default:
955 break;
956 }
957 break;
958 }
959
960 return insn;
961 }
962
/* fprintf-function for amd64_insn_length.
   This function is a nop, we don't want to print anything, we just want to
   compute the length of the insn.  STREAM and FORMAT are ignored.  */

static int ATTR_FORMAT (printf, 2, 3)
amd64_insn_length_fprintf (void *stream, const char *format, ...)
{
  return 0;
}
972
/* Initialize DI, a struct disassemble_info, for use by
   amd64_insn_length: disassemble from the MAX_LEN-byte buffer INSN as
   if it lived at address ADDR, discarding all printed output.  */

static void
amd64_insn_length_init_dis (struct gdbarch *gdbarch,
			    struct disassemble_info *di,
			    const gdb_byte *insn, int max_len,
			    CORE_ADDR addr)
{
  init_disassemble_info (di, NULL, amd64_insn_length_fprintf);

  /* init_disassemble_info installs buffer_read_memory, etc.
     so we don't need to do that here.
     The cast is necessary until disassemble_info is const-ified.  */
  di->buffer = (gdb_byte *) insn;
  di->buffer_length = max_len;
  di->buffer_vma = addr;

  /* Take the architecture, machine and byte orders from GDBARCH so
     the disassembler decodes for the right target.  */
  di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
  di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
  di->endian = gdbarch_byte_order (gdbarch);
  di->endian_code = gdbarch_byte_order_for_code (gdbarch);

  disassemble_init_for_target (di);
}
997
998 /* Return the length in bytes of INSN.
999 MAX_LEN is the size of the buffer containing INSN.
1000 libopcodes currently doesn't export a utility to compute the
1001 instruction length, so use the disassembler until then. */
1002
1003 static int
1004 amd64_insn_length (struct gdbarch *gdbarch,
1005 const gdb_byte *insn, int max_len, CORE_ADDR addr)
1006 {
1007 struct disassemble_info di;
1008
1009 amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);
1010
1011 return gdbarch_print_insn (gdbarch, addr, &di);
1012 }
1013
1014 /* Return an integer register (other than RSP) that is unused as an input
1015 operand in INSN.
1016 In order to not require adding a rex prefix if the insn doesn't already
1017 have one, the result is restricted to RAX ... RDI, sans RSP.
1018 The register numbering of the result follows architecture ordering,
1019 e.g. RDI = 7. */
1020
1021 static int
1022 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1023 {
1024 /* 1 bit for each reg */
1025 int used_regs_mask = 0;
1026
1027 /* There can be at most 3 int regs used as inputs in an insn, and we have
1028 7 to choose from (RAX ... RDI, sans RSP).
1029 This allows us to take a conservative approach and keep things simple.
1030 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1031 that implicitly specify RAX. */
1032
1033 /* Avoid RAX. */
1034 used_regs_mask |= 1 << EAX_REG_NUM;
1035 /* Similarily avoid RDX, implicit operand in divides. */
1036 used_regs_mask |= 1 << EDX_REG_NUM;
1037 /* Avoid RSP. */
1038 used_regs_mask |= 1 << ESP_REG_NUM;
1039
1040 /* If the opcode is one byte long and there's no ModRM byte,
1041 assume the opcode specifies a register. */
1042 if (details->opcode_len == 1 && details->modrm_offset == -1)
1043 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1044
1045 /* Mark used regs in the modrm/sib bytes. */
1046 if (details->modrm_offset != -1)
1047 {
1048 int modrm = details->raw_insn[details->modrm_offset];
1049 int mod = MODRM_MOD_FIELD (modrm);
1050 int reg = MODRM_REG_FIELD (modrm);
1051 int rm = MODRM_RM_FIELD (modrm);
1052 int have_sib = mod != 3 && rm == 4;
1053
1054 /* Assume the reg field of the modrm byte specifies a register. */
1055 used_regs_mask |= 1 << reg;
1056
1057 if (have_sib)
1058 {
1059 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1060 int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1061 used_regs_mask |= 1 << base;
1062 used_regs_mask |= 1 << index;
1063 }
1064 else
1065 {
1066 used_regs_mask |= 1 << rm;
1067 }
1068 }
1069
1070 gdb_assert (used_regs_mask < 256);
1071 gdb_assert (used_regs_mask != 255);
1072
1073 /* Finally, find a free reg. */
1074 {
1075 int i;
1076
1077 for (i = 0; i < 8; ++i)
1078 {
1079 if (! (used_regs_mask & (1 << i)))
1080 return i;
1081 }
1082
1083 /* We shouldn't get here. */
1084 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1085 }
1086 }
1087
1088 /* Extract the details of INSN that we need. */
1089
static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  /* -1 means "not present" for each of these offsets/lengths.  */
  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode: these second bytes introduce a
	 third opcode byte.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
1151
1152 /* Update %rip-relative addressing in INSN.
1153
1154 %rip-relative addressing only uses a 32-bit displacement.
1155 32 bits is not enough to be guaranteed to cover the distance between where
1156 the real instruction is and where its copy is.
1157 Convert the insn to use base+disp addressing.
1158 We set base = pc + insn_length so we can leave disp unchanged. */
1159
static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Record the temp register's original value so the fixup phase can
     restore it after the single-step.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: clear mod and r/m, then
     set mod = 10 (reg + disp32) with r/m = the chosen temp register,
     leaving the 32-bit displacement itself unchanged.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  /* Point the temp register at the address %rip would have had.  */
  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
1210
1211 static void
1212 fixup_displaced_copy (struct gdbarch *gdbarch,
1213 struct displaced_step_closure *dsc,
1214 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1215 {
1216 const struct amd64_insn *details = &dsc->insn_details;
1217
1218 if (details->modrm_offset != -1)
1219 {
1220 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1221
1222 if ((modrm & 0xc7) == 0x05)
1223 {
1224 /* The insn uses rip-relative addressing.
1225 Deal with it. */
1226 fixup_riprel (gdbarch, dsc, from, to, regs);
1227 }
1228 }
1229 }
1230
struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  /* Fetch the bytes of the original instruction.  */
  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  /* Locate the prefix/opcode/ModRM bytes for the fixup passes.  */
  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  /* Install the (possibly modified) copy at the scratch address.  */
  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
1282
1283 static int
1284 amd64_absolute_jmp_p (const struct amd64_insn *details)
1285 {
1286 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1287
1288 if (insn[0] == 0xff)
1289 {
1290 /* jump near, absolute indirect (/4) */
1291 if ((insn[1] & 0x38) == 0x20)
1292 return 1;
1293
1294 /* jump far, absolute indirect (/5) */
1295 if ((insn[1] & 0x38) == 0x28)
1296 return 1;
1297 }
1298
1299 return 0;
1300 }
1301
1302 static int
1303 amd64_absolute_call_p (const struct amd64_insn *details)
1304 {
1305 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1306
1307 if (insn[0] == 0xff)
1308 {
1309 /* Call near, absolute indirect (/2) */
1310 if ((insn[1] & 0x38) == 0x10)
1311 return 1;
1312
1313 /* Call far, absolute indirect (/3) */
1314 if ((insn[1] & 0x38) == 0x18)
1315 return 1;
1316 }
1317
1318 return 0;
1319 }
1320
1321 static int
1322 amd64_ret_p (const struct amd64_insn *details)
1323 {
1324 /* NOTE: gcc can emit "repz ; ret". */
1325 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1326
1327 switch (insn[0])
1328 {
1329 case 0xc2: /* ret near, pop N bytes */
1330 case 0xc3: /* ret near */
1331 case 0xca: /* ret far, pop N bytes */
1332 case 0xcb: /* ret far */
1333 case 0xcf: /* iret */
1334 return 1;
1335
1336 default:
1337 return 0;
1338 }
1339 }
1340
1341 static int
1342 amd64_call_p (const struct amd64_insn *details)
1343 {
1344 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1345
1346 if (amd64_absolute_call_p (details))
1347 return 1;
1348
1349 /* call near, relative */
1350 if (insn[0] == 0xe8)
1351 return 1;
1352
1353 return 0;
1354 }
1355
1356 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1357 length in bytes. Otherwise, return zero. */
1358
1359 static int
1360 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1361 {
1362 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1363
1364 if (insn[0] == 0x0f && insn[1] == 0x05)
1365 {
1366 *lengthp = 2;
1367 return 1;
1368 }
1369
1370 return 0;
1371 }
1372
1373 /* Fix up the state of registers and memory after having single-stepped
1374 a displaced instruction. */
1375
1376 void
1377 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1378 struct displaced_step_closure *dsc,
1379 CORE_ADDR from, CORE_ADDR to,
1380 struct regcache *regs)
1381 {
1382 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1383 /* The offset we applied to the instruction's address. */
1384 ULONGEST insn_offset = to - from;
1385 gdb_byte *insn = dsc->insn_buf;
1386 const struct amd64_insn *insn_details = &dsc->insn_details;
1387
1388 if (debug_displaced)
1389 fprintf_unfiltered (gdb_stdlog,
1390 "displaced: fixup (%s, %s), "
1391 "insn = 0x%02x 0x%02x ...\n",
1392 paddress (gdbarch, from), paddress (gdbarch, to),
1393 insn[0], insn[1]);
1394
1395 /* If we used a tmp reg, restore it. */
1396
1397 if (dsc->tmp_used)
1398 {
1399 if (debug_displaced)
1400 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1401 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1402 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1403 }
1404
1405 /* The list of issues to contend with here is taken from
1406 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1407 Yay for Free Software! */
1408
1409 /* Relocate the %rip back to the program's instruction stream,
1410 if necessary. */
1411
1412 /* Except in the case of absolute or indirect jump or call
1413 instructions, or a return instruction, the new rip is relative to
1414 the displaced instruction; make it relative to the original insn.
1415 Well, signal handler returns don't need relocation either, but we use the
1416 value of %rip to recognize those; see below. */
1417 if (! amd64_absolute_jmp_p (insn_details)
1418 && ! amd64_absolute_call_p (insn_details)
1419 && ! amd64_ret_p (insn_details))
1420 {
1421 ULONGEST orig_rip;
1422 int insn_len;
1423
1424 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1425
1426 /* A signal trampoline system call changes the %rip, resuming
1427 execution of the main program after the signal handler has
1428 returned. That makes them like 'return' instructions; we
1429 shouldn't relocate %rip.
1430
1431 But most system calls don't, and we do need to relocate %rip.
1432
1433 Our heuristic for distinguishing these cases: if stepping
1434 over the system call instruction left control directly after
1435 the instruction, the we relocate --- control almost certainly
1436 doesn't belong in the displaced copy. Otherwise, we assume
1437 the instruction has put control where it belongs, and leave
1438 it unrelocated. Goodness help us if there are PC-relative
1439 system calls. */
1440 if (amd64_syscall_p (insn_details, &insn_len)
1441 && orig_rip != to + insn_len
1442 /* GDB can get control back after the insn after the syscall.
1443 Presumably this is a kernel bug.
1444 Fixup ensures its a nop, we add one to the length for it. */
1445 && orig_rip != to + insn_len + 1)
1446 {
1447 if (debug_displaced)
1448 fprintf_unfiltered (gdb_stdlog,
1449 "displaced: syscall changed %%rip; "
1450 "not relocating\n");
1451 }
1452 else
1453 {
1454 ULONGEST rip = orig_rip - insn_offset;
1455
1456 /* If we just stepped over a breakpoint insn, we don't backup
1457 the pc on purpose; this is to match behaviour without
1458 stepping. */
1459
1460 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1461
1462 if (debug_displaced)
1463 fprintf_unfiltered (gdb_stdlog,
1464 "displaced: "
1465 "relocated %%rip from %s to %s\n",
1466 paddress (gdbarch, orig_rip),
1467 paddress (gdbarch, rip));
1468 }
1469 }
1470
1471 /* If the instruction was PUSHFL, then the TF bit will be set in the
1472 pushed value, and should be cleared. We'll leave this for later,
1473 since GDB already messes up the TF flag when stepping over a
1474 pushfl. */
1475
1476 /* If the instruction was a call, the return address now atop the
1477 stack is the address following the copied instruction. We need
1478 to make it the address following the original instruction. */
1479 if (amd64_call_p (insn_details))
1480 {
1481 ULONGEST rsp;
1482 ULONGEST retaddr;
1483 const ULONGEST retaddr_len = 8;
1484
1485 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1486 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1487 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1488 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1489
1490 if (debug_displaced)
1491 fprintf_unfiltered (gdb_stdlog,
1492 "displaced: relocated return addr at %s "
1493 "to %s\n",
1494 paddress (gdbarch, rsp),
1495 paddress (gdbarch, retaddr));
1496 }
1497 }
1498 \f
1499 /* The maximum number of saved registers. This should include %rip. */
1500 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1501
struct amd64_frame_cache
{
  /* Base address of the frame.  */
  CORE_ADDR base;
  /* Offset of %rsp from the frame base, accumulated while scanning
     the prologue.  */
  CORE_ADDR sp_offset;
  /* Start address of the function (from get_frame_func).  */
  CORE_ADDR pc;

  /* Saved registers.  Entries start out as offsets from BASE and are
     later converted to absolute addresses; -1 means "not saved".  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  /* Value of %rsp in the calling frame.  */
  CORE_ADDR saved_sp;
  /* Register holding a copy of the pre-realignment stack pointer
     (set by amd64_analyze_stack_align), or -1 if none.  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
1517
1518 /* Initialize a frame cache. */
1519
1520 static void
1521 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1522 {
1523 int i;
1524
1525 /* Base address. */
1526 cache->base = 0;
1527 cache->sp_offset = -8;
1528 cache->pc = 0;
1529
1530 /* Saved registers. We initialize these to -1 since zero is a valid
1531 offset (that's where %rbp is supposed to be stored).
1532 The values start out as being offsets, and are later converted to
1533 addresses (at which point -1 is interpreted as an address, still meaning
1534 "invalid"). */
1535 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1536 cache->saved_regs[i] = -1;
1537 cache->saved_sp = 0;
1538 cache->saved_sp_reg = -1;
1539
1540 /* Frameless until proven otherwise. */
1541 cache->frameless_p = 1;
1542 }
1543
1544 /* Allocate and initialize a frame cache. */
1545
1546 static struct amd64_frame_cache *
1547 amd64_alloc_frame_cache (void)
1548 {
1549 struct amd64_frame_cache *cache;
1550
1551 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1552 amd64_init_frame_cache (cache);
1553 return cache;
1554 }
1555
1556 /* GCC 4.4 and later, can put code in the prologue to realign the
1557 stack pointer. Check whether PC points to such code, and update
1558 CACHE accordingly. Return the first instruction after the code
1559 sequence or CURRENT_PC, whichever is smaller. If we don't
1560 recognize the code, return PC. */
1561
static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the "andq" is; SAVED_SP_REG is only valid once
     execution has passed it (see below).  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Only trust REG as a copy of the old %rsp once the "andq" that
     clobbers %rsp has actually executed.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
1710
1711 /* Do a limited analysis of the prologue at PC and update CACHE
1712 accordingly. Bail out early if CURRENT_PC is reached. Return the
1713 address where the analysis stopped.
1714
1715 We will handle only functions beginning with:
1716
1717 pushq %rbp 0x55
1718 movq %rsp, %rbp 0x48 0x89 0xe5
1719
1720 Any function that doesn't start with this sequence will be assumed
1721 to have no prologue and thus no valid frame pointer in %rbp. */
1722
static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  /* First recognize any GCC stack-realignment sequence, which may
     precede the standard prologue.  */
  pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_memory_unsigned_integer (pc, 1, byte_order);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
	 starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
	return current_pc;

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)
	return pc + 1;

      /* OK, we actually have a frame.  */
      cache->frameless_p = 0;
      return pc + 4;
    }

  return pc;
}
1763
1764 /* Return PC of first real instruction. */
1765
1766 static CORE_ADDR
1767 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1768 {
1769 struct amd64_frame_cache cache;
1770 CORE_ADDR pc;
1771
1772 amd64_init_frame_cache (&cache);
1773 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1774 &cache);
1775 if (cache.frameless_p)
1776 return start_pc;
1777
1778 return pc;
1779 }
1780 \f
1781
1782 /* Normal frames. */
1783
static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  /* Scan the prologue (up to the current PC) to fill in sp_offset,
     frameless_p, saved_sp_reg and the saved-register offsets.  */
  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
                            cache);

  if (cache->saved_sp_reg != -1)
    {
      /* Stack pointer has been saved.  */
      get_frame_register (this_frame, cache->saved_sp_reg, buf);
      cache->saved_sp = extract_unsigned_integer(buf, 8, byte_order);
    }

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully setup yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      /* Normal frame: %rbp holds the frame base.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}
1861
1862 static void
1863 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1864 struct frame_id *this_id)
1865 {
1866 struct amd64_frame_cache *cache =
1867 amd64_frame_cache (this_frame, this_cache);
1868
1869 /* This marks the outermost frame. */
1870 if (cache->base == 0)
1871 return;
1872
1873 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1874 }
1875
1876 static struct value *
1877 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1878 int regnum)
1879 {
1880 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1881 struct amd64_frame_cache *cache =
1882 amd64_frame_cache (this_frame, this_cache);
1883
1884 gdb_assert (regnum >= 0);
1885
1886 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
1887 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
1888
1889 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1890 return frame_unwind_got_memory (this_frame, regnum,
1891 cache->saved_regs[regnum]);
1892
1893 return frame_unwind_got_register (this_frame, regnum, regnum);
1894 }
1895
/* Unwinder for ordinary (prologue-analyzed) frames.  */
static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,				/* No extra unwind data.  */
  default_frame_sniffer
};
1904 \f
1905
1906 /* Signal trampolines. */
1907
1908 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1909 64-bit variants. This would require using identical frame caches
1910 on both platforms. */
1911
static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  /* Base the frame on the current %rsp, offset by -8.  */
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

  /* The saved registers live in the signal context, at target-specific
     offsets given by TDEP->sc_reg_offset; -1 marks a register the
     context doesn't hold.  */
  addr = tdep->sigcontext_addr (this_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}
1941
1942 static void
1943 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
1944 void **this_cache, struct frame_id *this_id)
1945 {
1946 struct amd64_frame_cache *cache =
1947 amd64_sigtramp_frame_cache (this_frame, this_cache);
1948
1949 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
1950 }
1951
static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Populate the cache if needed, then let the normal frame machinery
     read the register through CACHE->saved_regs.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
1961
1962 static int
1963 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1964 struct frame_info *this_frame,
1965 void **this_cache)
1966 {
1967 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1968
1969 /* We shouldn't even bother if we don't have a sigcontext_addr
1970 handler. */
1971 if (tdep->sigcontext_addr == NULL)
1972 return 0;
1973
1974 if (tdep->sigtramp_p != NULL)
1975 {
1976 if (tdep->sigtramp_p (this_frame))
1977 return 1;
1978 }
1979
1980 if (tdep->sigtramp_start != 0)
1981 {
1982 CORE_ADDR pc = get_frame_pc (this_frame);
1983
1984 gdb_assert (tdep->sigtramp_end != 0);
1985 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
1986 return 1;
1987 }
1988
1989 return 0;
1990 }
1991
/* Unwinder for signal trampoline frames.  */
static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,				/* No extra unwind data.  */
  amd64_sigtramp_frame_sniffer
};
2000 \f
2001
2002 static CORE_ADDR
2003 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2004 {
2005 struct amd64_frame_cache *cache =
2006 amd64_frame_cache (this_frame, this_cache);
2007
2008 return cache->base;
2009 }
2010
/* Frame base handler: the same base address serves for the frame
   base, locals base and args base.  */
static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
2018
2019 /* Normal frames, but in a function epilogue. */
2020
2021 /* The epilogue is defined here as the 'ret' instruction, which will
2022 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2023 the function's stack frame. */
2024
2025 static int
2026 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2027 {
2028 gdb_byte insn;
2029
2030 if (target_read_memory (pc, &insn, 1))
2031 return 0; /* Can't read memory at pc. */
2032
2033 if (insn != 0xc3) /* 'ret' instruction. */
2034 return 0;
2035
2036 return 1;
2037 }
2038
static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  /* Only the innermost frame can be sitting in an epilogue.  */
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
				       get_frame_pc (this_frame));
}
2050
static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8,
					  byte_order) + cache->sp_offset;

  /* Cache pc will be the current pc (we're at the 'ret', so the frame
     has already been torn down).  */
  cache->pc = get_frame_pc (this_frame);

  /* The saved %rsp will be at cache->base plus 16.  */
  cache->saved_sp = cache->base + 16;

  /* The saved %rip will be at cache->base plus 8.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

  return cache;
}
2081
2082 static void
2083 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2084 void **this_cache,
2085 struct frame_id *this_id)
2086 {
2087 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2088 this_cache);
2089
2090 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2091 }
2092
/* Unwinder for normal frames caught in a function epilogue; shares
   the ordinary prev_register implementation but uses the epilogue
   frame cache via amd64_epilogue_frame_this_id.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};
2101
2102 static struct frame_id
2103 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2104 {
2105 CORE_ADDR fp;
2106
2107 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2108
2109 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2110 }
2111
2112 /* 16 byte align the SP per frame requirements. */
2113
2114 static CORE_ADDR
2115 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2116 {
2117 return sp & -(CORE_ADDR)16;
2118 }
2119 \f
2120
2121 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2122 in the floating-point register set REGSET to register cache
2123 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2124
2125 static void
2126 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2127 int regnum, const void *fpregs, size_t len)
2128 {
2129 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2130
2131 gdb_assert (len == tdep->sizeof_fpregset);
2132 amd64_supply_fxsave (regcache, regnum, fpregs);
2133 }
2134
2135 /* Collect register REGNUM from the register cache REGCACHE and store
2136 it in the buffer specified by FPREGS and LEN as described by the
2137 floating-point register set REGSET. If REGNUM is -1, do this for
2138 all registers in REGSET. */
2139
2140 static void
2141 amd64_collect_fpregset (const struct regset *regset,
2142 const struct regcache *regcache,
2143 int regnum, void *fpregs, size_t len)
2144 {
2145 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2146
2147 gdb_assert (len == tdep->sizeof_fpregset);
2148 amd64_collect_fxsave (regcache, regnum, fpregs);
2149 }
2150
2151 /* Return the appropriate register set for the core section identified
2152 by SECT_NAME and SECT_SIZE. */
2153
2154 static const struct regset *
2155 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2156 const char *sect_name, size_t sect_size)
2157 {
2158 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2159
2160 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2161 {
2162 if (tdep->fpregset == NULL)
2163 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2164 amd64_collect_fpregset);
2165
2166 return tdep->fpregset;
2167 }
2168
2169 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2170 }
2171 \f
2172
2173 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2174 %rdi. We expect its value to be a pointer to the jmp_buf structure
2175 from which we extract the address that we will land at. This
2176 address is copied into PC. This routine returns non-zero on
2177 success. */
2178
2179 static int
2180 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2181 {
2182 gdb_byte buf[8];
2183 CORE_ADDR jb_addr;
2184 struct gdbarch *gdbarch = get_frame_arch (frame);
2185 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2186 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2187
2188 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2189 longjmp will land. */
2190 if (jb_pc_offset == -1)
2191 return 0;
2192
2193 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2194 jb_addr= extract_typed_address
2195 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2196 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2197 return 0;
2198
2199 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2200
2201 return 1;
2202 }
2203
2204 static const int amd64_record_regmap[] =
2205 {
2206 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2207 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2208 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2209 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2210 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2211 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2212 };
2213
/* Set up the architecture vector GDBARCH for the AMD64 (x86-64) ABI,
   using the target description from INFO when one is present.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* Fall back to the built-in amd64 description if the target
     supplied none.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Pseudo-register counts for the al/ax/eax-style sub-registers.  */
  tdep->num_byte_regs = 16;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read (gdbarch,
				    amd64_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);
  tdep->call_dummy_num_integer_regs =
    ARRAY_SIZE (amd64_dummy_call_integer_regs);
  tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
  tdep->classify = amd64_classify;

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
					  amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
}
2316
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

/* Module initializer: register the built-in amd64 target description
   (generated into features/i386/amd64.c, included above).  */

void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
}
2325 \f
2326
2327 /* The 64-bit FXSAVE format differs from the 32-bit format in the
2328 sense that the instruction pointer and data pointer are simply
2329 64-bit offsets into the code segment and the data segment instead
2330 of a selector offset pair. The functions below store the upper 32
2331 bits of these pointers (instead of just the 16-bits of the segment
2332 selector). */
2333
/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
		     const void *fxsave)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Common i387 handling first.  */
  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
    {
      const gdb_byte *regs = fxsave;

      /* Per the 64-bit FXSAVE layout described above, offsets 12 and
	 20 hold the upper 32 bits of the FPU instruction and operand
	 pointers rather than segment selectors; store them in the
	 FISEG/FOSEG slots.  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}
2358
/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
		      void *fxsave)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = fxsave;

  /* Common i387 handling first.  */
  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_ptr_bit (gdbarch) == 64)
    {
      /* Mirror of amd64_supply_fxsave: the FISEG/FOSEG slots carry
	 the upper 32 bits of the 64-bit FPU instruction and operand
	 pointers, at offsets 12 and 20 of the FXSAVE area.  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}