1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 Contributed by Jiri Smid, SuSE Labs.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "opcode/i386.h"
25 #include "dis-asm.h"
26 #include "arch-utils.h"
27 #include "block.h"
28 #include "dummy-frame.h"
29 #include "frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
32 #include "inferior.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39
40 #include "gdb_assert.h"
41
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
44
45 /* Note that the AMD64 architecture was previously known as x86-64.
46 The latter is (forever) engraved into the canonical system name as
47 returned by config.guess, and used as the name for the AMD64 port
48 of GNU/Linux. The BSDs have renamed their ports to amd64; they
49 don't like to shout. For GDB we prefer the amd64_-prefix over the
50 x86_64_-prefix since it's so much easier to type. */
51
52 /* Register information. */
53
54 static const char *amd64_register_names[] =
55 {
56 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
57
58 /* %r8 is indeed register number 8. */
59 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
60 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
61
62 /* %st0 is register number 24. */
63 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
64 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
65
66 /* %xmm0 is register number 40. */
67 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
68 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
69 "mxcsr",
70 };
71
72 /* Total number of registers. */
73 #define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_names)
74
75 /* The registers used to pass integer arguments during a function call. */
76 static int amd64_dummy_call_integer_regs[] =
77 {
78 AMD64_RDI_REGNUM, /* %rdi */
79 AMD64_RSI_REGNUM, /* %rsi */
80 AMD64_RDX_REGNUM, /* %rdx */
81 AMD64_RCX_REGNUM, /* %rcx */
82 8, /* %r8 */
83 9 /* %r9 */
84 };
85
86 /* Return the name of register REGNUM. */
87
88 const char *
89 amd64_register_name (struct gdbarch *gdbarch, int regnum)
90 {
91 if (regnum >= 0 && regnum < AMD64_NUM_REGS)
92 return amd64_register_names[regnum];
93
94 return NULL;
95 }
96
97 /* Return the GDB type object for the "standard" data type of data in
98 register REGNUM. */
99
100 struct type *
101 amd64_register_type (struct gdbarch *gdbarch, int regnum)
102 {
103 if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_RDI_REGNUM)
104 return builtin_type (gdbarch)->builtin_int64;
105 if (regnum == AMD64_RBP_REGNUM || regnum == AMD64_RSP_REGNUM)
106 return builtin_type (gdbarch)->builtin_data_ptr;
107 if (regnum >= AMD64_R8_REGNUM && regnum <= AMD64_R15_REGNUM)
108 return builtin_type (gdbarch)->builtin_int64;
109 if (regnum == AMD64_RIP_REGNUM)
110 return builtin_type (gdbarch)->builtin_func_ptr;
111 if (regnum == AMD64_EFLAGS_REGNUM)
112 return i386_eflags_type (gdbarch);
113 if (regnum >= AMD64_CS_REGNUM && regnum <= AMD64_GS_REGNUM)
114 return builtin_type (gdbarch)->builtin_int32;
115 if (regnum >= AMD64_ST0_REGNUM && regnum <= AMD64_ST0_REGNUM + 7)
116 return i387_ext_type (gdbarch);
117 if (regnum >= AMD64_FCTRL_REGNUM && regnum <= AMD64_FCTRL_REGNUM + 7)
118 return builtin_type (gdbarch)->builtin_int32;
119 if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
120 return i386_sse_type (gdbarch);
121 if (regnum == AMD64_MXCSR_REGNUM)
122 return i386_mxcsr_type (gdbarch);
123
124 internal_error (__FILE__, __LINE__, _("invalid regnum"));
125 }
126
127 /* DWARF Register Number Mapping as defined in the System V psABI,
128 section 3.6. */
129
130 static int amd64_dwarf_regmap[] =
131 {
132 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
133 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
134 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
135 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
136
137 /* Frame Pointer Register RBP. */
138 AMD64_RBP_REGNUM,
139
140 /* Stack Pointer Register RSP. */
141 AMD64_RSP_REGNUM,
142
143 /* Extended Integer Registers 8 - 15. */
144 8, 9, 10, 11, 12, 13, 14, 15,
145
146 /* Return Address RA. Mapped to RIP. */
147 AMD64_RIP_REGNUM,
148
149 /* SSE Registers 0 - 7. */
150 AMD64_XMM0_REGNUM + 0, AMD64_XMM0_REGNUM + 1,
151 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
152 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
153 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
154
155 /* Extended SSE Registers 8 - 15. */
156 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
157 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
158 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
159 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
160
161 /* Floating Point Registers 0-7. */
162 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
163 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
164 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
165 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
166
167 /* Control and Status Flags Register. */
168 AMD64_EFLAGS_REGNUM,
169
170 /* Selector Registers. */
171 AMD64_ES_REGNUM,
172 AMD64_CS_REGNUM,
173 AMD64_SS_REGNUM,
174 AMD64_DS_REGNUM,
175 AMD64_FS_REGNUM,
176 AMD64_GS_REGNUM,
177 -1,
178 -1,
179
180 /* Segment Base Address Registers. */
181 -1,
182 -1,
183 -1,
184 -1,
185
186 /* Special Selector Registers. */
187 -1,
188 -1,
189
190 /* Floating Point Control Registers. */
191 AMD64_MXCSR_REGNUM,
192 AMD64_FCTRL_REGNUM,
193 AMD64_FSTAT_REGNUM
194 };
195
196 static const int amd64_dwarf_regmap_len =
197 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
198
199 /* Convert DWARF register number REG to the appropriate register
200 number used by GDB. */
201
202 static int
203 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
204 {
205 int regnum = -1;
206
207 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
208 regnum = amd64_dwarf_regmap[reg];
209
210 if (regnum == -1)
211 warning (_("Unmapped DWARF Register #%d encountered."), reg);
212
213 return regnum;
214 }
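
/* For illustration: with the table above, DWARF register 0 (the
   psABI's %rax column) resolves to AMD64_RAX_REGNUM, DWARF register
   16 (the return-address column) resolves to AMD64_RIP_REGNUM, and
   DWARF register 17 resolves to %xmm0.  DWARF numbers with no GDB
   equivalent (such as the segment base addresses) map to -1, which
   amd64_dwarf_reg_to_regnum reports with a warning.  */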
215
216 /* Map architectural register numbers to gdb register numbers. */
217
218 static const int amd64_arch_regmap[16] =
219 {
220 AMD64_RAX_REGNUM, /* %rax */
221 AMD64_RCX_REGNUM, /* %rcx */
222 AMD64_RDX_REGNUM, /* %rdx */
223 AMD64_RBX_REGNUM, /* %rbx */
224 AMD64_RSP_REGNUM, /* %rsp */
225 AMD64_RBP_REGNUM, /* %rbp */
226 AMD64_RSI_REGNUM, /* %rsi */
227 AMD64_RDI_REGNUM, /* %rdi */
228 AMD64_R8_REGNUM, /* %r8 */
229 AMD64_R9_REGNUM, /* %r9 */
230 AMD64_R10_REGNUM, /* %r10 */
231 AMD64_R11_REGNUM, /* %r11 */
232 AMD64_R12_REGNUM, /* %r12 */
233 AMD64_R13_REGNUM, /* %r13 */
234 AMD64_R14_REGNUM, /* %r14 */
235 AMD64_R15_REGNUM /* %r15 */
236 };
237
238 static const int amd64_arch_regmap_len =
239 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
240
241 /* Convert architectural register number REG to the appropriate register
242 number used by GDB. */
243
244 static int
245 amd64_arch_reg_to_regnum (int reg)
246 {
247 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
248
249 return amd64_arch_regmap[reg];
250 }
251
252 \f
253
254 /* Return the union class of CLASS1 and CLASS2. See the psABI for
255 details. */
256
257 static enum amd64_reg_class
258 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
259 {
260 /* Rule (a): If both classes are equal, this is the resulting class. */
261 if (class1 == class2)
262 return class1;
263
264 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
265 is the other class. */
266 if (class1 == AMD64_NO_CLASS)
267 return class2;
268 if (class2 == AMD64_NO_CLASS)
269 return class1;
270
271 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
272 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
273 return AMD64_MEMORY;
274
275 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
276 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
277 return AMD64_INTEGER;
278
279 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
280 MEMORY is used as class. */
281 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
282 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
283 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
284 return AMD64_MEMORY;
285
286 /* Rule (f): Otherwise class SSE is used. */
287 return AMD64_SSE;
288 }
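
/* A minimal worked example of the merge rules above: for the union

     union u { double d; long l; };

   both members occupy the first eightbyte; the double classifies as
   AMD64_SSE and the long as AMD64_INTEGER, and rule (d) merges them
   to AMD64_INTEGER, so the union is passed in an integer register.  */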
289
290 /* Return non-zero if TYPE is a non-POD structure or union type. */
291
292 static int
293 amd64_non_pod_p (struct type *type)
294 {
295 /* ??? A class with a base class certainly isn't POD, but does this
296 catch all non-POD structure types? */
297 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
298 return 1;
299
300 return 0;
301 }
302
303 /* Classify TYPE according to the rules for aggregate (structures and
304 arrays) and union types, and store the result in CLASS. */
305
306 static void
307 amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
308 {
309 int len = TYPE_LENGTH (type);
310
311 /* 1. If the size of an object is larger than two eightbytes, or in
312 C++, is a non-POD structure or union type, or contains
313 unaligned fields, it has class memory. */
314 if (len > 16 || amd64_non_pod_p (type))
315 {
316 class[0] = class[1] = AMD64_MEMORY;
317 return;
318 }
319
320 /* 2. Both eightbytes get initialized to class NO_CLASS. */
321 class[0] = class[1] = AMD64_NO_CLASS;
322
323 /* 3. Each field of an object is classified recursively so that
324 always two fields are considered. The resulting class is
325 calculated according to the classes of the fields in the
326 eightbyte: */
327
328 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
329 {
330 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
331
332 /* All fields in an array have the same type. */
333 amd64_classify (subtype, class);
334 if (len > 8 && class[1] == AMD64_NO_CLASS)
335 class[1] = class[0];
336 }
337 else
338 {
339 int i;
340
341 /* Structure or union. */
342 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
343 || TYPE_CODE (type) == TYPE_CODE_UNION);
344
345 for (i = 0; i < TYPE_NFIELDS (type); i++)
346 {
347 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
348 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
349 enum amd64_reg_class subclass[2];
350
351 /* Ignore static fields. */
352 if (field_is_static (&TYPE_FIELD (type, i)))
353 continue;
354
355 gdb_assert (pos == 0 || pos == 1);
356
357 amd64_classify (subtype, subclass);
358 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
359 if (pos == 0)
360 class[1] = amd64_merge_classes (class[1], subclass[1]);
361 }
362 }
363
364 /* 4. Then a post merger cleanup is done: */
365
366 /* Rule (a): If one of the classes is MEMORY, the whole argument is
367 passed in memory. */
368 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
369 class[0] = class[1] = AMD64_MEMORY;
370
371 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
372 SSE. */
373 if (class[0] == AMD64_SSEUP)
374 class[0] = AMD64_SSE;
375 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
376 class[1] = AMD64_SSE;
377 }
378
379 /* Classify TYPE, and store the result in CLASS. */
380
381 void
382 amd64_classify (struct type *type, enum amd64_reg_class class[2])
383 {
384 enum type_code code = TYPE_CODE (type);
385 int len = TYPE_LENGTH (type);
386
387 class[0] = class[1] = AMD64_NO_CLASS;
388
389 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
390 long, long long, and pointers are in the INTEGER class. Similarly,
391 range types, used by languages such as Ada, are also in the INTEGER
392 class. */
393 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
394 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
395 || code == TYPE_CODE_CHAR
396 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
397 && (len == 1 || len == 2 || len == 4 || len == 8))
398 class[0] = AMD64_INTEGER;
399
400 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
401 are in class SSE. */
402 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
403 && (len == 4 || len == 8))
404 /* FIXME: __m64 . */
405 class[0] = AMD64_SSE;
406
407 /* Arguments of types __float128, _Decimal128 and __m128 are split into
408 two halves. The least significant ones belong to class SSE, the most
409 significant one to class SSEUP. */
410 else if (code == TYPE_CODE_DECFLOAT && len == 16)
411 /* FIXME: __float128, __m128. */
412 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
413
414 /* The 64-bit mantissa of arguments of type long double belongs to
415 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
416 class X87UP. */
417 else if (code == TYPE_CODE_FLT && len == 16)
418 /* Class X87 and X87UP. */
419 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
420
421 /* Aggregates. */
422 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
423 || code == TYPE_CODE_UNION)
424 amd64_classify_aggregate (type, class);
425 }
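
/* Some example classifications produced by the routine above,
   assuming the usual System V type sizes:

     int                          -> { INTEGER, NO_CLASS }
     double                       -> { SSE, NO_CLASS }
     struct { long a; double b; } -> { INTEGER, SSE }
     struct { double x, y; }      -> { SSE, SSE }
     struct { char c[32]; }       -> { MEMORY, MEMORY } (larger than
                                     two eightbytes)  */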
426
427 static enum return_value_convention
428 amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
429 struct type *type, struct regcache *regcache,
430 gdb_byte *readbuf, const gdb_byte *writebuf)
431 {
432 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
433 enum amd64_reg_class class[2];
434 int len = TYPE_LENGTH (type);
435 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
436 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
437 int integer_reg = 0;
438 int sse_reg = 0;
439 int i;
440
441 gdb_assert (!(readbuf && writebuf));
442 gdb_assert (tdep->classify);
443
444 /* 1. Classify the return type with the classification algorithm. */
445 tdep->classify (type, class);
446
447 /* 2. If the type has class MEMORY, then the caller provides space
448 for the return value and passes the address of this storage in
449 %rdi as if it were the first argument to the function. In effect,
450 this address becomes a hidden first argument.
451
452 On return %rax will contain the address that has been passed in
453 by the caller in %rdi. */
454 if (class[0] == AMD64_MEMORY)
455 {
456 /* As indicated by the comment above, the ABI guarantees that we
457 can always find the return value just after the function has
458 returned. */
459
460 if (readbuf)
461 {
462 ULONGEST addr;
463
464 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
465 read_memory (addr, readbuf, TYPE_LENGTH (type));
466 }
467
468 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
469 }
470
471 gdb_assert (class[1] != AMD64_MEMORY);
472 gdb_assert (len <= 16);
473
474 for (i = 0; len > 0; i++, len -= 8)
475 {
476 int regnum = -1;
477 int offset = 0;
478
479 switch (class[i])
480 {
481 case AMD64_INTEGER:
482 /* 3. If the class is INTEGER, the next available register
483 of the sequence %rax, %rdx is used. */
484 regnum = integer_regnum[integer_reg++];
485 break;
486
487 case AMD64_SSE:
488 /* 4. If the class is SSE, the next available SSE register
489 of the sequence %xmm0, %xmm1 is used. */
490 regnum = sse_regnum[sse_reg++];
491 break;
492
493 case AMD64_SSEUP:
494 /* 5. If the class is SSEUP, the eightbyte is passed in the
495 upper half of the last used SSE register. */
496 gdb_assert (sse_reg > 0);
497 regnum = sse_regnum[sse_reg - 1];
498 offset = 8;
499 break;
500
501 case AMD64_X87:
502 /* 6. If the class is X87, the value is returned on the X87
503 stack in %st0 as 80-bit x87 number. */
504 regnum = AMD64_ST0_REGNUM;
505 if (writebuf)
506 i387_return_value (gdbarch, regcache);
507 break;
508
509 case AMD64_X87UP:
510 /* 7. If the class is X87UP, the value is returned together
511 with the previous X87 value in %st0. */
512 gdb_assert (i > 0 && class[0] == AMD64_X87);
513 regnum = AMD64_ST0_REGNUM;
514 offset = 8;
515 len = 2;
516 break;
517
518 case AMD64_NO_CLASS:
519 continue;
520
521 default:
522 gdb_assert (!"Unexpected register class.");
523 }
524
525 gdb_assert (regnum != -1);
526
527 if (readbuf)
528 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
529 readbuf + i * 8);
530 if (writebuf)
531 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
532 writebuf + i * 8);
533 }
534
535 return RETURN_VALUE_REGISTER_CONVENTION;
536 }
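
/* For illustration, how the algorithm above places a few return
   types, assuming the standard classification:

     long                    -> %rax
     double                  -> %xmm0
     struct { long a, b; }   -> A in %rax, B in %rdx
     struct { double x, y; } -> X in %xmm0, Y in %xmm1
     struct { char c[32]; }  -> class MEMORY; the caller passes a
       buffer address in %rdi and finds it echoed back in %rax, which
       is where the readbuf case above fetches it from.  */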
537 \f
538
539 static CORE_ADDR
540 amd64_push_arguments (struct regcache *regcache, int nargs,
541 struct value **args, CORE_ADDR sp, int struct_return)
542 {
543 struct gdbarch *gdbarch = get_regcache_arch (regcache);
544 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
545 int *integer_regs = tdep->call_dummy_integer_regs;
546 int num_integer_regs = tdep->call_dummy_num_integer_regs;
547
548 static int sse_regnum[] =
549 {
550 /* %xmm0 ... %xmm7 */
551 AMD64_XMM0_REGNUM + 0, AMD64_XMM0_REGNUM + 1,
552 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
553 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
554 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
555 };
556 struct value **stack_args = alloca (nargs * sizeof (struct value *));
557 /* An array that mirrors the stack_args array. For all arguments
558 that are passed by MEMORY, if that argument's address also needs
559 to be stored in a register, the ARG_ADDR_REGNO array will contain
560 that register number (or a negative value otherwise). */
561 int *arg_addr_regno = alloca (nargs * sizeof (int));
562 int num_stack_args = 0;
563 int num_elements = 0;
564 int element = 0;
565 int integer_reg = 0;
566 int sse_reg = 0;
567 int i;
568
569 gdb_assert (tdep->classify);
570
571 /* Reserve a register for the "hidden" argument. */
572 if (struct_return)
573 integer_reg++;
574
575 for (i = 0; i < nargs; i++)
576 {
577 struct type *type = value_type (args[i]);
578 int len = TYPE_LENGTH (type);
579 enum amd64_reg_class class[2];
580 int needed_integer_regs = 0;
581 int needed_sse_regs = 0;
582 int j;
583
584 /* Classify argument. */
585 tdep->classify (type, class);
586
587 /* Calculate the number of integer and SSE registers needed for
588 this argument. */
589 for (j = 0; j < 2; j++)
590 {
591 if (class[j] == AMD64_INTEGER)
592 needed_integer_regs++;
593 else if (class[j] == AMD64_SSE)
594 needed_sse_regs++;
595 }
596
597 /* Check whether enough registers are available, and if the
598 argument should be passed in registers at all. */
599 if (integer_reg + needed_integer_regs > num_integer_regs
600 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
601 || (needed_integer_regs == 0 && needed_sse_regs == 0))
602 {
603 /* The argument will be passed on the stack. */
604 num_elements += ((len + 7) / 8);
605 stack_args[num_stack_args] = args[i];
606 /* If this is an AMD64_MEMORY argument whose address must also
607 be passed in one of the integer registers, reserve that
608 register and associate this value to that register so that
609 we can store the argument address as soon as we know it. */
610 if (class[0] == AMD64_MEMORY
611 && tdep->memory_args_by_pointer
612 && integer_reg < tdep->call_dummy_num_integer_regs)
613 arg_addr_regno[num_stack_args] =
614 tdep->call_dummy_integer_regs[integer_reg++];
615 else
616 arg_addr_regno[num_stack_args] = -1;
617 num_stack_args++;
618 }
619 else
620 {
621 /* The argument will be passed in registers. */
622 const gdb_byte *valbuf = value_contents (args[i]);
623 gdb_byte buf[8];
624
625 gdb_assert (len <= 16);
626
627 for (j = 0; len > 0; j++, len -= 8)
628 {
629 int regnum = -1;
630 int offset = 0;
631
632 switch (class[j])
633 {
634 case AMD64_INTEGER:
635 regnum = integer_regs[integer_reg++];
636 break;
637
638 case AMD64_SSE:
639 regnum = sse_regnum[sse_reg++];
640 break;
641
642 case AMD64_SSEUP:
643 gdb_assert (sse_reg > 0);
644 regnum = sse_regnum[sse_reg - 1];
645 offset = 8;
646 break;
647
648 default:
649 gdb_assert (!"Unexpected register class.");
650 }
651
652 gdb_assert (regnum != -1);
653 memset (buf, 0, sizeof buf);
654 memcpy (buf, valbuf + j * 8, min (len, 8));
655 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
656 }
657 }
658 }
659
660 /* Allocate space for the arguments on the stack. */
661 sp -= num_elements * 8;
662
663 /* The psABI says that "The end of the input argument area shall be
664 aligned on a 16 byte boundary." */
665 sp &= ~0xf;
666
667 /* Write out the arguments to the stack. */
668 for (i = 0; i < num_stack_args; i++)
669 {
670 struct type *type = value_type (stack_args[i]);
671 const gdb_byte *valbuf = value_contents (stack_args[i]);
672 int len = TYPE_LENGTH (type);
673 CORE_ADDR arg_addr = sp + element * 8;
674
675 write_memory (arg_addr, valbuf, len);
676 if (arg_addr_regno[i] >= 0)
677 {
678 /* We also need to store the address of that argument in
679 the given register. */
680 gdb_byte buf[8];
681 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
682
683 store_unsigned_integer (buf, 8, byte_order, arg_addr);
684 regcache_cooked_write (regcache, arg_addr_regno[i], buf);
685 }
686 element += ((len + 7) / 8);
687 }
688
689 /* The psABI says that "For calls that may call functions that use
690 varargs or stdargs (prototype-less calls or calls to functions
691 containing ellipsis (...) in the declaration) %al is used as
692 hidden argument to specify the number of SSE registers used."  */
693 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
694 return sp;
695 }
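
/* For illustration, consider the call f (1, 2.5, s) where S is a
   33-byte structure, assuming the standard System V sequence in
   TDEP->call_dummy_integer_regs and no struct return.  The int lands
   in %rdi and the double in %xmm0; S classifies as MEMORY and is
   copied into the stack argument area built above.  Finally %al is
   set to 1, the number of SSE registers used, in case the callee
   takes a variable argument list.  */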
696
697 static CORE_ADDR
698 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
699 struct regcache *regcache, CORE_ADDR bp_addr,
700 int nargs, struct value **args, CORE_ADDR sp,
701 int struct_return, CORE_ADDR struct_addr)
702 {
703 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
704 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
705 gdb_byte buf[8];
706
707 /* Pass arguments. */
708 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
709
710 /* Pass the "hidden" argument. */
711 if (struct_return)
712 {
713 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
714 /* The "hidden" argument is passed through the first argument
715 register. */
716 const int arg_regnum = tdep->call_dummy_integer_regs[0];
717
718 store_unsigned_integer (buf, 8, byte_order, struct_addr);
719 regcache_cooked_write (regcache, arg_regnum, buf);
720 }
721
722 /* Reserve some memory on the stack for the integer-parameter registers,
723 if required by the ABI. */
724 if (tdep->integer_param_regs_saved_in_caller_frame)
725 sp -= tdep->call_dummy_num_integer_regs * 8;
726
727 /* Store return address. */
728 sp -= 8;
729 store_unsigned_integer (buf, 8, byte_order, bp_addr);
730 write_memory (sp, buf, 8);
731
732 /* Finally, update the stack pointer... */
733 store_unsigned_integer (buf, 8, byte_order, sp);
734 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
735
736 /* ...and fake a frame pointer. */
737 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
738
739 return sp + 16;
740 }
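
/* The dummy-call frame built above, from higher to lower addresses:

     stack arguments (16-byte aligned)
     [ 8 * call_dummy_num_integer_regs bytes of register "home"
       space, only on ABIs that set
       integer_param_regs_saved_in_caller_frame, e.g. the 32 bytes
       required on amd64-windows ]
     return address (BP_ADDR)   <- new %rsp and the faked %rbp  */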
741 \f
742 /* Displaced instruction handling. */
743
744 /* A partially decoded instruction.
745 This contains enough details for displaced stepping purposes. */
746
747 struct amd64_insn
748 {
749 /* The number of opcode bytes. */
750 int opcode_len;
751 /* The offset of the rex prefix or -1 if not present. */
752 int rex_offset;
753 /* The offset to the first opcode byte. */
754 int opcode_offset;
755 /* The offset to the modrm byte or -1 if not present. */
756 int modrm_offset;
757
758 /* The raw instruction. */
759 gdb_byte *raw_insn;
760 };
761
762 struct displaced_step_closure
763 {
764 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
765 int tmp_used;
766 int tmp_regno;
767 ULONGEST tmp_save;
768
769 /* Details of the instruction. */
770 struct amd64_insn insn_details;
771
772 /* Amount of space allocated to insn_buf. */
773 int max_len;
774
775 /* The possibly modified insn.
776 This is a variable-length field. */
777 gdb_byte insn_buf[1];
778 };
779
780 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
781 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
782 at which point delete these in favor of libopcodes' versions). */
783
784 static const unsigned char onebyte_has_modrm[256] = {
785 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
786 /* ------------------------------- */
787 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
788 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
789 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
790 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
791 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
792 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
793 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
794 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
795 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
796 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
797 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
798 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
799 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
800 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
801 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
802 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
803 /* ------------------------------- */
804 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
805 };
806
807 static const unsigned char twobyte_has_modrm[256] = {
808 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
809 /* ------------------------------- */
810 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
811 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
812 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
813 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
814 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
815 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
816 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
817 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
818 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
819 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
820 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
821 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
822 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
823 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
824 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
825 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
826 /* ------------------------------- */
827 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
828 };
829
830 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
831
832 static int
833 rex_prefix_p (gdb_byte pfx)
834 {
835 return REX_PREFIX_P (pfx);
836 }
837
838 /* Skip the legacy instruction prefixes in INSN.
839 We assume INSN is properly sentineled so we don't have to worry
840 about falling off the end of the buffer. */
841
842 static gdb_byte *
843 amd64_skip_prefixes (gdb_byte *insn)
844 {
845 while (1)
846 {
847 switch (*insn)
848 {
849 case DATA_PREFIX_OPCODE:
850 case ADDR_PREFIX_OPCODE:
851 case CS_PREFIX_OPCODE:
852 case DS_PREFIX_OPCODE:
853 case ES_PREFIX_OPCODE:
854 case FS_PREFIX_OPCODE:
855 case GS_PREFIX_OPCODE:
856 case SS_PREFIX_OPCODE:
857 case LOCK_PREFIX_OPCODE:
858 case REPE_PREFIX_OPCODE:
859 case REPNE_PREFIX_OPCODE:
860 ++insn;
861 continue;
862 default:
863 break;
864 }
865 break;
866 }
867
868 return insn;
869 }
870
871 /* fprintf-function for amd64_insn_length.
872 This function is a nop; we don't want to print anything, we just want to
873 compute the length of the insn. */
874
875 static int ATTR_FORMAT (printf, 2, 3)
876 amd64_insn_length_fprintf (void *stream, const char *format, ...)
877 {
878 return 0;
879 }
880
881 /* Initialize a struct disassemble_info for amd64_insn_length. */
882
883 static void
884 amd64_insn_length_init_dis (struct gdbarch *gdbarch,
885 struct disassemble_info *di,
886 const gdb_byte *insn, int max_len,
887 CORE_ADDR addr)
888 {
889 init_disassemble_info (di, NULL, amd64_insn_length_fprintf);
890
891 /* init_disassemble_info installs buffer_read_memory, etc.
892 so we don't need to do that here.
893 The cast is necessary until disassemble_info is const-ified. */
894 di->buffer = (gdb_byte *) insn;
895 di->buffer_length = max_len;
896 di->buffer_vma = addr;
897
898 di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
899 di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
900 di->endian = gdbarch_byte_order (gdbarch);
901 di->endian_code = gdbarch_byte_order_for_code (gdbarch);
902
903 disassemble_init_for_target (di);
904 }
905
906 /* Return the length in bytes of INSN.
907 MAX_LEN is the size of the buffer containing INSN.
908 libopcodes currently doesn't export a utility to compute the
909 instruction length, so use the disassembler until then. */
910
911 static int
912 amd64_insn_length (struct gdbarch *gdbarch,
913 const gdb_byte *insn, int max_len, CORE_ADDR addr)
914 {
915 struct disassemble_info di;
916
917 amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);
918
919 return gdbarch_print_insn (gdbarch, addr, &di);
920 }
921
922 /* Return an integer register (other than RSP) that is unused as an input
923 operand in INSN.
924 In order to not require adding a rex prefix if the insn doesn't already
925 have one, the result is restricted to RAX ... RDI, sans RSP.
926 The register numbering of the result follows architecture ordering,
927 e.g. RDI = 7. */
928
929 static int
930 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
931 {
932 /* 1 bit for each reg */
933 int used_regs_mask = 0;
934
935 /* There can be at most 3 int regs used as inputs in an insn, and we have
936 7 to choose from (RAX ... RDI, sans RSP).
937 This allows us to take a conservative approach and keep things simple.
938 E.g. by avoiding RAX, we don't have to specifically watch for opcodes
939 that implicitly specify RAX. */
940
941 /* Avoid RAX. */
942 used_regs_mask |= 1 << EAX_REG_NUM;
943 /* Similarly avoid RDX, an implicit operand in divides. */
944 used_regs_mask |= 1 << EDX_REG_NUM;
945 /* Avoid RSP. */
946 used_regs_mask |= 1 << ESP_REG_NUM;
947
948 /* If the opcode is one byte long and there's no ModRM byte,
949 assume the opcode specifies a register. */
950 if (details->opcode_len == 1 && details->modrm_offset == -1)
951 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
952
953 /* Mark used regs in the modrm/sib bytes. */
954 if (details->modrm_offset != -1)
955 {
956 int modrm = details->raw_insn[details->modrm_offset];
957 int mod = MODRM_MOD_FIELD (modrm);
958 int reg = MODRM_REG_FIELD (modrm);
959 int rm = MODRM_RM_FIELD (modrm);
960 int have_sib = mod != 3 && rm == 4;
961
962 /* Assume the reg field of the modrm byte specifies a register. */
963 used_regs_mask |= 1 << reg;
964
965 if (have_sib)
966 {
967 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
968 int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
969 used_regs_mask |= 1 << base;
970 used_regs_mask |= 1 << index;
971 }
972 else
973 {
974 used_regs_mask |= 1 << rm;
975 }
976 }
977
978 gdb_assert (used_regs_mask < 256);
979 gdb_assert (used_regs_mask != 255);
980
981 /* Finally, find a free reg. */
982 {
983 int i;
984
985 for (i = 0; i < 8; ++i)
986 {
987 if (! (used_regs_mask & (1 << i)))
988 return i;
989 }
990
991 /* We shouldn't get here. */
992 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
993 }
994 }
995
996 /* Extract the details of INSN that we need. */
997
998 static void
999 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1000 {
1001 gdb_byte *start = insn;
1002 int need_modrm;
1003
1004 details->raw_insn = insn;
1005
1006 details->opcode_len = -1;
1007 details->rex_offset = -1;
1008 details->opcode_offset = -1;
1009 details->modrm_offset = -1;
1010
1011 /* Skip legacy instruction prefixes. */
1012 insn = amd64_skip_prefixes (insn);
1013
1014 /* Skip REX instruction prefix. */
1015 if (rex_prefix_p (*insn))
1016 {
1017 details->rex_offset = insn - start;
1018 ++insn;
1019 }
1020
1021 details->opcode_offset = insn - start;
1022
1023 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1024 {
1025 /* Two or three-byte opcode. */
1026 ++insn;
1027 need_modrm = twobyte_has_modrm[*insn];
1028
1029 /* Check for three-byte opcode. */
1030 switch (*insn)
1031 {
1032 case 0x24:
1033 case 0x25:
1034 case 0x38:
1035 case 0x3a:
1036 case 0x7a:
1037 case 0x7b:
1038 ++insn;
1039 details->opcode_len = 3;
1040 break;
1041 default:
1042 details->opcode_len = 2;
1043 break;
1044 }
1045 }
1046 else
1047 {
1048 /* One-byte opcode. */
1049 need_modrm = onebyte_has_modrm[*insn];
1050 details->opcode_len = 1;
1051 }
1052
1053 if (need_modrm)
1054 {
1055 ++insn;
1056 details->modrm_offset = insn - start;
1057 }
1058 }
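
/* For illustration, decoding the 7-byte instruction

     48 8b 05 d6 de 21 00    mov 0x21ded6(%rip),%rax

   yields rex_offset = 0 (the 0x48 REX.W prefix), opcode_offset = 1,
   opcode_len = 1 (0x8b has a ModRM byte per onebyte_has_modrm) and
   modrm_offset = 2.  ModRM 0x05 encodes mod = 00, reg = 000 (%rax),
   r/m = 101, i.e. %rip-relative addressing with a 32-bit
   displacement.  */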
1059
1060 /* Update %rip-relative addressing in INSN.
1061
1062 %rip-relative addressing only uses a 32-bit displacement.
1063 32 bits is not enough to be guaranteed to cover the distance between where
1064 the real instruction is and where its copy is.
1065 Convert the insn to use base+disp addressing.
1066 We set base = pc + insn_length so we can leave disp unchanged. */
1067
1068 static void
1069 fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1070 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1071 {
1072 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1073 const struct amd64_insn *insn_details = &dsc->insn_details;
1074 int modrm_offset = insn_details->modrm_offset;
1075 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1076 CORE_ADDR rip_base;
1077 int32_t disp;
1078 int insn_length;
1079 int arch_tmp_regno, tmp_regno;
1080 ULONGEST orig_value;
1081
1082 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1083 ++insn;
1084
1085 /* Compute the rip-relative address. */
1086 disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
1087 insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
1088 rip_base = from + insn_length;
1089
1090 /* We need a register to hold the address.
1091 Pick one not used in the insn.
1092 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1093 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1094 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1095
1096 /* REX.B should be unset as we were using rip-relative addressing,
1097 but ensure it's unset anyway; tmp_regno is not r8-r15. */
1098 if (insn_details->rex_offset != -1)
1099 dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1100
1101 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1102 dsc->tmp_regno = tmp_regno;
1103 dsc->tmp_save = orig_value;
1104 dsc->tmp_used = 1;
1105
1106 /* Convert the ModRM field to be base+disp. */
1107 dsc->insn_buf[modrm_offset] &= ~0xc7;
1108 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1109
1110 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1111
1112 if (debug_displaced)
1113 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1114 "displaced: using temp reg %d, old value %s, new value %s\n",
1115 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1116 paddress (gdbarch, rip_base));
1117 }
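
/* Continuing the example from amd64_get_insn_details: for a copied

     48 8b 05 d6 de 21 00    mov 0x21ded6(%rip),%rax

   %rax, %rdx, %rsp and the registers referenced by the ModRM byte
   are marked used, so amd64_get_unused_input_int_reg returns %rcx
   (arch reg 1).  The ModRM byte 0x05 is rewritten to 0x81 (mod = 10,
   reg = %rax, r/m = %rcx), giving

     48 8b 81 d6 de 21 00    mov 0x21ded6(%rcx),%rax

   with %rcx temporarily holding FROM + insn_length; the saved %rcx
   value is restored by amd64_displaced_step_fixup.  */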
1118
1119 static void
1120 fixup_displaced_copy (struct gdbarch *gdbarch,
1121 struct displaced_step_closure *dsc,
1122 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1123 {
1124 const struct amd64_insn *details = &dsc->insn_details;
1125
1126 if (details->modrm_offset != -1)
1127 {
1128 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1129
1130 if ((modrm & 0xc7) == 0x05)
1131 {
1132 /* The insn uses rip-relative addressing.
1133 Deal with it. */
1134 fixup_riprel (gdbarch, dsc, from, to, regs);
1135 }
1136 }
1137 }
1138
1139 struct displaced_step_closure *
1140 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1141 CORE_ADDR from, CORE_ADDR to,
1142 struct regcache *regs)
1143 {
1144 int len = gdbarch_max_insn_length (gdbarch);
1145 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1146 continually watch for running off the end of the buffer. */
1147 int fixup_sentinel_space = len;
1148 struct displaced_step_closure *dsc =
1149 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1150 gdb_byte *buf = &dsc->insn_buf[0];
1151 struct amd64_insn *details = &dsc->insn_details;
1152
1153 dsc->tmp_used = 0;
1154 dsc->max_len = len + fixup_sentinel_space;
1155
1156 read_memory (from, buf, len);
1157
1158 /* Set up the sentinel space so we don't have to worry about running
1159 off the end of the buffer. An excessive number of leading prefixes
1160 could otherwise cause this. */
1161 memset (buf + len, 0, fixup_sentinel_space);
1162
1163 amd64_get_insn_details (buf, details);
1164
1165 /* GDB may get control back after the insn after the syscall.
1166 Presumably this is a kernel bug.
1167 If this is a syscall, make sure there's a nop afterwards. */
1168 {
1169 int syscall_length;
1170
1171 if (amd64_syscall_p (details, &syscall_length))
1172 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1173 }
1174
1175 /* Modify the insn to cope with the address where it will be executed from.
1176 In particular, handle any rip-relative addressing. */
1177 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1178
1179 write_memory (to, buf, len);
1180
1181 if (debug_displaced)
1182 {
1183 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1184 paddress (gdbarch, from), paddress (gdbarch, to));
1185 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1186 }
1187
1188 return dsc;
1189 }
1190
1191 static int
1192 amd64_absolute_jmp_p (const struct amd64_insn *details)
1193 {
1194 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1195
1196 if (insn[0] == 0xff)
1197 {
1198 /* jump near, absolute indirect (/4) */
1199 if ((insn[1] & 0x38) == 0x20)
1200 return 1;
1201
1202 /* jump far, absolute indirect (/5) */
1203 if ((insn[1] & 0x38) == 0x28)
1204 return 1;
1205 }
1206
1207 return 0;
1208 }
1209
1210 static int
1211 amd64_absolute_call_p (const struct amd64_insn *details)
1212 {
1213 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1214
1215 if (insn[0] == 0xff)
1216 {
1217 /* Call near, absolute indirect (/2) */
1218 if ((insn[1] & 0x38) == 0x10)
1219 return 1;
1220
1221 /* Call far, absolute indirect (/3) */
1222 if ((insn[1] & 0x38) == 0x18)
1223 return 1;
1224 }
1225
1226 return 0;
1227 }
1228
1229 static int
1230 amd64_ret_p (const struct amd64_insn *details)
1231 {
1232 /* NOTE: gcc can emit "repz ; ret". */
1233 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1234
1235 switch (insn[0])
1236 {
1237 case 0xc2: /* ret near, pop N bytes */
1238 case 0xc3: /* ret near */
1239 case 0xca: /* ret far, pop N bytes */
1240 case 0xcb: /* ret far */
1241 case 0xcf: /* iret */
1242 return 1;
1243
1244 default:
1245 return 0;
1246 }
1247 }
1248
1249 static int
1250 amd64_call_p (const struct amd64_insn *details)
1251 {
1252 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1253
1254 if (amd64_absolute_call_p (details))
1255 return 1;
1256
1257 /* call near, relative */
1258 if (insn[0] == 0xe8)
1259 return 1;
1260
1261 return 0;
1262 }
1263
1264 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1265 length in bytes. Otherwise, return zero. */
1266
1267 static int
1268 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1269 {
1270 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1271
1272 if (insn[0] == 0x0f && insn[1] == 0x05)
1273 {
1274 *lengthp = 2;
1275 return 1;
1276 }
1277
1278 return 0;
1279 }
1280
1281 /* Fix up the state of registers and memory after having single-stepped
1282 a displaced instruction. */
1283
1284 void
1285 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1286 struct displaced_step_closure *dsc,
1287 CORE_ADDR from, CORE_ADDR to,
1288 struct regcache *regs)
1289 {
1290 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1291 /* The offset we applied to the instruction's address. */
1292 ULONGEST insn_offset = to - from;
1293 gdb_byte *insn = dsc->insn_buf;
1294 const struct amd64_insn *insn_details = &dsc->insn_details;
1295
1296 if (debug_displaced)
1297 fprintf_unfiltered (gdb_stdlog,
1298 "displaced: fixup (%s, %s), "
1299 "insn = 0x%02x 0x%02x ...\n",
1300 paddress (gdbarch, from), paddress (gdbarch, to),
1301 insn[0], insn[1]);
1302
1303 /* If we used a tmp reg, restore it. */
1304
1305 if (dsc->tmp_used)
1306 {
1307 if (debug_displaced)
1308 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1309 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1310 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1311 }
1312
1313 /* The list of issues to contend with here is taken from
1314 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1315 Yay for Free Software! */
1316
1317 /* Relocate the %rip back to the program's instruction stream,
1318 if necessary. */
1319
1320 /* Except in the case of absolute or indirect jump or call
1321 instructions, or a return instruction, the new rip is relative to
1322 the displaced instruction; make it relative to the original insn.
1323 Well, signal handler returns don't need relocation either, but we use the
1324 value of %rip to recognize those; see below. */
1325 if (! amd64_absolute_jmp_p (insn_details)
1326 && ! amd64_absolute_call_p (insn_details)
1327 && ! amd64_ret_p (insn_details))
1328 {
1329 ULONGEST orig_rip;
1330 int insn_len;
1331
1332 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1333
1334 /* A signal trampoline system call changes the %rip, resuming
1335 execution of the main program after the signal handler has
1336 returned. That makes them like 'return' instructions; we
1337 shouldn't relocate %rip.
1338
1339 But most system calls don't, and we do need to relocate %rip.
1340
1341 Our heuristic for distinguishing these cases: if stepping
1342 over the system call instruction left control directly after
1343 the instruction, then we relocate --- control almost certainly
1344 doesn't belong in the displaced copy. Otherwise, we assume
1345 the instruction has put control where it belongs, and leave
1346 it unrelocated. Goodness help us if there are PC-relative
1347 system calls. */
1348 if (amd64_syscall_p (insn_details, &insn_len)
1349 && orig_rip != to + insn_len
1350 /* GDB can get control back after the insn after the syscall.
1351 Presumably this is a kernel bug.
1352 Fixup ensures it's a nop; we add one to the length for it. */
1353 && orig_rip != to + insn_len + 1)
1354 {
1355 if (debug_displaced)
1356 fprintf_unfiltered (gdb_stdlog,
1357 "displaced: syscall changed %%rip; "
1358 "not relocating\n");
1359 }
1360 else
1361 {
1362 ULONGEST rip = orig_rip - insn_offset;
1363
1364 /* If we just stepped over a breakpoint insn, we don't backup
1365 the pc on purpose; this is to match behaviour without
1366 stepping. */
1367
1368 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1369
1370 if (debug_displaced)
1371 fprintf_unfiltered (gdb_stdlog,
1372 "displaced: "
1373 "relocated %%rip from %s to %s\n",
1374 paddress (gdbarch, orig_rip),
1375 paddress (gdbarch, rip));
1376 }
1377 }
1378
1379 /* If the instruction was PUSHFL, then the TF bit will be set in the
1380 pushed value, and should be cleared. We'll leave this for later,
1381 since GDB already messes up the TF flag when stepping over a
1382 pushfl. */
1383
1384 /* If the instruction was a call, the return address now atop the
1385 stack is the address following the copied instruction. We need
1386 to make it the address following the original instruction. */
1387 if (amd64_call_p (insn_details))
1388 {
1389 ULONGEST rsp;
1390 ULONGEST retaddr;
1391 const ULONGEST retaddr_len = 8;
1392
1393 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1394 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1395 retaddr = retaddr - insn_offset;
1396 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1397
1398 if (debug_displaced)
1399 fprintf_unfiltered (gdb_stdlog,
1400 "displaced: relocated return addr at %s "
1401 "to %s\n",
1402 paddress (gdbarch, rsp),
1403 paddress (gdbarch, retaddr));
1404 }
1405 }
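
/* For illustration: if a 5-byte relative "call" is copied from FROM
   to TO and single-stepped there, the copy pushes TO + 5 as the
   return address.  The code above subtracts INSN_OFFSET (TO - FROM),
   turning it into FROM + 5, the instruction following the original
   call; %rip is relocated back into the original instruction stream
   the same way.  */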
1406 \f
1407 /* The maximum number of saved registers. This should include %rip. */
1408 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1409
1410 struct amd64_frame_cache
1411 {
1412 /* Base address. */
1413 CORE_ADDR base;
1414 CORE_ADDR sp_offset;
1415 CORE_ADDR pc;
1416
1417 /* Saved registers. */
1418 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1419 CORE_ADDR saved_sp;
1420 int saved_sp_reg;
1421
1422 /* Do we have a frame? */
1423 int frameless_p;
1424 };
1425
1426 /* Initialize a frame cache. */
1427
1428 static void
1429 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1430 {
1431 int i;
1432
1433 /* Base address. */
1434 cache->base = 0;
1435 cache->sp_offset = -8;
1436 cache->pc = 0;
1437
1438 /* Saved registers. We initialize these to -1 since zero is a valid
1439 offset (that's where %rbp is supposed to be stored).
1440 The values start out as being offsets, and are later converted to
1441 addresses (at which point -1 is interpreted as an address, still meaning
1442 "invalid"). */
1443 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1444 cache->saved_regs[i] = -1;
1445 cache->saved_sp = 0;
1446 cache->saved_sp_reg = -1;
1447
1448 /* Frameless until proven otherwise. */
1449 cache->frameless_p = 1;
1450 }
1451
1452 /* Allocate and initialize a frame cache. */
1453
1454 static struct amd64_frame_cache *
1455 amd64_alloc_frame_cache (void)
1456 {
1457 struct amd64_frame_cache *cache;
1458
1459 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1460 amd64_init_frame_cache (cache);
1461 return cache;
1462 }
1463
1464 /* GCC 4.4 and later can put code in the prologue to realign the
1465 stack pointer. Check whether PC points to such code, and update
1466 CACHE accordingly. Return the first instruction after the code
1467 sequence or CURRENT_PC, whichever is smaller. If we don't
1468 recognize the code, return PC. */
1469
1470 static CORE_ADDR
1471 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1472 struct amd64_frame_cache *cache)
1473 {
1474 /* There are 2 code sequences to re-align stack before the frame
1475 gets set up:
1476
1477 1. Use a caller-saved register:
1478
1479 leaq 8(%rsp), %reg
1480 andq $-XXX, %rsp
1481 pushq -8(%reg)
1482
1483 2. Use a callee-saved register:
1484
1485 pushq %reg
1486 leaq 16(%rsp), %reg
1487 andq $-XXX, %rsp
1488 pushq -8(%reg)
1489
1490 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1491
1492 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1493 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1494 */
1495
1496 gdb_byte buf[18];
1497 int reg, r;
1498 int offset, offset_and;
1499
1500 if (target_read_memory (pc, buf, sizeof buf))
1501 return pc;
1502
1503 /* Check the caller-saved register case. The first instruction has
1504 to be "leaq 8(%rsp), %reg". */
1505 if ((buf[0] & 0xfb) == 0x48
1506 && buf[1] == 0x8d
1507 && buf[3] == 0x24
1508 && buf[4] == 0x8)
1509 {
1510 /* MOD must be binary 10 and R/M must be binary 100. */
1511 if ((buf[2] & 0xc7) != 0x44)
1512 return pc;
1513
1514 /* REG has register number. */
1515 reg = (buf[2] >> 3) & 7;
1516
1517 /* Check the REX.R bit. */
1518 if (buf[0] == 0x4c)
1519 reg += 8;
1520
1521 offset = 5;
1522 }
1523 else
1524 {
1525 /* Check the callee-saved register case. The first instruction
1526 has to be "pushq %reg". */
1527 reg = 0;
1528 if ((buf[0] & 0xf8) == 0x50)
1529 offset = 0;
1530 else if ((buf[0] & 0xf6) == 0x40
1531 && (buf[1] & 0xf8) == 0x50)
1532 {
1533 /* Check the REX.B bit. */
1534 if ((buf[0] & 1) != 0)
1535 reg = 8;
1536
1537 offset = 1;
1538 }
1539 else
1540 return pc;
1541
1542 /* Get register. */
1543 reg += buf[offset] & 0x7;
1544
1545 offset++;
1546
1547 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1548 if ((buf[offset] & 0xfb) != 0x48
1549 || buf[offset + 1] != 0x8d
1550 || buf[offset + 3] != 0x24
1551 || buf[offset + 4] != 0x10)
1552 return pc;
1553
1554 /* MOD must be binary 10 and R/M must be binary 100. */
1555 if ((buf[offset + 2] & 0xc7) != 0x44)
1556 return pc;
1557
1558 /* REG has register number. */
1559 r = (buf[offset + 2] >> 3) & 7;
1560
1561 /* Check the REX.R bit. */
1562 if (buf[offset] == 0x4c)
1563 r += 8;
1564
1565 /* Registers in pushq and leaq have to be the same. */
1566 if (reg != r)
1567 return pc;
1568
1569 offset += 5;
1570 }
1571
1572 /* Register can't be %rsp or %rbp. */
1573 if (reg == 4 || reg == 5)
1574 return pc;
1575
1576 /* The next instruction has to be "andq $-XXX, %rsp". */
1577 if (buf[offset] != 0x48
1578 || buf[offset + 2] != 0xe4
1579 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1580 return pc;
1581
1582 offset_and = offset;
1583 offset += buf[offset + 1] == 0x81 ? 7 : 4;
1584
1585 /* The next instruction has to be "pushq -8(%reg)". */
1586 r = 0;
1587 if (buf[offset] == 0xff)
1588 offset++;
1589 else if ((buf[offset] & 0xf6) == 0x40
1590 && buf[offset + 1] == 0xff)
1591 {
1592 /* Check the REX.B bit. */
1593 if ((buf[offset] & 0x1) != 0)
1594 r = 8;
1595 offset += 2;
1596 }
1597 else
1598 return pc;
1599
1600 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
1601 01. */
1602 if (buf[offset + 1] != 0xf8
1603 || (buf[offset] & 0xf8) != 0x70)
1604 return pc;
1605
1606 /* R/M has register. */
1607 r += buf[offset] & 7;
1608
1609 /* Registers in leaq and pushq have to be the same. */
1610 if (reg != r)
1611 return pc;
1612
1613 if (current_pc > pc + offset_and)
1614 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
1615
1616 return min (pc + offset + 2, current_pc);
1617 }
1618
1619 /* Do a limited analysis of the prologue at PC and update CACHE
1620 accordingly. Bail out early if CURRENT_PC is reached. Return the
1621 address where the analysis stopped.
1622
1623 We will handle only functions beginning with:
1624
1625 pushq %rbp 0x55
1626 movq %rsp, %rbp 0x48 0x89 0xe5
1627
1628 Any function that doesn't start with this sequence will be assumed
1629 to have no prologue and thus no valid frame pointer in %rbp. */
1630
1631 static CORE_ADDR
1632 amd64_analyze_prologue (struct gdbarch *gdbarch,
1633 CORE_ADDR pc, CORE_ADDR current_pc,
1634 struct amd64_frame_cache *cache)
1635 {
1636 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1637 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1638 gdb_byte buf[3];
1639 gdb_byte op;
1640
1641 if (current_pc <= pc)
1642 return current_pc;
1643
1644 pc = amd64_analyze_stack_align (pc, current_pc, cache);
1645
1646 op = read_memory_unsigned_integer (pc, 1, byte_order);
1647
1648 if (op == 0x55) /* pushq %rbp */
1649 {
1650 /* Take into account that we've executed the `pushq %rbp' that
1651 starts this instruction sequence. */
1652 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
1653 cache->sp_offset += 8;
1654
1655 /* If that's all, return now. */
1656 if (current_pc <= pc + 1)
1657 return current_pc;
1658
1659 /* Check for `movq %rsp, %rbp'. */
1660 read_memory (pc + 1, buf, 3);
1661 if (memcmp (buf, proto, 3) != 0)
1662 return pc + 1;
1663
1664 /* OK, we actually have a frame. */
1665 cache->frameless_p = 0;
1666 return pc + 4;
1667 }
1668
1669 return pc;
1670 }
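
/* For illustration, a function that begins with the expected
   sequence

     55            pushq %rbp
     48 89 e5      movq  %rsp, %rbp

   is recognized as having a frame: frameless_p is cleared, %rbp is
   recorded as saved at offset 0 from the frame base, and analysis
   resumes at PC + 4.  Functions that do not start this way (apart
   from the stack-realignment sequences handled earlier) keep
   frameless_p set.  */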
1671
1672 /* Return PC of first real instruction. */
1673
1674 static CORE_ADDR
1675 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1676 {
1677 struct amd64_frame_cache cache;
1678 CORE_ADDR pc;
1679
1680 amd64_init_frame_cache (&cache);
1681 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1682 &cache);
1683 if (cache.frameless_p)
1684 return start_pc;
1685
1686 return pc;
1687 }
1688 \f
1689
1690 /* Normal frames. */
1691
1692 static struct amd64_frame_cache *
1693 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
1694 {
1695 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1696 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1697 struct amd64_frame_cache *cache;
1698 gdb_byte buf[8];
1699 int i;
1700
1701 if (*this_cache)
1702 return *this_cache;
1703
1704 cache = amd64_alloc_frame_cache ();
1705 *this_cache = cache;
1706
1707 cache->pc = get_frame_func (this_frame);
1708 if (cache->pc != 0)
1709 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
1710 cache);
1711
1712 if (cache->saved_sp_reg != -1)
1713 {
1714 /* Stack pointer has been saved. */
1715 get_frame_register (this_frame, cache->saved_sp_reg, buf);
1716 cache->saved_sp = extract_unsigned_integer(buf, 8, byte_order);
1717 }
1718
1719 if (cache->frameless_p)
1720 {
1721 /* We didn't find a valid frame. If we're at the start of a
1722 function, or somewhere halfway through its prologue, the function's
1723 frame probably hasn't been fully set up yet. Try to
1724 reconstruct the base address for the stack frame by looking
1725 at the stack pointer. For truly "frameless" functions this
1726 might work too. */
1727
1728 if (cache->saved_sp_reg != -1)
1729 {
1730 /* We're halfway aligning the stack. */
1731 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1732 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1733
1734 /* This will be added back below. */
1735 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1736 }
1737 else
1738 {
1739 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1740 cache->base = extract_unsigned_integer (buf, 8, byte_order)
1741 + cache->sp_offset;
1742 }
1743 }
1744 else
1745 {
1746 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
1747 cache->base = extract_unsigned_integer (buf, 8, byte_order);
1748 }
1749
1750 /* Now that we have the base address for the stack frame we can
1751 calculate the value of %rsp in the calling frame. */
1752 cache->saved_sp = cache->base + 16;
1753
1754 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1755 frame we find it at the same offset from the reconstructed base
1756 address. If we're halfway aligning the stack, %rip is handled
1757 differently (see above). */
1758 if (!cache->frameless_p || cache->saved_sp_reg == -1)
1759 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
1760
1761 /* Adjust all the saved registers such that they contain addresses
1762 instead of offsets. */
1763 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1764 if (cache->saved_regs[i] != -1)
1765 cache->saved_regs[i] += cache->base;
1766
1767 return cache;
1768 }
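
/* The frame layout assumed above for a function with the standard
   prologue, from higher to lower addresses:

     base + 16   caller's %rsp (its value just before the call)
     base +  8   return address (saved %rip)
     base +  0   saved %rbp    <- %rbp == cache->base

   which is why saved_sp is base + 16 and the saved %rip is found at
   offset 8 from the base.  */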
1769
1770 static void
1771 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1772 struct frame_id *this_id)
1773 {
1774 struct amd64_frame_cache *cache =
1775 amd64_frame_cache (this_frame, this_cache);
1776
1777 /* This marks the outermost frame. */
1778 if (cache->base == 0)
1779 return;
1780
1781 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1782 }
1783
1784 static struct value *
1785 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1786 int regnum)
1787 {
1788 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1789 struct amd64_frame_cache *cache =
1790 amd64_frame_cache (this_frame, this_cache);
1791
1792 gdb_assert (regnum >= 0);
1793
1794 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
1795 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
1796
1797 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1798 return frame_unwind_got_memory (this_frame, regnum,
1799 cache->saved_regs[regnum]);
1800
1801 return frame_unwind_got_register (this_frame, regnum, regnum);
1802 }
1803
1804 static const struct frame_unwind amd64_frame_unwind =
1805 {
1806 NORMAL_FRAME,
1807 amd64_frame_this_id,
1808 amd64_frame_prev_register,
1809 NULL,
1810 default_frame_sniffer
1811 };
1812 \f
1813
1814 /* Signal trampolines. */
1815
1816 /* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
1817 64-bit variants. This would require using identical frame caches
1818 on both platforms. */
1819
1820 static struct amd64_frame_cache *
1821 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
1822 {
1823 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1824 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1825 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1826 struct amd64_frame_cache *cache;
1827 CORE_ADDR addr;
1828 gdb_byte buf[8];
1829 int i;
1830
1831 if (*this_cache)
1832 return *this_cache;
1833
1834 cache = amd64_alloc_frame_cache ();
1835
1836 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1837 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
1838
1839 addr = tdep->sigcontext_addr (this_frame);
1840 gdb_assert (tdep->sc_reg_offset);
1841 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
1842 for (i = 0; i < tdep->sc_num_regs; i++)
1843 if (tdep->sc_reg_offset[i] != -1)
1844 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
1845
1846 *this_cache = cache;
1847 return cache;
1848 }
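
/* Editorial note (not part of the upstream source): tdep->sigcontext_addr
   and tdep->sc_reg_offset are supplied by the OS-specific *_init_abi
   routines; the loop above merely turns "register I is saved at byte
   offset sc_reg_offset[I] within the sigcontext" into an absolute
   address inside the inferior's signal frame.  */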
1849
1850 static void
1851 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
1852 void **this_cache, struct frame_id *this_id)
1853 {
1854 struct amd64_frame_cache *cache =
1855 amd64_sigtramp_frame_cache (this_frame, this_cache);
1856
1857 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
1858 }
1859
1860 static struct value *
1861 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
1862 void **this_cache, int regnum)
1863 {
1864 /* Make sure we've initialized the cache. */
1865 amd64_sigtramp_frame_cache (this_frame, this_cache);
1866
1867 return amd64_frame_prev_register (this_frame, this_cache, regnum);
1868 }
1869
1870 static int
1871 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1872 struct frame_info *this_frame,
1873 void **this_cache)
1874 {
1875 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1876
1877 /* We shouldn't even bother if we don't have a sigcontext_addr
1878 handler. */
1879 if (tdep->sigcontext_addr == NULL)
1880 return 0;
1881
1882 if (tdep->sigtramp_p != NULL)
1883 {
1884 if (tdep->sigtramp_p (this_frame))
1885 return 1;
1886 }
1887
1888 if (tdep->sigtramp_start != 0)
1889 {
1890 CORE_ADDR pc = get_frame_pc (this_frame);
1891
1892 gdb_assert (tdep->sigtramp_end != 0);
1893 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
1894 return 1;
1895 }
1896
1897 return 0;
1898 }
1899
1900 static const struct frame_unwind amd64_sigtramp_frame_unwind =
1901 {
1902 SIGTRAMP_FRAME,
1903 amd64_sigtramp_frame_this_id,
1904 amd64_sigtramp_frame_prev_register,
1905 NULL,
1906 amd64_sigtramp_frame_sniffer
1907 };
1908 \f
1909
1910 static CORE_ADDR
1911 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
1912 {
1913 struct amd64_frame_cache *cache =
1914 amd64_frame_cache (this_frame, this_cache);
1915
1916 return cache->base;
1917 }
1918
1919 static const struct frame_base amd64_frame_base =
1920 {
1921 &amd64_frame_unwind,
1922 amd64_frame_base_address,
1923 amd64_frame_base_address,
1924 amd64_frame_base_address
1925 };
1926
1927 /* Normal frames, but in a function epilogue. */
1928
1929 /* The epilogue is defined here as the 'ret' instruction, which will
1930 follow any instruction such as 'leave' or 'pop %rbp' that destroys
1931 the function's stack frame. */
1932
1933 static int
1934 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
1935 {
1936 gdb_byte insn;
1937
1938 if (target_read_memory (pc, &insn, 1))
1939 return 0; /* Can't read memory at pc. */
1940
1941 if (insn != 0xc3) /* 'ret' instruction. */
1942 return 0;
1943
1944 return 1;
1945 }
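
/* Editorial note (not part of the upstream source): only the one-byte
   near return (0xc3) is recognized here; the rarer "ret $imm16"
   encoding (0xc2) and far returns would not be treated as being in an
   epilogue by this check.  */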
1946
1947 static int
1948 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
1949 struct frame_info *this_frame,
1950 void **this_prologue_cache)
1951 {
1952 if (frame_relative_level (this_frame) == 0)
1953 return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
1954 get_frame_pc (this_frame));
1955 else
1956 return 0;
1957 }
1958
1959 static struct amd64_frame_cache *
1960 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
1961 {
1962 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1963 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1964 struct amd64_frame_cache *cache;
1965 gdb_byte buf[8];
1966
1967 if (*this_cache)
1968 return *this_cache;
1969
1970 cache = amd64_alloc_frame_cache ();
1971 *this_cache = cache;
1972
1973 /* Cache base will be %rsp plus cache->sp_offset (-8). */
1974 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1975 cache->base = extract_unsigned_integer (buf, 8,
1976 byte_order) + cache->sp_offset;
1977
1978 /* Cache pc will be the frame func. */
1979 cache->pc = get_frame_pc (this_frame);
1980
1981 /* The saved %rsp will be at cache->base plus 16. */
1982 cache->saved_sp = cache->base + 16;
1983
1984 /* The saved %rip will be at cache->base plus 8. */
1985 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
1986
1987 return cache;
1988 }
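
/* Editorial note (not part of the upstream source): at the 'ret'
   instruction %rsp still points at the return address.  With the
   default sp_offset of -8 established when the cache is allocated,
   cache->base is %rsp - 8, so base + 8 is exactly the word %rsp points
   to (the saved %rip), and base + 16 is the value %rsp will have in
   the caller once the return address has been popped.  */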
1989
1990 static void
1991 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
1992 void **this_cache,
1993 struct frame_id *this_id)
1994 {
1995 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
1996 this_cache);
1997
1998 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
1999 }
2000
2001 static const struct frame_unwind amd64_epilogue_frame_unwind =
2002 {
2003 NORMAL_FRAME,
2004 amd64_epilogue_frame_this_id,
2005 amd64_frame_prev_register,
2006 NULL,
2007 amd64_epilogue_frame_sniffer
2008 };
2009
2010 static struct frame_id
2011 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2012 {
2013 CORE_ADDR fp;
2014
2015 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2016
2017 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2018 }
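
/* Editorial note (not part of the upstream source): the dummy-frame id
   uses %rbp + 16, the same convention amd64_frame_this_id uses for
   ordinary frames (cache->base + 16), so the frame pushed for an
   inferior function call is identified consistently with the frames
   around it.  */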
2019
2020 /* 16-byte align the SP, as the ABI requires at function call boundaries. */
2021
2022 static CORE_ADDR
2023 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2024 {
2025 return sp & -(CORE_ADDR)16;
2026 }
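
/* Editorial note (not part of the upstream source): "sp & -(CORE_ADDR) 16"
   clears the low four bits, rounding the stack pointer down to the next
   16-byte boundary; for example 0x7fffffffe938 becomes 0x7fffffffe930.
   Rounding down is correct because the stack grows towards lower
   addresses.  */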
2027 \f
2028
2029 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2030 in the floating-point register set REGSET to register cache
2031 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2032
2033 static void
2034 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2035 int regnum, const void *fpregs, size_t len)
2036 {
2037 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2038
2039 gdb_assert (len == tdep->sizeof_fpregset);
2040 amd64_supply_fxsave (regcache, regnum, fpregs);
2041 }
2042
2043 /* Collect register REGNUM from the register cache REGCACHE and store
2044 it in the buffer specified by FPREGS and LEN as described by the
2045 floating-point register set REGSET. If REGNUM is -1, do this for
2046 all registers in REGSET. */
2047
2048 static void
2049 amd64_collect_fpregset (const struct regset *regset,
2050 const struct regcache *regcache,
2051 int regnum, void *fpregs, size_t len)
2052 {
2053 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2054
2055 gdb_assert (len == tdep->sizeof_fpregset);
2056 amd64_collect_fxsave (regcache, regnum, fpregs);
2057 }
2058
2059 /* Return the appropriate register set for the core section identified
2060 by SECT_NAME and SECT_SIZE. */
2061
2062 static const struct regset *
2063 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2064 const char *sect_name, size_t sect_size)
2065 {
2066 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2067
2068 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2069 {
2070 if (tdep->fpregset == NULL)
2071 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2072 amd64_collect_fpregset);
2073
2074 return tdep->fpregset;
2075 }
2076
2077 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2078 }
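
/* Editorial note (not part of the upstream source): ".reg2" is the
   conventional core-file section name for the floating-point register
   set; any other section (".reg" for the general-purpose registers,
   for instance) is handed off to the generic i386 routine.  */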
2079 \f
2080
2081 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2082 %rdi. We expect its value to be a pointer to the jmp_buf structure
2083 from which we extract the address that we will land at. This
2084 address is copied into PC. This routine returns non-zero on
2085 success. */
2086
2087 static int
2088 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2089 {
2090 gdb_byte buf[8];
2091 CORE_ADDR jb_addr;
2092 struct gdbarch *gdbarch = get_frame_arch (frame);
2093 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2094 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2095
2096 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2097 longjmp will land. */
2098 if (jb_pc_offset == -1)
2099 return 0;
2100
2101 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2102 jb_addr = extract_typed_address
2103 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2104 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2105 return 0;
2106
2107 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2108
2109 return 1;
2110 }
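
/* Editorial note (not part of the upstream source): the function above
   effectively computes

     *pc = *(CORE_ADDR *) (jmp_buf + tdep->jb_pc_offset);

   where jmp_buf is the pointer that was passed to longjmp in %rdi.
   The offset of the saved PC within jmp_buf is libc-specific, which is
   why jb_pc_offset is left for the OS-specific tdep code to fill in.  */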
2111
2112 static const int amd64_record_regmap[] =
2113 {
2114 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2115 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2116 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2117 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2118 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2119 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2120 };
2121
2122 void
2123 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
2124 {
2125 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2126
2127 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2128 floating-point registers. */
2129 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2130
2131 /* AMD64 has an FPU and 16 SSE registers. */
2132 tdep->st0_regnum = AMD64_ST0_REGNUM;
2133 tdep->num_xmm_regs = 16;
2134
2135 /* This is what all the fuss is about. */
2136 set_gdbarch_long_bit (gdbarch, 64);
2137 set_gdbarch_long_long_bit (gdbarch, 64);
2138 set_gdbarch_ptr_bit (gdbarch, 64);
2139
2140 /* In contrast to the i386, on AMD64 a `long double' actually takes
2141 up 128 bits, even though it's still based on the i387 extended
2142 floating-point format which has only 80 significant bits. */
2143 set_gdbarch_long_double_bit (gdbarch, 128);
2144
2145 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
2146 set_gdbarch_register_name (gdbarch, amd64_register_name);
2147 set_gdbarch_register_type (gdbarch, amd64_register_type);
2148
2149 /* Register numbers of various important registers. */
2150 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
2151 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
2152 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
2153 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
2154
2155 /* The "default" register numbering scheme for AMD64 is referred to
2156 as the "DWARF Register Number Mapping" in the System V psABI.
2157 The preferred debugging format for all known AMD64 targets is
2158 actually DWARF2, and GCC doesn't seem to support DWARF (that is
2159 DWARF-1), but we provide the same mapping just in case. This
2160 mapping is also used for stabs, which GCC does support. */
2161 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2162 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2163
2164 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
2165 be in use on any of the supported AMD64 targets. */
2166
2167 /* Call dummy code. */
2168 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
2169 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
2170 set_gdbarch_frame_red_zone_size (gdbarch, 128);
2171 tdep->call_dummy_num_integer_regs =
2172 ARRAY_SIZE (amd64_dummy_call_integer_regs);
2173 tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
2174 tdep->classify = amd64_classify;
2175
2176 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
2177 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
2178 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
2179
2180 set_gdbarch_return_value (gdbarch, amd64_return_value);
2181
2182 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
2183
2184 /* Avoid wiring in the MMX registers for now. */
2185 set_gdbarch_num_pseudo_regs (gdbarch, 0);
2186 tdep->mm0_regnum = -1;
2187
2188 tdep->record_regmap = amd64_record_regmap;
2189
2190 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
2191
2192 /* Hook the function epilogue frame unwinder. This unwinder is
2193 prepended to the list, so that it supersedes the other
2194 unwinders in function epilogues. */
2195 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
2196
2197 /* Hook the prologue-based frame unwinders. */
2198 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
2199 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
2200 frame_base_set_default (gdbarch, &amd64_frame_base);
2201
2202 /* If we have a register mapping, enable the generic core file support. */
2203 if (tdep->gregset_reg_offset)
2204 set_gdbarch_regset_from_core_section (gdbarch,
2205 amd64_regset_from_core_section);
2206
2207 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
2208 }
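
/* Editorial sketch (hypothetical, not part of the upstream source): an
   OS-specific tdep file would typically call amd64_init_abi from its
   own init routine and then fill in the pieces left blank here, along
   these lines (all example_* names are made up):

     static void
     amd64_example_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
     {
       struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

       amd64_init_abi (info, gdbarch);

       tdep->sigtramp_p = example_sigtramp_p;
       tdep->sigcontext_addr = example_sigcontext_addr;
       tdep->sc_reg_offset = example_sc_reg_offset;
       tdep->sc_num_regs = ARRAY_SIZE (example_sc_reg_offset);
       tdep->jb_pc_offset = <libc-specific offset of the PC within jmp_buf>;
     }

   The GNU/Linux and BSD ports follow this general pattern.  */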
2209 \f
2210
2211 /* The 64-bit FXSAVE format differs from the 32-bit format in the
2212 sense that the instruction pointer and data pointer are simply
2213 64-bit offsets into the code segment and the data segment instead
2214 of a selector offset pair. The functions below store the upper 32
2215 bits of these pointers (instead of just the 16 bits of the segment
2216 selector). */
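
/* Editorial note (not part of the upstream source): in the 64-bit
   FXSAVE layout, bytes 8-15 hold the 64-bit FPU instruction pointer
   (FIP) and bytes 16-23 the 64-bit FPU operand pointer (FDP).  The
   functions below supply and collect GDB's fiseg and foseg registers
   at offsets 12 and 20, i.e. the upper 32 bits of those pointers,
   where the 32-bit layout keeps the FCS and FDS selectors.  */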
2217
2218 /* Fill register REGNUM in REGCACHE with the appropriate
2219 floating-point or SSE register value from *FXSAVE. If REGNUM is
2220 -1, do this for all registers. This function masks off any of the
2221 reserved bits in *FXSAVE. */
2222
2223 void
2224 amd64_supply_fxsave (struct regcache *regcache, int regnum,
2225 const void *fxsave)
2226 {
2227 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2228 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2229
2230 i387_supply_fxsave (regcache, regnum, fxsave);
2231
2232 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
2233 {
2234 const gdb_byte *regs = fxsave;
2235
2236 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2237 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2238 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2239 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2240 }
2241 }
2242
2243 /* Fill register REGNUM (if it is a floating-point or SSE register) in
2244 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
2245 all registers. This function doesn't touch any of the reserved
2246 bits in *FXSAVE. */
2247
2248 void
2249 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2250 void *fxsave)
2251 {
2252 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2253 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2254 gdb_byte *regs = fxsave;
2255
2256 i387_collect_fxsave (regcache, regnum, fxsave);
2257
2258 if (gdbarch_ptr_bit (gdbarch) == 64)
2259 {
2260 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2261 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2262 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2263 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2264 }
2265 }