1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 Contributed by Jiri Smid, SuSE Labs.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor,
23 Boston, MA 02110-1301, USA. */
24
25 #include "defs.h"
26 #include "arch-utils.h"
27 #include "block.h"
28 #include "dummy-frame.h"
29 #include "frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
32 #include "inferior.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39
40 #include "gdb_assert.h"
41
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
44
45 /* Note that the AMD64 architecture was previously known as x86-64.
46 The latter is (forever) engraved into the canonical system name as
47 returned by config.guess, and used as the name for the AMD64 port
48 of GNU/Linux. The BSDs have renamed their ports to amd64; they
49 don't like to shout. For GDB we prefer the amd64_-prefix over the
50 x86_64_-prefix since it's so much easier to type. */
51
52 /* Register information. */
53
54 struct amd64_register_info
55 {
56 char *name;
57 struct type **type;
58 };
59
60 static struct amd64_register_info const amd64_register_info[] =
61 {
62 { "rax", &builtin_type_int64 },
63 { "rbx", &builtin_type_int64 },
64 { "rcx", &builtin_type_int64 },
65 { "rdx", &builtin_type_int64 },
66 { "rsi", &builtin_type_int64 },
67 { "rdi", &builtin_type_int64 },
68 { "rbp", &builtin_type_void_data_ptr },
69 { "rsp", &builtin_type_void_data_ptr },
70
71 /* %r8 is indeed register number 8. */
72 { "r8", &builtin_type_int64 },
73 { "r9", &builtin_type_int64 },
74 { "r10", &builtin_type_int64 },
75 { "r11", &builtin_type_int64 },
76 { "r12", &builtin_type_int64 },
77 { "r13", &builtin_type_int64 },
78 { "r14", &builtin_type_int64 },
79 { "r15", &builtin_type_int64 },
80 { "rip", &builtin_type_void_func_ptr },
81 { "eflags", &i386_eflags_type },
82 { "cs", &builtin_type_int32 },
83 { "ss", &builtin_type_int32 },
84 { "ds", &builtin_type_int32 },
85 { "es", &builtin_type_int32 },
86 { "fs", &builtin_type_int32 },
87 { "gs", &builtin_type_int32 },
88
89 /* %st0 is register number 24. */
90 { "st0", &builtin_type_i387_ext },
91 { "st1", &builtin_type_i387_ext },
92 { "st2", &builtin_type_i387_ext },
93 { "st3", &builtin_type_i387_ext },
94 { "st4", &builtin_type_i387_ext },
95 { "st5", &builtin_type_i387_ext },
96 { "st6", &builtin_type_i387_ext },
97 { "st7", &builtin_type_i387_ext },
98 { "fctrl", &builtin_type_int32 },
99 { "fstat", &builtin_type_int32 },
100 { "ftag", &builtin_type_int32 },
101 { "fiseg", &builtin_type_int32 },
102 { "fioff", &builtin_type_int32 },
103 { "foseg", &builtin_type_int32 },
104 { "fooff", &builtin_type_int32 },
105 { "fop", &builtin_type_int32 },
106
107 /* %xmm0 is register number 40. */
108 { "xmm0", &i386_sse_type },
109 { "xmm1", &i386_sse_type },
110 { "xmm2", &i386_sse_type },
111 { "xmm3", &i386_sse_type },
112 { "xmm4", &i386_sse_type },
113 { "xmm5", &i386_sse_type },
114 { "xmm6", &i386_sse_type },
115 { "xmm7", &i386_sse_type },
116 { "xmm8", &i386_sse_type },
117 { "xmm9", &i386_sse_type },
118 { "xmm10", &i386_sse_type },
119 { "xmm11", &i386_sse_type },
120 { "xmm12", &i386_sse_type },
121 { "xmm13", &i386_sse_type },
122 { "xmm14", &i386_sse_type },
123 { "xmm15", &i386_sse_type },
124 { "mxcsr", &i386_mxcsr_type }
125 };
126
127 /* Total number of registers. */
128 #define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_info)
129
130 /* Return the name of register REGNUM. */
131
132 const char *
133 amd64_register_name (int regnum)
134 {
135 if (regnum >= 0 && regnum < AMD64_NUM_REGS)
136 return amd64_register_info[regnum].name;
137
138 return NULL;
139 }
140
141 /* Return the GDB type object for the "standard" data type of data in
142 register REGNUM. */
143
144 struct type *
145 amd64_register_type (struct gdbarch *gdbarch, int regnum)
146 {
147 gdb_assert (regnum >= 0 && regnum < AMD64_NUM_REGS);
148
149 return *amd64_register_info[regnum].type;
150 }
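/* Illustrative note (not part of the original source): the table above
   fixes the raw register numbering, so for example
   amd64_register_name (AMD64_RAX_REGNUM) returns "rax" and
   amd64_register_type (gdbarch, AMD64_RIP_REGNUM) returns
   builtin_type_void_func_ptr; %st0 starts at raw number 24 and %xmm0
   at raw number 40, as the comments in the table note.  */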
151
152 /* DWARF Register Number Mapping as defined in the System V psABI,
153 section 3.6. */
154
155 static int amd64_dwarf_regmap[] =
156 {
157 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
158 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
159 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
160 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
161
162 /* Frame Pointer Register RBP. */
163 AMD64_RBP_REGNUM,
164
165 /* Stack Pointer Register RSP. */
166 AMD64_RSP_REGNUM,
167
168 /* Extended Integer Registers 8 - 15. */
169 8, 9, 10, 11, 12, 13, 14, 15,
170
171 /* Return Address RA. Mapped to RIP. */
172 AMD64_RIP_REGNUM,
173
174 /* SSE Registers 0 - 7. */
175 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
176 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
177 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
178 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
179
180 /* Extended SSE Registers 8 - 15. */
181 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
182 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
183 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
184 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
185
186 /* Floating Point Registers 0-7. */
187 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
188 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
189 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
190 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
191
192 /* Control and Status Flags Register. */
193 AMD64_EFLAGS_REGNUM,
194
195 /* Selector Registers. */
196 AMD64_ES_REGNUM,
197 AMD64_CS_REGNUM,
198 AMD64_SS_REGNUM,
199 AMD64_DS_REGNUM,
200 AMD64_FS_REGNUM,
201 AMD64_GS_REGNUM,
202 -1,
203 -1,
204
205 /* Segment Base Address Registers. */
206 -1,
207 -1,
208 -1,
209 -1,
210
211 /* Special Selector Registers. */
212 -1,
213 -1,
214
215 /* Floating Point Control Registers. */
216 AMD64_MXCSR_REGNUM,
217 AMD64_FCTRL_REGNUM,
218 AMD64_FSTAT_REGNUM
219 };
220
221 static const int amd64_dwarf_regmap_len =
222 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
223
224 /* Convert DWARF register number REG to the appropriate register
225 number used by GDB. */
226
227 static int
228 amd64_dwarf_reg_to_regnum (int reg)
229 {
230 int regnum = -1;
231
232 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
233 regnum = amd64_dwarf_regmap[reg];
234
235 if (regnum == -1)
236 warning (_("Unmapped DWARF Register #%d encountered."), reg);
237
238 return regnum;
239 }
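/* Example (illustrative, not in the original file): per the psABI map
   above, DWARF register 6 is the frame pointer and DWARF register 16
   is the return address, so

     amd64_dwarf_reg_to_regnum (6)  == AMD64_RBP_REGNUM
     amd64_dwarf_reg_to_regnum (16) == AMD64_RIP_REGNUM

   while an out-of-range or unmapped number (say 63) yields -1 and
   triggers the warning above.  */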
240
241 /* Return nonzero if a value of type TYPE stored in register REGNUM
242 needs any special handling. */
243
244 static int
245 amd64_convert_register_p (int regnum, struct type *type)
246 {
247 return i386_fp_regnum_p (regnum);
248 }
249 \f
250
251 /* Register classes as defined in the psABI. */
252
253 enum amd64_reg_class
254 {
255 AMD64_INTEGER,
256 AMD64_SSE,
257 AMD64_SSEUP,
258 AMD64_X87,
259 AMD64_X87UP,
260 AMD64_COMPLEX_X87,
261 AMD64_NO_CLASS,
262 AMD64_MEMORY
263 };
264
265 /* Return the union class of CLASS1 and CLASS2. See the psABI for
266 details. */
267
268 static enum amd64_reg_class
269 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
270 {
271 /* Rule (a): If both classes are equal, this is the resulting class. */
272 if (class1 == class2)
273 return class1;
274
275 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
276 is the other class. */
277 if (class1 == AMD64_NO_CLASS)
278 return class2;
279 if (class2 == AMD64_NO_CLASS)
280 return class1;
281
282 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
283 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
284 return AMD64_MEMORY;
285
286 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
287 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
288 return AMD64_INTEGER;
289
290 /* Rule (e): If one of the classes is X87, X87UP or COMPLEX_X87,
291 MEMORY is used as the class. */
292 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
293 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
294 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
295 return AMD64_MEMORY;
296
297 /* Rule (f): Otherwise class SSE is used. */
298 return AMD64_SSE;
299 }
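/* Worked example (illustrative, not part of the original source):

     amd64_merge_classes (AMD64_NO_CLASS, AMD64_SSE) == AMD64_SSE      (rule b)
     amd64_merge_classes (AMD64_INTEGER, AMD64_SSE)  == AMD64_INTEGER  (rule d)
     amd64_merge_classes (AMD64_SSE, AMD64_X87)      == AMD64_MEMORY   (rule e)

   These merges are what amd64_classify_aggregate below relies on when
   several fields share the same eightbyte.  */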
300
301 static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
302
303 /* Return non-zero if TYPE is a non-POD structure or union type. */
304
305 static int
306 amd64_non_pod_p (struct type *type)
307 {
308 /* ??? A class with a base class certainly isn't POD, but does this
309 catch all non-POD structure types? */
310 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
311 return 1;
312
313 return 0;
314 }
315
316 /* Classify TYPE according to the rules for aggregate (structures and
317 arrays) and union types, and store the result in CLASS. */
318
319 static void
320 amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
321 {
322 int len = TYPE_LENGTH (type);
323
324 /* 1. If the size of an object is larger than two eightbytes, or in
325 C++, is a non-POD structure or union type, or contains
326 unaligned fields, it has class memory. */
327 if (len > 16 || amd64_non_pod_p (type))
328 {
329 class[0] = class[1] = AMD64_MEMORY;
330 return;
331 }
332
333 /* 2. Both eightbytes get initialized to class NO_CLASS. */
334 class[0] = class[1] = AMD64_NO_CLASS;
335
336 /* 3. Each field of an object is classified recursively so that
337 always two fields are considered. The resulting class is
338 calculated according to the classes of the fields in the
339 eightbyte: */
340
341 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
342 {
343 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
344
345 /* All fields in an array have the same type. */
346 amd64_classify (subtype, class);
347 if (len > 8 && class[1] == AMD64_NO_CLASS)
348 class[1] = class[0];
349 }
350 else
351 {
352 int i;
353
354 /* Structure or union. */
355 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
356 || TYPE_CODE (type) == TYPE_CODE_UNION);
357
358 for (i = 0; i < TYPE_NFIELDS (type); i++)
359 {
360 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
361 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
362 enum amd64_reg_class subclass[2];
363
364 /* Ignore static fields. */
365 if (TYPE_FIELD_STATIC (type, i))
366 continue;
367
368 gdb_assert (pos == 0 || pos == 1);
369
370 amd64_classify (subtype, subclass);
371 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
372 if (pos == 0)
373 class[1] = amd64_merge_classes (class[1], subclass[1]);
374 }
375 }
376
377 /* 4. Then a post merger cleanup is done: */
378
379 /* Rule (a): If one of the classes is MEMORY, the whole argument is
380 passed in memory. */
381 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
382 class[0] = class[1] = AMD64_MEMORY;
383
384 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
385 SSE. */
386 if (class[0] == AMD64_SSEUP)
387 class[0] = AMD64_SSE;
388 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
389 class[1] = AMD64_SSE;
390 }
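/* Worked example (illustrative, not part of the original source): for
   `struct { double d; long l; }', field `d' occupies the first
   eightbyte and classifies as SSE, field `l' occupies the second
   eightbyte and classifies as INTEGER, so this function leaves
   class[0] == AMD64_SSE and class[1] == AMD64_INTEGER.  A 24-byte
   struct, by contrast, is immediately classified as MEMORY because its
   length exceeds 16.  */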
391
392 /* Classify TYPE, and store the result in CLASS. */
393
394 static void
395 amd64_classify (struct type *type, enum amd64_reg_class class[2])
396 {
397 enum type_code code = TYPE_CODE (type);
398 int len = TYPE_LENGTH (type);
399
400 class[0] = class[1] = AMD64_NO_CLASS;
401
402 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
403 long, long long, and pointers are in the INTEGER class. Similarly,
404 range types, used by languages such as Ada, are also in the INTEGER
405 class. */
406 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
407 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
408 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
409 && (len == 1 || len == 2 || len == 4 || len == 8))
410 class[0] = AMD64_INTEGER;
411
412 /* Arguments of types float, double and __m64 are in class SSE. */
413 else if (code == TYPE_CODE_FLT && (len == 4 || len == 8))
414 /* FIXME: __m64. */
415 class[0] = AMD64_SSE;
416
417 /* Arguments of types __float128 and __m128 are split into two
418 halves. The least significant ones belong to class SSE, the most
419 significant one to class SSEUP. */
420 /* FIXME: __float128, __m128. */
421
422 /* The 64-bit mantissa of arguments of type long double belongs to
423 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
424 class X87UP. */
425 else if (code == TYPE_CODE_FLT && len == 16)
426 /* Class X87 and X87UP. */
427 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
428
429 /* Aggregates. */
430 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
431 || code == TYPE_CODE_UNION)
432 amd64_classify_aggregate (type, class);
433 }
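/* Illustrative examples for amd64_classify (not part of the original
   source):

     int          -> { AMD64_INTEGER, AMD64_NO_CLASS }
     double       -> { AMD64_SSE,     AMD64_NO_CLASS }
     long double  -> { AMD64_X87,     AMD64_X87UP    }
     void *       -> { AMD64_INTEGER, AMD64_NO_CLASS }

   Aggregates are delegated to amd64_classify_aggregate above.  */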
434
435 static enum return_value_convention
436 amd64_return_value (struct gdbarch *gdbarch, struct type *type,
437 struct regcache *regcache,
438 gdb_byte *readbuf, const gdb_byte *writebuf)
439 {
440 enum amd64_reg_class class[2];
441 int len = TYPE_LENGTH (type);
442 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
443 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
444 int integer_reg = 0;
445 int sse_reg = 0;
446 int i;
447
448 gdb_assert (!(readbuf && writebuf));
449
450 /* 1. Classify the return type with the classification algorithm. */
451 amd64_classify (type, class);
452
453 /* 2. If the type has class MEMORY, then the caller provides space
454 for the return value and passes the address of this storage in
455 %rdi as if it were the first argument to the function. In effect,
456 this address becomes a hidden first argument.
457
458 On return %rax will contain the address that has been passed in
459 by the caller in %rdi. */
460 if (class[0] == AMD64_MEMORY)
461 {
462 /* As indicated by the comment above, the ABI guarantees that we
463 can always find the return value just after the function has
464 returned. */
465
466 if (readbuf)
467 {
468 ULONGEST addr;
469
470 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
471 read_memory (addr, readbuf, TYPE_LENGTH (type));
472 }
473
474 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
475 }
476
477 gdb_assert (class[1] != AMD64_MEMORY);
478 gdb_assert (len <= 16);
479
480 for (i = 0; len > 0; i++, len -= 8)
481 {
482 int regnum = -1;
483 int offset = 0;
484
485 switch (class[i])
486 {
487 case AMD64_INTEGER:
488 /* 3. If the class is INTEGER, the next available register
489 of the sequence %rax, %rdx is used. */
490 regnum = integer_regnum[integer_reg++];
491 break;
492
493 case AMD64_SSE:
494 /* 4. If the class is SSE, the next available SSE register
495 of the sequence %xmm0, %xmm1 is used. */
496 regnum = sse_regnum[sse_reg++];
497 break;
498
499 case AMD64_SSEUP:
500 /* 5. If the class is SSEUP, the eightbyte is passed in the
501 upper half of the last used SSE register. */
502 gdb_assert (sse_reg > 0);
503 regnum = sse_regnum[sse_reg - 1];
504 offset = 8;
505 break;
506
507 case AMD64_X87:
508 /* 6. If the class is X87, the value is returned on the X87
509 stack in %st0 as 80-bit x87 number. */
510 regnum = AMD64_ST0_REGNUM;
511 if (writebuf)
512 i387_return_value (gdbarch, regcache);
513 break;
514
515 case AMD64_X87UP:
516 /* 7. If the class is X87UP, the value is returned together
517 with the previous X87 value in %st0. */
518 gdb_assert (i > 0 && class[0] == AMD64_X87);
519 regnum = AMD64_ST0_REGNUM;
520 offset = 8;
521 len = 2;
522 break;
523
524 case AMD64_NO_CLASS:
525 continue;
526
527 default:
528 gdb_assert (!"Unexpected register class.");
529 }
530
531 gdb_assert (regnum != -1);
532
533 if (readbuf)
534 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
535 readbuf + i * 8);
536 if (writebuf)
537 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
538 writebuf + i * 8);
539 }
540
541 return RETURN_VALUE_REGISTER_CONVENTION;
542 }
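/* Illustrative example (not part of the original source): returning
   `struct { long l; double d; }' classifies as { INTEGER, SSE }, so
   the first eightbyte is read from (or written to) %rax and the second
   from %xmm0, and the function reports
   RETURN_VALUE_REGISTER_CONVENTION.  A 32-byte struct classifies as
   MEMORY instead, and the value is fetched from the address the callee
   leaves in %rax.  */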
543 \f
544
545 static CORE_ADDR
546 amd64_push_arguments (struct regcache *regcache, int nargs,
547 struct value **args, CORE_ADDR sp, int struct_return)
548 {
549 static int integer_regnum[] =
550 {
551 AMD64_RDI_REGNUM, /* %rdi */
552 AMD64_RSI_REGNUM, /* %rsi */
553 AMD64_RDX_REGNUM, /* %rdx */
554 AMD64_RCX_REGNUM, /* %rcx */
555 8, /* %r8 */
556 9 /* %r9 */
557 };
558 static int sse_regnum[] =
559 {
560 /* %xmm0 ... %xmm7 */
561 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
562 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
563 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
564 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
565 };
566 struct value **stack_args = alloca (nargs * sizeof (struct value *));
567 int num_stack_args = 0;
568 int num_elements = 0;
569 int element = 0;
570 int integer_reg = 0;
571 int sse_reg = 0;
572 int i;
573
574 /* Reserve a register for the "hidden" argument. */
575 if (struct_return)
576 integer_reg++;
577
578 for (i = 0; i < nargs; i++)
579 {
580 struct type *type = value_type (args[i]);
581 int len = TYPE_LENGTH (type);
582 enum amd64_reg_class class[2];
583 int needed_integer_regs = 0;
584 int needed_sse_regs = 0;
585 int j;
586
587 /* Classify argument. */
588 amd64_classify (type, class);
589
590 /* Calculate the number of integer and SSE registers needed for
591 this argument. */
592 for (j = 0; j < 2; j++)
593 {
594 if (class[j] == AMD64_INTEGER)
595 needed_integer_regs++;
596 else if (class[j] == AMD64_SSE)
597 needed_sse_regs++;
598 }
599
600 /* Check whether enough registers are available, and if the
601 argument should be passed in registers at all. */
602 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
603 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
604 || (needed_integer_regs == 0 && needed_sse_regs == 0))
605 {
606 /* The argument will be passed on the stack. */
607 num_elements += ((len + 7) / 8);
608 stack_args[num_stack_args++] = args[i];
609 }
610 else
611 {
612 /* The argument will be passed in registers. */
613 const gdb_byte *valbuf = value_contents (args[i]);
614 gdb_byte buf[8];
615
616 gdb_assert (len <= 16);
617
618 for (j = 0; len > 0; j++, len -= 8)
619 {
620 int regnum = -1;
621 int offset = 0;
622
623 switch (class[j])
624 {
625 case AMD64_INTEGER:
626 regnum = integer_regnum[integer_reg++];
627 break;
628
629 case AMD64_SSE:
630 regnum = sse_regnum[sse_reg++];
631 break;
632
633 case AMD64_SSEUP:
634 gdb_assert (sse_reg > 0);
635 regnum = sse_regnum[sse_reg - 1];
636 offset = 8;
637 break;
638
639 default:
640 gdb_assert (!"Unexpected register class.");
641 }
642
643 gdb_assert (regnum != -1);
644 memset (buf, 0, sizeof buf);
645 memcpy (buf, valbuf + j * 8, min (len, 8));
646 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
647 }
648 }
649 }
650
651 /* Allocate space for the arguments on the stack. */
652 sp -= num_elements * 8;
653
654 /* The psABI says that "The end of the input argument area shall be
655 aligned on a 16 byte boundary." */
656 sp &= ~0xf;
657
658 /* Write out the arguments to the stack. */
659 for (i = 0; i < num_stack_args; i++)
660 {
661 struct type *type = value_type (stack_args[i]);
662 const gdb_byte *valbuf = value_contents (stack_args[i]);
663 int len = TYPE_LENGTH (type);
664
665 write_memory (sp + element * 8, valbuf, len);
666 element += ((len + 7) / 8);
667 }
668
669 /* The psABI says that "For calls that may call functions that use
670 varargs or stdargs (prototype-less calls or calls to functions
671 containing ellipsis (...) in the declaration) %al is used as
672 hidden argument to specify the number of SSE registers used." */
673 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
674 return sp;
675 }
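/* Illustrative example (not part of the original source): for a call
   f (1, 2.5, big), where `big' is a 32-byte struct, the int is passed
   in %rdi (or %rsi if a hidden struct-return pointer already occupies
   %rdi), the double is passed in %xmm0, and `big' classifies as MEMORY
   and is copied onto the stack below the 16-byte aligned SP.  %rax is
   then loaded with 1, the number of SSE registers used, as the psABI
   requires for possibly-variadic callees.  */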
676
677 static CORE_ADDR
678 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
679 struct regcache *regcache, CORE_ADDR bp_addr,
680 int nargs, struct value **args, CORE_ADDR sp,
681 int struct_return, CORE_ADDR struct_addr)
682 {
683 gdb_byte buf[8];
684
685 /* Pass arguments. */
686 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
687
688 /* Pass the "hidden" argument. */
689 if (struct_return)
690 {
691 store_unsigned_integer (buf, 8, struct_addr);
692 regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
693 }
694
695 /* Store return address. */
696 sp -= 8;
697 store_unsigned_integer (buf, 8, bp_addr);
698 write_memory (sp, buf, 8);
699
700 /* Finally, update the stack pointer... */
701 store_unsigned_integer (buf, 8, sp);
702 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
703
704 /* ...and fake a frame pointer. */
705 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
706
707 return sp + 16;
708 }
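/* Note (illustrative, not part of the original source): the value
   returned above, SP + 16, matches the CFA convention used elsewhere
   in this file (cache->base + 16 in amd64_frame_this_id and %rbp + 16
   in amd64_unwind_dummy_id); since %rbp was just set to SP above, both
   computations agree, which is what lets the dummy frame be identified
   consistently.  */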
709 \f
710
711 /* The maximum number of saved registers. This should include %rip. */
712 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
713
714 struct amd64_frame_cache
715 {
716 /* Base address. */
717 CORE_ADDR base;
718 CORE_ADDR sp_offset;
719 CORE_ADDR pc;
720
721 /* Saved registers. */
722 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
723 CORE_ADDR saved_sp;
724
725 /* Do we have a frame? */
726 int frameless_p;
727 };
728
729 /* Initialize a frame cache. */
730
731 static void
732 amd64_init_frame_cache (struct amd64_frame_cache *cache)
733 {
734 int i;
735
736 /* Base address. */
737 cache->base = 0;
738 cache->sp_offset = -8;
739 cache->pc = 0;
740
741 /* Saved registers. We initialize these to -1 since zero is a valid
742 offset (that's where %rbp is supposed to be stored). */
743 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
744 cache->saved_regs[i] = -1;
745 cache->saved_sp = 0;
746
747 /* Frameless until proven otherwise. */
748 cache->frameless_p = 1;
749 }
750
751 /* Allocate and initialize a frame cache. */
752
753 static struct amd64_frame_cache *
754 amd64_alloc_frame_cache (void)
755 {
756 struct amd64_frame_cache *cache;
757
758 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
759 amd64_init_frame_cache (cache);
760 return cache;
761 }
762
763 /* Do a limited analysis of the prologue at PC and update CACHE
764 accordingly. Bail out early if CURRENT_PC is reached. Return the
765 address where the analysis stopped.
766
767 We will handle only functions beginning with:
768
769 pushq %rbp 0x55
770 movq %rsp, %rbp 0x48 0x89 0xe5
771
772 Any function that doesn't start with this sequence will be assumed
773 to have no prologue and thus no valid frame pointer in %rbp. */
774
775 static CORE_ADDR
776 amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
777 struct amd64_frame_cache *cache)
778 {
779 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
780 gdb_byte buf[3];
781 gdb_byte op;
782
783 if (current_pc <= pc)
784 return current_pc;
785
786 op = read_memory_unsigned_integer (pc, 1);
787
788 if (op == 0x55) /* pushq %rbp */
789 {
790 /* Take into account that we've executed the `pushq %rbp' that
791 starts this instruction sequence. */
792 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
793 cache->sp_offset += 8;
794
795 /* If that's all, return now. */
796 if (current_pc <= pc + 1)
797 return current_pc;
798
799 /* Check for `movq %rsp, %rbp'. */
800 read_memory (pc + 1, buf, 3);
801 if (memcmp (buf, proto, 3) != 0)
802 return pc + 1;
803
804 /* OK, we actually have a frame. */
805 cache->frameless_p = 0;
806 return pc + 4;
807 }
808
809 return pc;
810 }
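/* Worked example (illustrative, not part of the original source): for
   a function that begins with the standard prologue

     0x55                pushq %rbp
     0x48 0x89 0xe5      movq  %rsp, %rbp

   the analysis records saved_regs[AMD64_RBP_REGNUM] == 0, bumps
   sp_offset from -8 to 0, clears frameless_p and returns PC + 4.  For
   a function that starts with anything else, the cache stays marked
   frameless and PC is returned unchanged.  */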
811
812 /* Return PC of first real instruction. */
813
814 static CORE_ADDR
815 amd64_skip_prologue (CORE_ADDR start_pc)
816 {
817 struct amd64_frame_cache cache;
818 CORE_ADDR pc;
819
820 amd64_init_frame_cache (&cache);
821 pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffffLL, &cache);
822 if (cache.frameless_p)
823 return start_pc;
824
825 return pc;
826 }
827 \f
828
829 /* Normal frames. */
830
831 static struct amd64_frame_cache *
832 amd64_frame_cache (struct frame_info *next_frame, void **this_cache)
833 {
834 struct amd64_frame_cache *cache;
835 gdb_byte buf[8];
836 int i;
837
838 if (*this_cache)
839 return *this_cache;
840
841 cache = amd64_alloc_frame_cache ();
842 *this_cache = cache;
843
844 cache->pc = frame_func_unwind (next_frame, NORMAL_FRAME);
845 if (cache->pc != 0)
846 amd64_analyze_prologue (cache->pc, frame_pc_unwind (next_frame), cache);
847
848 if (cache->frameless_p)
849 {
850 /* We didn't find a valid frame. If we're at the start of a
851 function, or somewhere halfway through its prologue, the function's
852 frame probably hasn't been fully set up yet. Try to
853 reconstruct the base address for the stack frame by looking
854 at the stack pointer. For truly "frameless" functions this
855 might work too. */
856
857 frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
858 cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
859 }
860 else
861 {
862 frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
863 cache->base = extract_unsigned_integer (buf, 8);
864 }
865
866 /* Now that we have the base address for the stack frame we can
867 calculate the value of %rsp in the calling frame. */
868 cache->saved_sp = cache->base + 16;
869
870 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
871 frame, we find it at the same offset from the reconstructed base
872 address. */
873 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
874
875 /* Adjust all the saved registers such that they contain addresses
876 instead of offsets. */
877 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
878 if (cache->saved_regs[i] != -1)
879 cache->saved_regs[i] += cache->base;
880
881 return cache;
882 }
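/* Illustrative layout (not part of the original source): for a normal
   %rbp-based frame the cache built above ends up describing

     cache->base + 16   value of %rsp in the caller (saved_sp)
     cache->base +  8   return address (saved %rip)
     cache->base +  0   saved %rbp of the caller

   which is exactly the layout the standard prologue produces.  */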
883
884 static void
885 amd64_frame_this_id (struct frame_info *next_frame, void **this_cache,
886 struct frame_id *this_id)
887 {
888 struct amd64_frame_cache *cache =
889 amd64_frame_cache (next_frame, this_cache);
890
891 /* This marks the outermost frame. */
892 if (cache->base == 0)
893 return;
894
895 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
896 }
897
898 static void
899 amd64_frame_prev_register (struct frame_info *next_frame, void **this_cache,
900 int regnum, int *optimizedp,
901 enum lval_type *lvalp, CORE_ADDR *addrp,
902 int *realnump, gdb_byte *valuep)
903 {
904 struct amd64_frame_cache *cache =
905 amd64_frame_cache (next_frame, this_cache);
906
907 gdb_assert (regnum >= 0);
908
909 if (regnum == SP_REGNUM && cache->saved_sp)
910 {
911 *optimizedp = 0;
912 *lvalp = not_lval;
913 *addrp = 0;
914 *realnump = -1;
915 if (valuep)
916 {
917 /* Store the value. */
918 store_unsigned_integer (valuep, 8, cache->saved_sp);
919 }
920 return;
921 }
922
923 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
924 {
925 *optimizedp = 0;
926 *lvalp = lval_memory;
927 *addrp = cache->saved_regs[regnum];
928 *realnump = -1;
929 if (valuep)
930 {
931 /* Read the value in from memory. */
932 read_memory (*addrp, valuep,
933 register_size (current_gdbarch, regnum));
934 }
935 return;
936 }
937
938 *optimizedp = 0;
939 *lvalp = lval_register;
940 *addrp = 0;
941 *realnump = regnum;
942 if (valuep)
943 frame_unwind_register (next_frame, (*realnump), valuep);
944 }
945
946 static const struct frame_unwind amd64_frame_unwind =
947 {
948 NORMAL_FRAME,
949 amd64_frame_this_id,
950 amd64_frame_prev_register
951 };
952
953 static const struct frame_unwind *
954 amd64_frame_sniffer (struct frame_info *next_frame)
955 {
956 return &amd64_frame_unwind;
957 }
958 \f
959
960 /* Signal trampolines. */
961
962 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
963 64-bit variants. This would require using identical frame caches
964 on both platforms. */
965
966 static struct amd64_frame_cache *
967 amd64_sigtramp_frame_cache (struct frame_info *next_frame, void **this_cache)
968 {
969 struct amd64_frame_cache *cache;
970 struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
971 CORE_ADDR addr;
972 gdb_byte buf[8];
973 int i;
974
975 if (*this_cache)
976 return *this_cache;
977
978 cache = amd64_alloc_frame_cache ();
979
980 frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
981 cache->base = extract_unsigned_integer (buf, 8) - 8;
982
983 addr = tdep->sigcontext_addr (next_frame);
984 gdb_assert (tdep->sc_reg_offset);
985 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
986 for (i = 0; i < tdep->sc_num_regs; i++)
987 if (tdep->sc_reg_offset[i] != -1)
988 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
989
990 *this_cache = cache;
991 return cache;
992 }
993
994 static void
995 amd64_sigtramp_frame_this_id (struct frame_info *next_frame,
996 void **this_cache, struct frame_id *this_id)
997 {
998 struct amd64_frame_cache *cache =
999 amd64_sigtramp_frame_cache (next_frame, this_cache);
1000
1001 (*this_id) = frame_id_build (cache->base + 16, frame_pc_unwind (next_frame));
1002 }
1003
1004 static void
1005 amd64_sigtramp_frame_prev_register (struct frame_info *next_frame,
1006 void **this_cache,
1007 int regnum, int *optimizedp,
1008 enum lval_type *lvalp, CORE_ADDR *addrp,
1009 int *realnump, gdb_byte *valuep)
1010 {
1011 /* Make sure we've initialized the cache. */
1012 amd64_sigtramp_frame_cache (next_frame, this_cache);
1013
1014 amd64_frame_prev_register (next_frame, this_cache, regnum,
1015 optimizedp, lvalp, addrp, realnump, valuep);
1016 }
1017
1018 static const struct frame_unwind amd64_sigtramp_frame_unwind =
1019 {
1020 SIGTRAMP_FRAME,
1021 amd64_sigtramp_frame_this_id,
1022 amd64_sigtramp_frame_prev_register
1023 };
1024
1025 static const struct frame_unwind *
1026 amd64_sigtramp_frame_sniffer (struct frame_info *next_frame)
1027 {
1028 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (next_frame));
1029
1030 /* We shouldn't even bother if we don't have a sigcontext_addr
1031 handler. */
1032 if (tdep->sigcontext_addr == NULL)
1033 return NULL;
1034
1035 if (tdep->sigtramp_p != NULL)
1036 {
1037 if (tdep->sigtramp_p (next_frame))
1038 return &amd64_sigtramp_frame_unwind;
1039 }
1040
1041 if (tdep->sigtramp_start != 0)
1042 {
1043 CORE_ADDR pc = frame_pc_unwind (next_frame);
1044
1045 gdb_assert (tdep->sigtramp_end != 0);
1046 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
1047 return &amd64_sigtramp_frame_unwind;
1048 }
1049
1050 return NULL;
1051 }
1052 \f
1053
1054 static CORE_ADDR
1055 amd64_frame_base_address (struct frame_info *next_frame, void **this_cache)
1056 {
1057 struct amd64_frame_cache *cache =
1058 amd64_frame_cache (next_frame, this_cache);
1059
1060 return cache->base;
1061 }
1062
1063 static const struct frame_base amd64_frame_base =
1064 {
1065 &amd64_frame_unwind,
1066 amd64_frame_base_address,
1067 amd64_frame_base_address,
1068 amd64_frame_base_address
1069 };
1070
1071 static struct frame_id
1072 amd64_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
1073 {
1074 gdb_byte buf[8];
1075 CORE_ADDR fp;
1076
1077 frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
1078 fp = extract_unsigned_integer (buf, 8);
1079
1080 return frame_id_build (fp + 16, frame_pc_unwind (next_frame));
1081 }
1082
1083 /* Align the SP to a 16-byte boundary, as required by the psABI for calls. */
1084
1085 static CORE_ADDR
1086 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1087 {
1088 return sp & -(CORE_ADDR)16;
1089 }
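/* Example (illustrative, not part of the original source):
   amd64_frame_align (gdbarch, 0x7fffffffe4c8) yields 0x7fffffffe4c0,
   i.e. the low four bits are simply cleared.  */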
1090 \f
1091
1092 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
1093 in the floating-point register set REGSET to register cache
1094 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
1095
1096 static void
1097 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
1098 int regnum, const void *fpregs, size_t len)
1099 {
1100 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
1101
1102 gdb_assert (len == tdep->sizeof_fpregset);
1103 amd64_supply_fxsave (regcache, regnum, fpregs);
1104 }
1105
1106 /* Collect register REGNUM from the register cache REGCACHE and store
1107 it in the buffer specified by FPREGS and LEN as described by the
1108 floating-point register set REGSET. If REGNUM is -1, do this for
1109 all registers in REGSET. */
1110
1111 static void
1112 amd64_collect_fpregset (const struct regset *regset,
1113 const struct regcache *regcache,
1114 int regnum, void *fpregs, size_t len)
1115 {
1116 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
1117
1118 gdb_assert (len == tdep->sizeof_fpregset);
1119 amd64_collect_fxsave (regcache, regnum, fpregs);
1120 }
1121
1122 /* Return the appropriate register set for the core section identified
1123 by SECT_NAME and SECT_SIZE. */
1124
1125 static const struct regset *
1126 amd64_regset_from_core_section (struct gdbarch *gdbarch,
1127 const char *sect_name, size_t sect_size)
1128 {
1129 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1130
1131 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
1132 {
1133 if (tdep->fpregset == NULL)
1134 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
1135 amd64_collect_fpregset);
1136
1137 return tdep->fpregset;
1138 }
1139
1140 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
1141 }
1142 \f
1143
1144 void
1145 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1146 {
1147 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1148
1149 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
1150 floating-point registers. */
1151 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
1152
1153 /* AMD64 has an FPU and 16 SSE registers. */
1154 tdep->st0_regnum = AMD64_ST0_REGNUM;
1155 tdep->num_xmm_regs = 16;
1156
1157 /* This is what all the fuss is about. */
1158 set_gdbarch_long_bit (gdbarch, 64);
1159 set_gdbarch_long_long_bit (gdbarch, 64);
1160 set_gdbarch_ptr_bit (gdbarch, 64);
1161
1162 /* In contrast to the i386, on AMD64 a `long double' actually takes
1163 up 128 bits, even though it's still based on the i387 extended
1164 floating-point format which has only 80 significant bits. */
1165 set_gdbarch_long_double_bit (gdbarch, 128);
1166
1167 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
1168 set_gdbarch_register_name (gdbarch, amd64_register_name);
1169 set_gdbarch_register_type (gdbarch, amd64_register_type);
1170
1171 /* Register numbers of various important registers. */
1172 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
1173 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
1174 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
1175 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
1176
1177 /* The "default" register numbering scheme for AMD64 is referred to
1178 as the "DWARF Register Number Mapping" in the System V psABI.
1179 The preferred debugging format for all known AMD64 targets is
1180 actually DWARF2, and GCC doesn't seem to support DWARF (that is
1181 DWARF-1), but we provide the same mapping just in case. This
1182 mapping is also used for stabs, which GCC does support. */
1183 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
1184 set_gdbarch_dwarf_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
1185 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
1186
1187 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
1188 be in use on any of the supported AMD64 targets. */
1189
1190 /* Call dummy code. */
1191 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
1192 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
1193 set_gdbarch_frame_red_zone_size (gdbarch, 128);
1194
1195 set_gdbarch_convert_register_p (gdbarch, amd64_convert_register_p);
1196 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
1197 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
1198
1199 set_gdbarch_return_value (gdbarch, amd64_return_value);
1200
1201 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
1202
1203 /* Avoid wiring in the MMX registers for now. */
1204 set_gdbarch_num_pseudo_regs (gdbarch, 0);
1205 tdep->mm0_regnum = -1;
1206
1207 set_gdbarch_unwind_dummy_id (gdbarch, amd64_unwind_dummy_id);
1208
1209 frame_unwind_append_sniffer (gdbarch, amd64_sigtramp_frame_sniffer);
1210 frame_unwind_append_sniffer (gdbarch, amd64_frame_sniffer);
1211 frame_base_set_default (gdbarch, &amd64_frame_base);
1212
1213 /* If we have a register mapping, enable the generic core file support. */
1214 if (tdep->gregset_reg_offset)
1215 set_gdbarch_regset_from_core_section (gdbarch,
1216 amd64_regset_from_core_section);
1217 }
1218 \f
1219
1220 #define I387_ST0_REGNUM AMD64_ST0_REGNUM
1221
1222 /* The 64-bit FXSAVE format differs from the 32-bit format in the
1223 sense that the instruction pointer and data pointer are simply
1224 64-bit offsets into the code segment and the data segment instead
1225 of a selector offset pair. The functions below store the upper 32
1226 bits of these pointers (instead of just the 16 bits of the segment
1227 selector). */
1228
1229 /* Fill register REGNUM in REGCACHE with the appropriate
1230 floating-point or SSE register value from *FXSAVE. If REGNUM is
1231 -1, do this for all registers. This function masks off any of the
1232 reserved bits in *FXSAVE. */
1233
1234 void
1235 amd64_supply_fxsave (struct regcache *regcache, int regnum,
1236 const void *fxsave)
1237 {
1238 i387_supply_fxsave (regcache, regnum, fxsave);
1239
1240 if (fxsave && gdbarch_ptr_bit (get_regcache_arch (regcache)) == 64)
1241 {
1242 const gdb_byte *regs = fxsave;
1243
1244 if (regnum == -1 || regnum == I387_FISEG_REGNUM)
1245 regcache_raw_supply (regcache, I387_FISEG_REGNUM, regs + 12);
1246 if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
1247 regcache_raw_supply (regcache, I387_FOSEG_REGNUM, regs + 20);
1248 }
1249 }
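/* Note (illustrative, assuming the standard 64-bit FXSAVE layout): in
   that layout the x87 instruction pointer occupies bytes 8-15 and the
   operand pointer bytes 16-23 of the save area, so the REGS + 12 and
   REGS + 20 slices used above and below pick up the upper 32 bits of
   those pointers, as described in the comment preceding these
   functions.  */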
1250
1251 /* Fill register REGNUM (if it is a floating-point or SSE register) in
1252 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
1253 all registers. This function doesn't touch any of the reserved
1254 bits in *FXSAVE. */
1255
1256 void
1257 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
1258 void *fxsave)
1259 {
1260 gdb_byte *regs = fxsave;
1261
1262 i387_collect_fxsave (regcache, regnum, fxsave);
1263
1264 if (gdbarch_ptr_bit (get_regcache_arch (regcache)) == 64)
1265 {
1266 if (regnum == -1 || regnum == I387_FISEG_REGNUM)
1267 regcache_raw_collect (regcache, I387_FISEG_REGNUM, regs + 12);
1268 if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
1269 regcache_raw_collect (regcache, I387_FOSEG_REGNUM, regs + 20);
1270 }
1271 }