/* Target-dependent code for AMD64.

   Copyright 2001, 2002, 2003, 2004, 2005 Free Software Foundation,
   Inc.  Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "defs.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"

#include "gdb_assert.h"

#include "amd64-tdep.h"
#include "i387-tdep.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

struct amd64_register_info
{
  char *name;
  struct type **type;
};

static struct amd64_register_info amd64_register_info[] =
{
  { "rax", &builtin_type_int64 },
  { "rbx", &builtin_type_int64 },
  { "rcx", &builtin_type_int64 },
  { "rdx", &builtin_type_int64 },
  { "rsi", &builtin_type_int64 },
  { "rdi", &builtin_type_int64 },
  { "rbp", &builtin_type_void_data_ptr },
  { "rsp", &builtin_type_void_data_ptr },

  /* %r8 is indeed register number 8.  */
  { "r8", &builtin_type_int64 },
  { "r9", &builtin_type_int64 },
  { "r10", &builtin_type_int64 },
  { "r11", &builtin_type_int64 },
  { "r12", &builtin_type_int64 },
  { "r13", &builtin_type_int64 },
  { "r14", &builtin_type_int64 },
  { "r15", &builtin_type_int64 },
  { "rip", &builtin_type_void_func_ptr },
  { "eflags", &builtin_type_int32 },
  { "cs", &builtin_type_int32 },
  { "ss", &builtin_type_int32 },
  { "ds", &builtin_type_int32 },
  { "es", &builtin_type_int32 },
  { "fs", &builtin_type_int32 },
  { "gs", &builtin_type_int32 },

  /* %st0 is register number 24.  */
  { "st0", &builtin_type_i387_ext },
  { "st1", &builtin_type_i387_ext },
  { "st2", &builtin_type_i387_ext },
  { "st3", &builtin_type_i387_ext },
  { "st4", &builtin_type_i387_ext },
  { "st5", &builtin_type_i387_ext },
  { "st6", &builtin_type_i387_ext },
  { "st7", &builtin_type_i387_ext },
  { "fctrl", &builtin_type_int32 },
  { "fstat", &builtin_type_int32 },
  { "ftag", &builtin_type_int32 },
  { "fiseg", &builtin_type_int32 },
  { "fioff", &builtin_type_int32 },
  { "foseg", &builtin_type_int32 },
  { "fooff", &builtin_type_int32 },
  { "fop", &builtin_type_int32 },

  /* %xmm0 is register number 40.  */
  { "xmm0", &builtin_type_v4sf },
  { "xmm1", &builtin_type_v4sf },
  { "xmm2", &builtin_type_v4sf },
  { "xmm3", &builtin_type_v4sf },
  { "xmm4", &builtin_type_v4sf },
  { "xmm5", &builtin_type_v4sf },
  { "xmm6", &builtin_type_v4sf },
  { "xmm7", &builtin_type_v4sf },
  { "xmm8", &builtin_type_v4sf },
  { "xmm9", &builtin_type_v4sf },
  { "xmm10", &builtin_type_v4sf },
  { "xmm11", &builtin_type_v4sf },
  { "xmm12", &builtin_type_v4sf },
  { "xmm13", &builtin_type_v4sf },
  { "xmm14", &builtin_type_v4sf },
  { "xmm15", &builtin_type_v4sf },
  { "mxcsr", &builtin_type_int32 }
};

/* Total number of registers.  */
#define AMD64_NUM_REGS \
  (sizeof (amd64_register_info) / sizeof (amd64_register_info[0]))

/* Return the name of register REGNUM.  */

static const char *
amd64_register_name (int regnum)
{
  if (regnum >= 0 && regnum < AMD64_NUM_REGS)
    return amd64_register_info[regnum].name;

  return NULL;
}

/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  */

static struct type *
amd64_register_type (struct gdbarch *gdbarch, int regnum)
{
  gdb_assert (regnum >= 0 && regnum < AMD64_NUM_REGS);

  return *amd64_register_info[regnum].type;
}
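
/* For illustration, the table order above yields the following raw
   register numbering (a consequence of the layout, not an assumption):

     0 .. 15   %rax .. %r15 (general-purpose registers)
     16        %rip
     17        %eflags
     18 .. 23  %cs, %ss, %ds, %es, %fs, %gs
     24 .. 31  %st0 .. %st7
     32 .. 39  x87 control/status registers (%fctrl .. %fop)
     40 .. 55  %xmm0 .. %xmm15
     56        %mxcsr

   so AMD64_NUM_REGS evaluates to 57.  For example,
   amd64_register_name (40) returns "xmm0" and
   amd64_register_type (gdbarch, 40) returns builtin_type_v4sf.  */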

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (int reg)
{
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning (_("Unmapped DWARF Register #%d encountered."), reg);

  return regnum;
}
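
/* As a concrete example of the mapping above: DWARF register 0 is
   %rax, 6 is %rbp, 7 is %rsp and 16 is the return address (%rip), so

     amd64_dwarf_reg_to_regnum (7)   => AMD64_RSP_REGNUM
     amd64_dwarf_reg_to_regnum (16)  => AMD64_RIP_REGNUM
     amd64_dwarf_reg_to_regnum (17)  => AMD64_XMM0_REGNUM
     amd64_dwarf_reg_to_regnum (999) => -1 (with a warning)

   Note that the DWARF numbering differs from GDB's raw numbering; in
   particular DWARF places %rdx at 1 and %rbx at 3, while the register
   table above has %rbx at 1 and %rdx at 3.  */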

/* Return nonzero if a value of type TYPE stored in register REGNUM
   needs any special handling.  */

static int
amd64_convert_register_p (int regnum, struct type *type)
{
  return i386_fp_regnum_p (regnum);
}
\f

/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
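
/* For example, when merging the contributions of two fields that
   share an eightbyte:

     amd64_merge_classes (AMD64_INTEGER, AMD64_SSE)   => AMD64_INTEGER
     amd64_merge_classes (AMD64_NO_CLASS, AMD64_SSE)  => AMD64_SSE
     amd64_merge_classes (AMD64_SSE, AMD64_X87)       => AMD64_MEMORY

   by rules (d), (b) and (e) respectively.  */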

static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);

/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
        class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
                  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
        {
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
          enum amd64_reg_class subclass[2];

          /* Ignore static fields.  */
          if (TYPE_FIELD_STATIC (type, i))
            continue;

          gdb_assert (pos == 0 || pos == 1);

          amd64_classify (subtype, subclass);
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
          if (pos == 0)
            class[1] = amd64_merge_classes (class[1], subclass[1]);
        }
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
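
/* Three worked examples of the aggregate rules above (assuming the
   usual x86-64 sizes: long and double are 8 bytes, int and float are
   4 bytes):

     struct { long a; double b; };        16 bytes
       a -> eightbyte 0, INTEGER; b -> eightbyte 1, SSE;
       result: class[0] = AMD64_INTEGER, class[1] = AMD64_SSE.

     struct { int a; float b; };          8 bytes
       both fields land in eightbyte 0; merging INTEGER with SSE
       gives INTEGER (rule (d) above);
       result: class[0] = AMD64_INTEGER, class[1] = AMD64_NO_CLASS.

     struct { long a; long b; long c; };  24 bytes
       larger than two eightbytes, so both halves are AMD64_MEMORY.  */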

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double and __m64 are in class SSE.  */
  else if (code == TYPE_CODE_FLT && (len == 4 || len == 8))
    /* FIXME: __m64.  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128 and __m128 are split into two
     halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  /* FIXME: __float128, __m128.  */

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
           || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
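
/* A few scalar examples of the classification above:

     int, long, enum, pointers (1, 2, 4 or 8 bytes)  -> { INTEGER, NO_CLASS }
     float, double                                   -> { SSE, NO_CLASS }
     long double (16 bytes, i387 extended format)    -> { X87, X87UP }

   Types that match none of the cases (e.g. __float128, per the FIXME
   above) are left as { NO_CLASS, NO_CLASS } and end up being passed
   on the stack by amd64_push_arguments below.  */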

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *type,
                    struct regcache *regcache,
                    void *readbuf, const void *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
        {
          ULONGEST addr;

          regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
          read_memory (addr, readbuf, TYPE_LENGTH (type));
        }

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
        {
        case AMD64_INTEGER:
          /* 3. If the class is INTEGER, the next available register
             of the sequence %rax, %rdx is used.  */
          regnum = integer_regnum[integer_reg++];
          break;

        case AMD64_SSE:
          /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
          regnum = sse_regnum[sse_reg++];
          break;

        case AMD64_SSEUP:
          /* 5. If the class is SSEUP, the eightbyte is passed in the
             upper half of the last used SSE register.  */
          gdb_assert (sse_reg > 0);
          regnum = sse_regnum[sse_reg - 1];
          offset = 8;
          break;

        case AMD64_X87:
          /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
          regnum = AMD64_ST0_REGNUM;
          if (writebuf)
            i387_return_value (gdbarch, regcache);
          break;

        case AMD64_X87UP:
          /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  */
          gdb_assert (i > 0 && class[0] == AMD64_X87);
          regnum = AMD64_ST0_REGNUM;
          offset = 8;
          len = 2;
          break;

        case AMD64_NO_CLASS:
          continue;

        default:
          gdb_assert (!"Unexpected register class.");
        }

      gdb_assert (regnum != -1);

      if (readbuf)
        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
                                (char *) readbuf + i * 8);
      if (writebuf)
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
                                 (const char *) writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
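
/* For example, reading the return value of a function returning
   struct { long a; double b; } (classified { INTEGER, SSE } above):
   the first eightbyte is fetched from %rax and the second from %xmm0,
   and RETURN_VALUE_REGISTER_CONVENTION is reported.  A 24-byte struct
   instead gets class MEMORY; its address is read from %rax and the
   contents from target memory, with RETURN_VALUE_ABI_RETURNS_ADDRESS
   reported to the caller.  */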
\f

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
                      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,           /* %rdi */
    AMD64_RSI_REGNUM,           /* %rsi */
    AMD64_RDX_REGNUM,           /* %rdx */
    AMD64_RCX_REGNUM,           /* %rcx */
    8,                          /* %r8 */
    9                           /* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
        {
          if (class[j] == AMD64_INTEGER)
            needed_integer_regs++;
          else if (class[j] == AMD64_SSE)
            needed_sse_regs++;
        }

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
          || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
          || (needed_integer_regs == 0 && needed_sse_regs == 0))
        {
          /* The argument will be passed on the stack.  */
          num_elements += ((len + 7) / 8);
          stack_args[num_stack_args++] = args[i];
        }
      else
        {
          /* The argument will be passed in registers.  */
          const bfd_byte *valbuf = value_contents (args[i]);
          char buf[8];

          gdb_assert (len <= 16);

          for (j = 0; len > 0; j++, len -= 8)
            {
              int regnum = -1;
              int offset = 0;

              switch (class[j])
                {
                case AMD64_INTEGER:
                  regnum = integer_regnum[integer_reg++];
                  break;

                case AMD64_SSE:
                  regnum = sse_regnum[sse_reg++];
                  break;

                case AMD64_SSEUP:
                  gdb_assert (sse_reg > 0);
                  regnum = sse_regnum[sse_reg - 1];
                  offset = 8;
                  break;

                default:
                  gdb_assert (!"Unexpected register class.");
                }

              gdb_assert (regnum != -1);
              memset (buf, 0, sizeof buf);
              memcpy (buf, valbuf + j * 8, min (len, 8));
              regcache_raw_write_part (regcache, regnum, offset, 8, buf);
            }
        }
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const bfd_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used."  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
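
/* For example, pushing the arguments of a call such as f (1, 2.5, st),
   where st is a 16-byte struct { long a; long b; } and no struct
   return is involved:

     1    -> { INTEGER }          -> %rdi
     2.5  -> { SSE }              -> %xmm0
     st   -> { INTEGER, INTEGER } -> %rsi and %rdx

   and %al is set to 1, the number of SSE registers used, as required
   for possibly-variadic callees.  */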

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                       struct regcache *regcache, CORE_ADDR bp_addr,
                       int nargs, struct value **args, CORE_ADDR sp,
                       int struct_return, CORE_ADDR struct_addr)
{
  char buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
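
/* The resulting dummy-call setup, with SP the final stack pointer
   established above:

     SP       return address (BP_ADDR, where the dummy returns to)
     SP + 8   start of the argument area laid out by
              amd64_push_arguments

   Both %rsp and the faked %rbp point at SP, so the value SP + 16
   returned here is the same frame id base that amd64_unwind_dummy_id
   computes from the unwound %rbp (fp + 16), which is how the dummy
   frame is recognized later.  */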
\f

/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;
  int i;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);

  /* Base address.  */
  cache->base = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;

  return cache;
}

/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
                        struct amd64_frame_cache *cache)
{
  static unsigned char proto[3] = { 0x48, 0x89, 0xe5 };
  unsigned char buf[3];
  unsigned char op;

  if (current_pc <= pc)
    return current_pc;

  op = read_memory_unsigned_integer (pc, 1);

  if (op == 0x55)               /* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)
        return pc + 1;

      /* OK, we actually have a frame.  */
      cache->frameless_p = 0;
      return pc + 4;
    }

  return pc;
}
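
/* Concretely, for a function compiled with a standard frame-pointer
   prologue the first four bytes are

     55         pushq %rbp
     48 89 e5   movq  %rsp, %rbp

   and the analysis above records saved_regs[AMD64_RBP_REGNUM] = 0
   (the old %rbp sits at offset 0 from the frame base), bumps
   sp_offset by 8 and clears frameless_p; the returned address PC + 4
   is what amd64_skip_prologue reports as the first "real"
   instruction.  A function that starts with anything else is treated
   as frameless.  */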

/* Return PC of first real instruction.  */

static CORE_ADDR
amd64_skip_prologue (CORE_ADDR start_pc)
{
  struct amd64_frame_cache cache;
  CORE_ADDR pc;

  pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffffLL, &cache);
  if (cache.frameless_p)
    return start_pc;

  return pc;
}
\f

/* Normal frames.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = frame_func_unwind (next_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (cache->pc, frame_pc_unwind (next_frame), cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
         function, or somewhere halfway through its prologue, the
         function's frame probably hasn't been fully set up yet.  Try
         to reconstruct the base address for the stack frame by
         looking at the stack pointer.  For truly "frameless"
         functions this might work too.  */

      frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
    }
  else
    {
      frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}
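
/* The cache thus encodes the canonical AMD64 frame layout: the saved
   %rbp lives at offset 0 from the frame base, the return address at
   offset 8 (saved_regs[AMD64_RIP_REGNUM]), and base + 16 is the
   caller's %rsp (cache->saved_sp), which is also where any
   stack-passed arguments begin.  cache->base is the %rbp value, or a
   reconstruction of it for frameless functions.  */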

static void
amd64_frame_this_id (struct frame_info *next_frame, void **this_cache,
                     struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  /* This marks the outermost frame.  */
  if (cache->base == 0)
    return;

  (*this_id) = frame_id_build (cache->base + 16, cache->pc);
}

static void
amd64_frame_prev_register (struct frame_info *next_frame, void **this_cache,
                           int regnum, int *optimizedp,
                           enum lval_type *lvalp, CORE_ADDR *addrp,
                           int *realnump, void *valuep)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  gdb_assert (regnum >= 0);

  if (regnum == SP_REGNUM && cache->saved_sp)
    {
      *optimizedp = 0;
      *lvalp = not_lval;
      *addrp = 0;
      *realnump = -1;
      if (valuep)
        {
          /* Store the value.  */
          store_unsigned_integer (valuep, 8, cache->saved_sp);
        }
      return;
    }

  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    {
      *optimizedp = 0;
      *lvalp = lval_memory;
      *addrp = cache->saved_regs[regnum];
      *realnump = -1;
      if (valuep)
        {
          /* Read the value in from memory.  */
          read_memory (*addrp, valuep,
                       register_size (current_gdbarch, regnum));
        }
      return;
    }

  *optimizedp = 0;
  *lvalp = lval_register;
  *addrp = 0;
  *realnump = regnum;
  if (valuep)
    frame_unwind_register (next_frame, (*realnump), valuep);
}

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register
};

static const struct frame_unwind *
amd64_frame_sniffer (struct frame_info *next_frame)
{
  return &amd64_frame_unwind;
}
\f

/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
  CORE_ADDR addr;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8) - 8;

  addr = tdep->sigcontext_addr (next_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}

static void
amd64_sigtramp_frame_this_id (struct frame_info *next_frame,
                              void **this_cache, struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_sigtramp_frame_cache (next_frame, this_cache);

  (*this_id) = frame_id_build (cache->base + 16, frame_pc_unwind (next_frame));
}

static void
amd64_sigtramp_frame_prev_register (struct frame_info *next_frame,
                                    void **this_cache,
                                    int regnum, int *optimizedp,
                                    enum lval_type *lvalp, CORE_ADDR *addrp,
                                    int *realnump, void *valuep)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (next_frame, this_cache);

  amd64_frame_prev_register (next_frame, this_cache, regnum,
                             optimizedp, lvalp, addrp, realnump, valuep);
}

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register
};

static const struct frame_unwind *
amd64_sigtramp_frame_sniffer (struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (next_frame));

  /* We shouldn't even bother if we don't have a sigcontext_addr
     handler.  */
  if (tdep->sigcontext_addr == NULL)
    return NULL;

  if (tdep->sigtramp_p != NULL)
    {
      if (tdep->sigtramp_p (next_frame))
        return &amd64_sigtramp_frame_unwind;
    }

  if (tdep->sigtramp_start != 0)
    {
      CORE_ADDR pc = frame_pc_unwind (next_frame);

      gdb_assert (tdep->sigtramp_end != 0);
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
        return &amd64_sigtramp_frame_unwind;
    }

  return NULL;
}
\f

static CORE_ADDR
amd64_frame_base_address (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  return cache->base;
}

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};

static struct frame_id
amd64_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  char buf[8];
  CORE_ADDR fp;

  frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
  fp = extract_unsigned_integer (buf, 8);

  return frame_id_build (fp + 16, frame_pc_unwind (next_frame));
}

/* 16-byte align the SP per frame requirements.  */

static CORE_ADDR
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & -(CORE_ADDR) 16;
}
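
/* For instance, amd64_frame_align (gdbarch, 0x7fffffffe358) yields
   0x7fffffffe350: the low four bits are cleared, rounding the stack
   pointer down to the required 16-byte boundary before a dummy call
   is pushed.  */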
\f

/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
                       int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}

/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  */

static void
amd64_collect_fpregset (const struct regset *regset,
                        const struct regcache *regcache,
                        int regnum, void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);
}

/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  */

static const struct regset *
amd64_regset_from_core_section (struct gdbarch *gdbarch,
                                const char *sect_name, size_t sect_size)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
    {
      if (tdep->fpregset == NULL)
        tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
                                       amd64_collect_fpregset);

      return tdep->fpregset;
    }

  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
}
\f

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, amd64_register_name);
  set_gdbarch_register_type (gdbarch, amd64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, amd64_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  set_gdbarch_unwind_dummy_id (gdbarch, amd64_unwind_dummy_id);

  frame_unwind_append_sniffer (gdbarch, amd64_sigtramp_frame_sniffer);
  frame_unwind_append_sniffer (gdbarch, amd64_frame_sniffer);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
                                          amd64_regset_from_core_section);
}
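
/* A rough sketch of how an OS-specific tdep file is expected to build
   on this function (hypothetical names, not the actual code of any
   particular port):

     static void
     amd64_someos_init_abi (struct gdbarch_info info,
                            struct gdbarch *gdbarch)
     {
       struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

       amd64_init_abi (info, gdbarch);

       tdep->gregset_reg_offset = someos_gregset_reg_offset;
       tdep->sigtramp_p = someos_sigtramp_p;
       tdep->sigcontext_addr = someos_sigcontext_addr;
       tdep->sc_reg_offset = someos_sc_reg_offset;
       tdep->sc_num_regs = ARRAY_SIZE (someos_sc_reg_offset);
     }

   Setting gregset_reg_offset enables the generic core-file support
   above, and the sigtramp/sigcontext hooks are what feed
   amd64_sigtramp_frame_sniffer and amd64_sigtramp_frame_cache.  */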
\f

#define I387_ST0_REGNUM AMD64_ST0_REGNUM

/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16-bits of the segment
   selector).  */

/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
                     const void *fxsave)
{
  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave && gdbarch_ptr_bit (get_regcache_arch (regcache)) == 64)
    {
      const char *regs = fxsave;

      if (regnum == -1 || regnum == I387_FISEG_REGNUM)
        regcache_raw_supply (regcache, I387_FISEG_REGNUM, regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM, regs + 20);
    }
}
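
/* To make the offsets above concrete: in the 64-bit FXSAVE layout the
   FPU instruction pointer occupies bytes 8-15 and the FPU operand
   (data) pointer bytes 16-23, so REGS + 12 and REGS + 20 point at the
   upper 32 bits of those two 64-bit pointers.  Those upper halves are
   stored in the "fiseg" and "foseg" slots of the register cache,
   which on 32-bit targets would hold the FPU code and data segment
   selectors instead.  */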

/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
                      void *fxsave)
{
  char *regs = fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_ptr_bit (get_regcache_arch (regcache)) == 64)
    {
      if (regnum == -1 || regnum == I387_FISEG_REGNUM)
        regcache_raw_collect (regcache, I387_FISEG_REGNUM, regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
        regcache_raw_collect (regcache, I387_FOSEG_REGNUM, regs + 20);
    }
}