/* Target-dependent code for AMD64.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "defs.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"

#include "gdb_assert.h"

#include "amd64-tdep.h"
#include "i387-tdep.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

struct amd64_register_info
{
  char *name;
  struct type **type;
};

static struct amd64_register_info const amd64_register_info[] =
{
  { "rax", &builtin_type_int64 },
  { "rbx", &builtin_type_int64 },
  { "rcx", &builtin_type_int64 },
  { "rdx", &builtin_type_int64 },
  { "rsi", &builtin_type_int64 },
  { "rdi", &builtin_type_int64 },
  { "rbp", &builtin_type_void_data_ptr },
  { "rsp", &builtin_type_void_data_ptr },

  /* %r8 is indeed register number 8.  */
  { "r8", &builtin_type_int64 },
  { "r9", &builtin_type_int64 },
  { "r10", &builtin_type_int64 },
  { "r11", &builtin_type_int64 },
  { "r12", &builtin_type_int64 },
  { "r13", &builtin_type_int64 },
  { "r14", &builtin_type_int64 },
  { "r15", &builtin_type_int64 },
  { "rip", &builtin_type_void_func_ptr },
  { "eflags", &i386_eflags_type },
  { "cs", &builtin_type_int32 },
  { "ss", &builtin_type_int32 },
  { "ds", &builtin_type_int32 },
  { "es", &builtin_type_int32 },
  { "fs", &builtin_type_int32 },
  { "gs", &builtin_type_int32 },

  /* %st0 is register number 24.  */
  { "st0", &builtin_type_i387_ext },
  { "st1", &builtin_type_i387_ext },
  { "st2", &builtin_type_i387_ext },
  { "st3", &builtin_type_i387_ext },
  { "st4", &builtin_type_i387_ext },
  { "st5", &builtin_type_i387_ext },
  { "st6", &builtin_type_i387_ext },
  { "st7", &builtin_type_i387_ext },
  { "fctrl", &builtin_type_int32 },
  { "fstat", &builtin_type_int32 },
  { "ftag", &builtin_type_int32 },
  { "fiseg", &builtin_type_int32 },
  { "fioff", &builtin_type_int32 },
  { "foseg", &builtin_type_int32 },
  { "fooff", &builtin_type_int32 },
  { "fop", &builtin_type_int32 },

  /* %xmm0 is register number 40.  */
  { "xmm0", &i386_sse_type },
  { "xmm1", &i386_sse_type },
  { "xmm2", &i386_sse_type },
  { "xmm3", &i386_sse_type },
  { "xmm4", &i386_sse_type },
  { "xmm5", &i386_sse_type },
  { "xmm6", &i386_sse_type },
  { "xmm7", &i386_sse_type },
  { "xmm8", &i386_sse_type },
  { "xmm9", &i386_sse_type },
  { "xmm10", &i386_sse_type },
  { "xmm11", &i386_sse_type },
  { "xmm12", &i386_sse_type },
  { "xmm13", &i386_sse_type },
  { "xmm14", &i386_sse_type },
  { "xmm15", &i386_sse_type },
  { "mxcsr", &i386_mxcsr_type }
};

/* Total number of registers.  */
#define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_info)

/* Return the name of register REGNUM.  */

const char *
amd64_register_name (int regnum)
{
  if (regnum >= 0 && regnum < AMD64_NUM_REGS)
    return amd64_register_info[regnum].name;

  return NULL;
}

/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  */

struct type *
amd64_register_type (struct gdbarch *gdbarch, int regnum)
{
  gdb_assert (regnum >= 0 && regnum < AMD64_NUM_REGS);

  return *amd64_register_info[regnum].type;
}

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};
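
/* For example, DWARF register 0 maps to AMD64_RAX_REGNUM, DWARF
   register 7 to AMD64_RSP_REGNUM, and DWARF register 16 (the return
   address column) to AMD64_RIP_REGNUM in the table above.  */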

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (int reg)
{
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning (_("Unmapped DWARF Register #%d encountered."), reg);

  return regnum;
}

/* Return nonzero if a value of type TYPE stored in register REGNUM
   needs any special handling.  */

static int
amd64_convert_register_p (int regnum, struct type *type)
{
  return i386_fp_regnum_p (regnum);
}


/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}

static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);

/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
        class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
                  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
        {
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
          enum amd64_reg_class subclass[2];

          /* Ignore static fields.  */
          if (TYPE_FIELD_STATIC (type, i))
            continue;

          gdb_assert (pos == 0 || pos == 1);

          amd64_classify (subtype, subclass);
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
          if (pos == 0)
            class[1] = amd64_merge_classes (class[1], subclass[1]);
        }
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double and __m64 are in class SSE.  */
  else if (code == TYPE_CODE_FLT && (len == 4 || len == 8))
    /* FIXME: __m64.  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128 and __m128 are split into two
     halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  /* FIXME: __float128, __m128.  */

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
           || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
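
/* For example, under these rules a value of type

      struct { long l; double d; };

   occupies two eightbytes and is classified { INTEGER, SSE }: it is
   passed or returned in one integer register and one SSE register.
   A structure larger than two eightbytes is classified MEMORY and is
   passed on the stack instead.  */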

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *type,
                    struct regcache *regcache,
                    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
        {
          ULONGEST addr;

          regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
          read_memory (addr, readbuf, TYPE_LENGTH (type));
        }

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
        {
        case AMD64_INTEGER:
          /* 3. If the class is INTEGER, the next available register
             of the sequence %rax, %rdx is used.  */
          regnum = integer_regnum[integer_reg++];
          break;

        case AMD64_SSE:
          /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
          regnum = sse_regnum[sse_reg++];
          break;

        case AMD64_SSEUP:
          /* 5. If the class is SSEUP, the eightbyte is passed in the
             upper half of the last used SSE register.  */
          gdb_assert (sse_reg > 0);
          regnum = sse_regnum[sse_reg - 1];
          offset = 8;
          break;

        case AMD64_X87:
          /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
          regnum = AMD64_ST0_REGNUM;
          if (writebuf)
            i387_return_value (gdbarch, regcache);
          break;

        case AMD64_X87UP:
          /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  */
          gdb_assert (i > 0 && class[0] == AMD64_X87);
          regnum = AMD64_ST0_REGNUM;
          offset = 8;
          len = 2;
          break;

        case AMD64_NO_CLASS:
          continue;

        default:
          gdb_assert (!"Unexpected register class.");
        }

      gdb_assert (regnum != -1);

      if (readbuf)
        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
                                readbuf + i * 8);
      if (writebuf)
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
                                 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}


static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
                      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,           /* %rdi */
    AMD64_RSI_REGNUM,           /* %rsi */
    AMD64_RDX_REGNUM,           /* %rdx */
    AMD64_RCX_REGNUM,           /* %rcx */
    8,                          /* %r8 */
    9                           /* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
        {
          if (class[j] == AMD64_INTEGER)
            needed_integer_regs++;
          else if (class[j] == AMD64_SSE)
            needed_sse_regs++;
        }

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
          || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
          || (needed_integer_regs == 0 && needed_sse_regs == 0))
        {
          /* The argument will be passed on the stack.  */
          num_elements += ((len + 7) / 8);
          stack_args[num_stack_args++] = args[i];
        }
      else
        {
          /* The argument will be passed in registers.  */
          const gdb_byte *valbuf = value_contents (args[i]);
          gdb_byte buf[8];

          gdb_assert (len <= 16);

          for (j = 0; len > 0; j++, len -= 8)
            {
              int regnum = -1;
              int offset = 0;

              switch (class[j])
                {
                case AMD64_INTEGER:
                  regnum = integer_regnum[integer_reg++];
                  break;

                case AMD64_SSE:
                  regnum = sse_regnum[sse_reg++];
                  break;

                case AMD64_SSEUP:
                  gdb_assert (sse_reg > 0);
                  regnum = sse_regnum[sse_reg - 1];
                  offset = 8;
                  break;

                default:
                  gdb_assert (!"Unexpected register class.");
                }

              gdb_assert (regnum != -1);
              memset (buf, 0, sizeof buf);
              memcpy (buf, valbuf + j * 8, min (len, 8));
              regcache_raw_write_part (regcache, regnum, offset, 8, buf);
            }
        }
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
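  /* At this point SSE_REG holds the number of SSE registers actually
     used for the arguments, which is a valid upper bound for the
     callee to use when deciding how many vector registers to save
     for a possible va_list.  */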
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                       struct regcache *regcache, CORE_ADDR bp_addr,
                       int nargs, struct value **args, CORE_ADDR sp,
                       int struct_return, CORE_ADDR struct_addr)
{
  gdb_byte buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}


/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;
  int i;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);

  /* Base address.  */
  cache->base = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;

  return cache;
}

/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.  */
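
/* For instance, code compiled with -fomit-frame-pointer will usually
   not match this sequence; amd64_analyze_prologue then leaves
   FRAMELESS_P set in the cache, and amd64_frame_cache below falls
   back to reconstructing the frame base from the stack pointer.  */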

static CORE_ADDR
amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
                        struct amd64_frame_cache *cache)
{
  static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  op = read_memory_unsigned_integer (pc, 1);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)
        return pc + 1;

      /* OK, we actually have a frame.  */
      cache->frameless_p = 0;
      return pc + 4;
    }

  return pc;
}

/* Return PC of first real instruction.  */

static CORE_ADDR
amd64_skip_prologue (CORE_ADDR start_pc)
{
  struct amd64_frame_cache cache;
  CORE_ADDR pc;

  pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffffLL, &cache);
  if (cache.frameless_p)
    return start_pc;

  return pc;
}


/* Normal frames.  */
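
/* With the standard prologue above, a normal frame looks like this:

      16(%rbp)   arguments passed on the stack (if any)
       8(%rbp)   return address
       0(%rbp)   saved %rbp of the caller

   which is why the frame base is read from %rbp, the caller's %rsp
   is BASE + 16, and the saved %rip is found at offset 8.  */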

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = frame_func_unwind (next_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (cache->pc, frame_pc_unwind (next_frame), cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
         function, or somewhere halfway through its prologue, the
         function's frame probably hasn't been fully set up yet.  Try
         to reconstruct the base address for the stack frame by
         looking at the stack pointer.  For truly "frameless"
         functions this might work too.  */

      frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
    }
  else
    {
      frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}

static void
amd64_frame_this_id (struct frame_info *next_frame, void **this_cache,
                     struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  /* This marks the outermost frame.  */
  if (cache->base == 0)
    return;

  (*this_id) = frame_id_build (cache->base + 16, cache->pc);
}

static void
amd64_frame_prev_register (struct frame_info *next_frame, void **this_cache,
                           int regnum, int *optimizedp,
                           enum lval_type *lvalp, CORE_ADDR *addrp,
                           int *realnump, gdb_byte *valuep)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  gdb_assert (regnum >= 0);

  if (regnum == SP_REGNUM && cache->saved_sp)
    {
      *optimizedp = 0;
      *lvalp = not_lval;
      *addrp = 0;
      *realnump = -1;
      if (valuep)
        {
          /* Store the value.  */
          store_unsigned_integer (valuep, 8, cache->saved_sp);
        }
      return;
    }

  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    {
      *optimizedp = 0;
      *lvalp = lval_memory;
      *addrp = cache->saved_regs[regnum];
      *realnump = -1;
      if (valuep)
        {
          /* Read the value in from memory.  */
          read_memory (*addrp, valuep,
                       register_size (current_gdbarch, regnum));
        }
      return;
    }

  *optimizedp = 0;
  *lvalp = lval_register;
  *addrp = 0;
  *realnump = regnum;
  if (valuep)
    frame_unwind_register (next_frame, (*realnump), valuep);
}

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register
};

static const struct frame_unwind *
amd64_frame_sniffer (struct frame_info *next_frame)
{
  return &amd64_frame_unwind;
}


/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8) - 8;

  addr = tdep->sigcontext_addr (next_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}

static void
amd64_sigtramp_frame_this_id (struct frame_info *next_frame,
                              void **this_cache, struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_sigtramp_frame_cache (next_frame, this_cache);

  (*this_id) = frame_id_build (cache->base + 16, frame_pc_unwind (next_frame));
}

static void
amd64_sigtramp_frame_prev_register (struct frame_info *next_frame,
                                    void **this_cache,
                                    int regnum, int *optimizedp,
                                    enum lval_type *lvalp, CORE_ADDR *addrp,
                                    int *realnump, gdb_byte *valuep)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (next_frame, this_cache);

  amd64_frame_prev_register (next_frame, this_cache, regnum,
                             optimizedp, lvalp, addrp, realnump, valuep);
}

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register
};

static const struct frame_unwind *
amd64_sigtramp_frame_sniffer (struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (next_frame));

  /* We shouldn't even bother if we don't have a sigcontext_addr
     handler.  */
  if (tdep->sigcontext_addr == NULL)
    return NULL;

  if (tdep->sigtramp_p != NULL)
    {
      if (tdep->sigtramp_p (next_frame))
        return &amd64_sigtramp_frame_unwind;
    }

  if (tdep->sigtramp_start != 0)
    {
      CORE_ADDR pc = frame_pc_unwind (next_frame);

      gdb_assert (tdep->sigtramp_end != 0);
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
        return &amd64_sigtramp_frame_unwind;
    }

  return NULL;
}


static CORE_ADDR
amd64_frame_base_address (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  return cache->base;
}

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};

static struct frame_id
amd64_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  gdb_byte buf[8];
  CORE_ADDR fp;

  frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
  fp = extract_unsigned_integer (buf, 8);

  return frame_id_build (fp + 16, frame_pc_unwind (next_frame));
}

/* 16 byte align the SP per frame requirements.  */

static CORE_ADDR
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & -(CORE_ADDR)16;
}


/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
                       int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}

/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  */

static void
amd64_collect_fpregset (const struct regset *regset,
                        const struct regcache *regcache,
                        int regnum, void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);
}

/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  */

static const struct regset *
amd64_regset_from_core_section (struct gdbarch *gdbarch,
                                const char *sect_name, size_t sect_size)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
    {
      if (tdep->fpregset == NULL)
        tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
                                       amd64_collect_fpregset);

      return tdep->fpregset;
    }

  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
}


void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, amd64_register_name);
  set_gdbarch_register_type (gdbarch, amd64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, amd64_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  set_gdbarch_unwind_dummy_id (gdbarch, amd64_unwind_dummy_id);

  frame_unwind_append_sniffer (gdbarch, amd64_sigtramp_frame_sniffer);
  frame_unwind_append_sniffer (gdbarch, amd64_frame_sniffer);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
                                          amd64_regset_from_core_section);
}


#define I387_ST0_REGNUM AMD64_ST0_REGNUM

/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16 bits of the segment
   selector).  */
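
/* In the 64-bit FXSAVE layout the instruction pointer occupies bytes
   8-15 and the data pointer bytes 16-23, so the upper 32 bits live at
   byte offsets 12 and 20 respectively; those are the offsets the two
   functions below use in place of the FISEG and FOSEG selectors.  */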

/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
                     const void *fxsave)
{
  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave && gdbarch_ptr_bit (get_regcache_arch (regcache)) == 64)
    {
      const gdb_byte *regs = fxsave;

      if (regnum == -1 || regnum == I387_FISEG_REGNUM)
        regcache_raw_supply (regcache, I387_FISEG_REGNUM, regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM, regs + 20);
    }
}

/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
                      void *fxsave)
{
  gdb_byte *regs = fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_ptr_bit (get_regcache_arch (regcache)) == 64)
    {
      if (regnum == -1 || regnum == I387_FISEG_REGNUM)
        regcache_raw_collect (regcache, I387_FISEG_REGNUM, regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
        regcache_raw_collect (regcache, I387_FOSEG_REGNUM, regs + 20);
    }
}