/* Target-dependent code for AMD64.

   Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "defs.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"

#include "gdb_assert.h"

#include "amd64-tdep.h"
#include "i387-tdep.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

struct amd64_register_info
{
  char *name;
  struct type **type;
};

static struct amd64_register_info amd64_register_info[] =
{
  { "rax", &builtin_type_int64 },
  { "rbx", &builtin_type_int64 },
  { "rcx", &builtin_type_int64 },
  { "rdx", &builtin_type_int64 },
  { "rsi", &builtin_type_int64 },
  { "rdi", &builtin_type_int64 },
  { "rbp", &builtin_type_void_data_ptr },
  { "rsp", &builtin_type_void_data_ptr },

  /* %r8 is indeed register number 8.  */
  { "r8", &builtin_type_int64 },
  { "r9", &builtin_type_int64 },
  { "r10", &builtin_type_int64 },
  { "r11", &builtin_type_int64 },
  { "r12", &builtin_type_int64 },
  { "r13", &builtin_type_int64 },
  { "r14", &builtin_type_int64 },
  { "r15", &builtin_type_int64 },
  { "rip", &builtin_type_void_func_ptr },
  { "eflags", &builtin_type_int32 },
  { "cs", &builtin_type_int32 },
  { "ss", &builtin_type_int32 },
  { "ds", &builtin_type_int32 },
  { "es", &builtin_type_int32 },
  { "fs", &builtin_type_int32 },
  { "gs", &builtin_type_int32 },

  /* %st0 is register number 24.  */
  { "st0", &builtin_type_i387_ext },
  { "st1", &builtin_type_i387_ext },
  { "st2", &builtin_type_i387_ext },
  { "st3", &builtin_type_i387_ext },
  { "st4", &builtin_type_i387_ext },
  { "st5", &builtin_type_i387_ext },
  { "st6", &builtin_type_i387_ext },
  { "st7", &builtin_type_i387_ext },
  { "fctrl", &builtin_type_int32 },
  { "fstat", &builtin_type_int32 },
  { "ftag", &builtin_type_int32 },
  { "fiseg", &builtin_type_int32 },
  { "fioff", &builtin_type_int32 },
  { "foseg", &builtin_type_int32 },
  { "fooff", &builtin_type_int32 },
  { "fop", &builtin_type_int32 },

  /* %xmm0 is register number 40.  */
  { "xmm0", &builtin_type_v4sf },
  { "xmm1", &builtin_type_v4sf },
  { "xmm2", &builtin_type_v4sf },
  { "xmm3", &builtin_type_v4sf },
  { "xmm4", &builtin_type_v4sf },
  { "xmm5", &builtin_type_v4sf },
  { "xmm6", &builtin_type_v4sf },
  { "xmm7", &builtin_type_v4sf },
  { "xmm8", &builtin_type_v4sf },
  { "xmm9", &builtin_type_v4sf },
  { "xmm10", &builtin_type_v4sf },
  { "xmm11", &builtin_type_v4sf },
  { "xmm12", &builtin_type_v4sf },
  { "xmm13", &builtin_type_v4sf },
  { "xmm14", &builtin_type_v4sf },
  { "xmm15", &builtin_type_v4sf },
  { "mxcsr", &builtin_type_int32 }
};

/* Total number of registers.  */
#define AMD64_NUM_REGS \
  (sizeof (amd64_register_info) / sizeof (amd64_register_info[0]))

/* Return the name of register REGNUM.  */

static const char *
amd64_register_name (int regnum)
{
  if (regnum >= 0 && regnum < AMD64_NUM_REGS)
    return amd64_register_info[regnum].name;

  return NULL;
}

/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  */

static struct type *
amd64_register_type (struct gdbarch *gdbarch, int regnum)
{
  gdb_assert (regnum >= 0 && regnum < AMD64_NUM_REGS);

  return *amd64_register_info[regnum].type;
}

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (int reg)
{
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning ("Unmapped DWARF Register #%d encountered\n", reg);

  return regnum;
}
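
/* Worked example of the mapping above: DWARF register 1 denotes %rdx
   in the psABI, so amd64_dwarf_regmap[1] yields AMD64_RDX_REGNUM,
   which is a different number in GDB's own ordering (see
   amd64_register_info above: %rax, %rbx, %rcx, %rdx, ...).  Some
   registers, such as %rsp (DWARF register 7), happen to carry the
   same number in both schemes and map onto themselves.  */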

/* Return nonzero if a value of type TYPE stored in register REGNUM
   needs any special handling.  */

static int
amd64_convert_register_p (int regnum, struct type *type)
{
  return i386_fp_regnum_p (regnum);
}


/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}

static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);

/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
     C++, is a non-POD structure or union type, or contains
     unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
        class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
                  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
        {
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
          enum amd64_reg_class subclass[2];

          /* Ignore static fields.  */
          if (TYPE_FIELD_STATIC (type, i))
            continue;

          gdb_assert (pos == 0 || pos == 1);

          amd64_classify (subtype, subclass);
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
          if (pos == 0)
            class[1] = amd64_merge_classes (class[1], subclass[1]);
        }
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double and __m64 are in class SSE.  */
  else if (code == TYPE_CODE_FLT && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128 and __m128 are split into two
     halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  /* FIXME: __float128, __m128.  */

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
           || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
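
/* As an illustration of the rules above, a C type such as

     struct { long l; double d; };

   occupies two eightbytes; the first classifies as INTEGER (the long)
   and the second as SSE (the double), so a value of this type travels
   in one integer register and one SSE register rather than in memory.  */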

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *type,
                    struct regcache *regcache,
                    void *readbuf, const void *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
        {
          ULONGEST addr;

          regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
          read_memory (addr, readbuf, TYPE_LENGTH (type));
        }

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
        {
        case AMD64_INTEGER:
          /* 3. If the class is INTEGER, the next available register
             of the sequence %rax, %rdx is used.  */
          regnum = integer_regnum[integer_reg++];
          break;

        case AMD64_SSE:
          /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
          regnum = sse_regnum[sse_reg++];
          break;

        case AMD64_SSEUP:
          /* 5. If the class is SSEUP, the eightbyte is passed in the
             upper half of the last used SSE register.  */
          gdb_assert (sse_reg > 0);
          regnum = sse_regnum[sse_reg - 1];
          offset = 8;
          break;

        case AMD64_X87:
          /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as an 80-bit x87 number.  */
          regnum = AMD64_ST0_REGNUM;
          if (writebuf)
            i387_return_value (gdbarch, regcache);
          break;

        case AMD64_X87UP:
          /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  */
          gdb_assert (i > 0 && class[0] == AMD64_X87);
          regnum = AMD64_ST0_REGNUM;
          offset = 8;
          len = 2;
          break;

        case AMD64_NO_CLASS:
          continue;

        default:
          gdb_assert (!"Unexpected register class.");
        }

      gdb_assert (regnum != -1);

      if (readbuf)
        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
                                (char *) readbuf + i * 8);
      if (writebuf)
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
                                 (const char *) writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
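
/* For instance, when the struct from the classification example above
   (class INTEGER, SSE) is returned, the loop above reads or writes its
   first eightbyte in %rax and its second in %xmm0; a 16-byte `long
   double' (class X87, X87UP) instead comes back in %st0.  */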


static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
                      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,           /* %rdi */
    AMD64_RSI_REGNUM,           /* %rsi */
    AMD64_RDX_REGNUM,           /* %rdx */
    AMD64_RCX_REGNUM,           /* %rcx */
    8,                          /* %r8 */
    9                           /* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = VALUE_TYPE (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
        {
          if (class[j] == AMD64_INTEGER)
            needed_integer_regs++;
          else if (class[j] == AMD64_SSE)
            needed_sse_regs++;
        }

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
          || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
          || (needed_integer_regs == 0 && needed_sse_regs == 0))
        {
          /* The argument will be passed on the stack.  */
          num_elements += ((len + 7) / 8);
          stack_args[num_stack_args++] = args[i];
        }
      else
        {
          /* The argument will be passed in registers.  */
          char *valbuf = VALUE_CONTENTS (args[i]);
          char buf[8];

          gdb_assert (len <= 16);

          for (j = 0; len > 0; j++, len -= 8)
            {
              int regnum = -1;
              int offset = 0;

              switch (class[j])
                {
                case AMD64_INTEGER:
                  regnum = integer_regnum[integer_reg++];
                  break;

                case AMD64_SSE:
                  regnum = sse_regnum[sse_reg++];
                  break;

                case AMD64_SSEUP:
                  gdb_assert (sse_reg > 0);
                  regnum = sse_regnum[sse_reg - 1];
                  offset = 8;
                  break;

                default:
                  gdb_assert (!"Unexpected register class.");
                }

              gdb_assert (regnum != -1);
              memset (buf, 0, sizeof buf);
              memcpy (buf, valbuf + j * 8, min (len, 8));
              regcache_raw_write_part (regcache, regnum, offset, 8, buf);
            }
        }
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = VALUE_TYPE (stack_args[i]);
      char *valbuf = VALUE_CONTENTS (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used."  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
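
/* Example of the allocation above, assuming no hidden return-value
   argument: for a call f (long a, double d, struct { long x; long y; } s)
   the argument `a' goes in %rdi, `d' in %xmm0, and `s' (classified
   INTEGER, INTEGER) in %rsi and %rdx; %rax (of which %al is the
   relevant part) is then set to 1, the number of SSE registers used,
   in case f turns out to take a variable argument list.  */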

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, CORE_ADDR func_addr,
                       struct regcache *regcache, CORE_ADDR bp_addr,
                       int nargs, struct value **args, CORE_ADDR sp,
                       int struct_return, CORE_ADDR struct_addr)
{
  char buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
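
/* On return from this function the inferior stack holds the return
   address (BP_ADDR) at the new %rsp, %rbp has been set equal to %rsp,
   and the value returned (SP + 16) matches the frame ID that
   amd64_unwind_dummy_id below computes for the dummy frame
   (%rbp + 16).  */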


/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;
  int i;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);

  /* Base address.  */
  cache->base = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;

  return cache;
}

/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
                        struct amd64_frame_cache *cache)
{
  static unsigned char proto[3] = { 0x48, 0x89, 0xe5 };
  unsigned char buf[3];
  unsigned char op;

  if (current_pc <= pc)
    return current_pc;

  op = read_memory_unsigned_integer (pc, 1);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)
        return pc + 1;

      /* OK, we actually have a frame.  */
      cache->frameless_p = 0;
      return pc + 4;
    }

  return pc;
}

/* Return PC of first real instruction.  */

static CORE_ADDR
amd64_skip_prologue (CORE_ADDR start_pc)
{
  struct amd64_frame_cache cache;
  CORE_ADDR pc;

  pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffff, &cache);
  if (cache.frameless_p)
    return start_pc;

  return pc;
}


/* Normal frames.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = frame_func_unwind (next_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (cache->pc, frame_pc_unwind (next_frame), cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
         function, or somewhere halfway through its prologue, the
         function's frame probably hasn't been fully set up yet.  Try
         to reconstruct the base address for the stack frame by
         looking at the stack pointer.  For truly "frameless"
         functions this might work too.  */

      frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
    }
  else
    {
      frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}
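
/* To summarize the layout assumed by the cache: CACHE->base is the
   value of %rbp in the frame (or a reconstruction of it for frameless
   code), the caller's %rbp was pushed at CACHE->base itself, the
   saved %rip lives at CACHE->base + 8, and the caller's %rsp is
   CACHE->base + 16.  */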

static void
amd64_frame_this_id (struct frame_info *next_frame, void **this_cache,
                     struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  /* This marks the outermost frame.  */
  if (cache->base == 0)
    return;

  (*this_id) = frame_id_build (cache->base + 16, cache->pc);
}

static void
amd64_frame_prev_register (struct frame_info *next_frame, void **this_cache,
                           int regnum, int *optimizedp,
                           enum lval_type *lvalp, CORE_ADDR *addrp,
                           int *realnump, void *valuep)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  gdb_assert (regnum >= 0);

  if (regnum == SP_REGNUM && cache->saved_sp)
    {
      *optimizedp = 0;
      *lvalp = not_lval;
      *addrp = 0;
      *realnump = -1;
      if (valuep)
        {
          /* Store the value.  */
          store_unsigned_integer (valuep, 8, cache->saved_sp);
        }
      return;
    }

  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    {
      *optimizedp = 0;
      *lvalp = lval_memory;
      *addrp = cache->saved_regs[regnum];
      *realnump = -1;
      if (valuep)
        {
          /* Read the value in from memory.  */
          read_memory (*addrp, valuep,
                       register_size (current_gdbarch, regnum));
        }
      return;
    }

  frame_register_unwind (next_frame, regnum,
                         optimizedp, lvalp, addrp, realnump, valuep);
}

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register
};

static const struct frame_unwind *
amd64_frame_sniffer (struct frame_info *next_frame)
{
  return &amd64_frame_unwind;
}


/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
  CORE_ADDR addr;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8) - 8;

  addr = tdep->sigcontext_addr (next_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}

static void
amd64_sigtramp_frame_this_id (struct frame_info *next_frame,
                              void **this_cache, struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_sigtramp_frame_cache (next_frame, this_cache);

  (*this_id) = frame_id_build (cache->base + 16, frame_pc_unwind (next_frame));
}

static void
amd64_sigtramp_frame_prev_register (struct frame_info *next_frame,
                                    void **this_cache,
                                    int regnum, int *optimizedp,
                                    enum lval_type *lvalp, CORE_ADDR *addrp,
                                    int *realnump, void *valuep)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (next_frame, this_cache);

  amd64_frame_prev_register (next_frame, this_cache, regnum,
                             optimizedp, lvalp, addrp, realnump, valuep);
}

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register
};

static const struct frame_unwind *
amd64_sigtramp_frame_sniffer (struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (next_frame));

  /* We shouldn't even bother if we don't have a sigcontext_addr
     handler.  */
  if (tdep->sigcontext_addr == NULL)
    return NULL;

  if (tdep->sigtramp_p != NULL)
    {
      if (tdep->sigtramp_p (next_frame))
        return &amd64_sigtramp_frame_unwind;
    }

  if (tdep->sigtramp_start != 0)
    {
      CORE_ADDR pc = frame_pc_unwind (next_frame);

      gdb_assert (tdep->sigtramp_end != 0);
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
        return &amd64_sigtramp_frame_unwind;
    }

  return NULL;
}


static CORE_ADDR
amd64_frame_base_address (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  return cache->base;
}

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};

static struct frame_id
amd64_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  char buf[8];
  CORE_ADDR fp;

  frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
  fp = extract_unsigned_integer (buf, 8);

  return frame_id_build (fp + 16, frame_pc_unwind (next_frame));
}

/* 16 byte align the SP per frame requirements.  */

static CORE_ADDR
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & -(CORE_ADDR)16;
}


/* Supply register REGNUM from the floating-point register set REGSET
   to register cache REGCACHE.  If REGNUM is -1, do this for all
   registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
                       int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = regset->descr;

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}

/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  */

static const struct regset *
amd64_regset_from_core_section (struct gdbarch *gdbarch,
                                const char *sect_name, size_t sect_size)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
    {
      if (tdep->fpregset == NULL)
        {
          tdep->fpregset = XMALLOC (struct regset);
          tdep->fpregset->descr = tdep;
          tdep->fpregset->supply_regset = amd64_supply_fpregset;
        }

      return tdep->fpregset;
    }

  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
}


void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, amd64_register_name);
  set_gdbarch_register_type (gdbarch, amd64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, amd64_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  set_gdbarch_unwind_dummy_id (gdbarch, amd64_unwind_dummy_id);

  /* FIXME: kettenis/20021026: This is ELF-specific.  Fine for now,
     since all supported AMD64 targets are ELF, but that might change
     in the future.  */
  set_gdbarch_in_solib_call_trampoline (gdbarch, in_plt_section);

  frame_unwind_append_sniffer (gdbarch, amd64_sigtramp_frame_sniffer);
  frame_unwind_append_sniffer (gdbarch, amd64_frame_sniffer);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
                                          amd64_regset_from_core_section);
}


#define I387_ST0_REGNUM AMD64_ST0_REGNUM

/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16 bits of the segment
   selector).  */

/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
                     const void *fxsave)
{
  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave && gdbarch_ptr_bit (get_regcache_arch (regcache)) == 64)
    {
      const char *regs = fxsave;

      if (regnum == -1 || regnum == I387_FISEG_REGNUM)
        regcache_raw_supply (regcache, I387_FISEG_REGNUM, regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM, regs + 20);
    }
}

/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
                      void *fxsave)
{
  char *regs = fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_ptr_bit (get_regcache_arch (regcache)) == 64)
    {
      if (regnum == -1 || regnum == I387_FISEG_REGNUM)
        regcache_raw_collect (regcache, I387_FISEG_REGNUM, regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
        regcache_raw_collect (regcache, I387_FOSEG_REGNUM, regs + 20);
    }
}