/* Target-dependent code for AMD64.

   Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "defs.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"

#include "gdb_assert.h"

#include "x86-64-tdep.h"
#include "i387-tdep.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

struct amd64_register_info
{
  char *name;
  struct type **type;
};

static struct amd64_register_info amd64_register_info[] =
{
  { "rax", &builtin_type_int64 },
  { "rbx", &builtin_type_int64 },
  { "rcx", &builtin_type_int64 },
  { "rdx", &builtin_type_int64 },
  { "rsi", &builtin_type_int64 },
  { "rdi", &builtin_type_int64 },
  { "rbp", &builtin_type_void_data_ptr },
  { "rsp", &builtin_type_void_data_ptr },

  /* %r8 is indeed register number 8.  */
  { "r8", &builtin_type_int64 },
  { "r9", &builtin_type_int64 },
  { "r10", &builtin_type_int64 },
  { "r11", &builtin_type_int64 },
  { "r12", &builtin_type_int64 },
  { "r13", &builtin_type_int64 },
  { "r14", &builtin_type_int64 },
  { "r15", &builtin_type_int64 },
  { "rip", &builtin_type_void_func_ptr },
  { "eflags", &builtin_type_int32 },
  { "cs", &builtin_type_int32 },
  { "ss", &builtin_type_int32 },
  { "ds", &builtin_type_int32 },
  { "es", &builtin_type_int32 },
  { "fs", &builtin_type_int32 },
  { "gs", &builtin_type_int32 },

  /* %st0 is register number 24.  */
  { "st0", &builtin_type_i387_ext },
  { "st1", &builtin_type_i387_ext },
  { "st2", &builtin_type_i387_ext },
  { "st3", &builtin_type_i387_ext },
  { "st4", &builtin_type_i387_ext },
  { "st5", &builtin_type_i387_ext },
  { "st6", &builtin_type_i387_ext },
  { "st7", &builtin_type_i387_ext },
  { "fctrl", &builtin_type_int32 },
  { "fstat", &builtin_type_int32 },
  { "ftag", &builtin_type_int32 },
  { "fiseg", &builtin_type_int32 },
  { "fioff", &builtin_type_int32 },
  { "foseg", &builtin_type_int32 },
  { "fooff", &builtin_type_int32 },
  { "fop", &builtin_type_int32 },

  /* %xmm0 is register number 40.  */
  { "xmm0", &builtin_type_v4sf },
  { "xmm1", &builtin_type_v4sf },
  { "xmm2", &builtin_type_v4sf },
  { "xmm3", &builtin_type_v4sf },
  { "xmm4", &builtin_type_v4sf },
  { "xmm5", &builtin_type_v4sf },
  { "xmm6", &builtin_type_v4sf },
  { "xmm7", &builtin_type_v4sf },
  { "xmm8", &builtin_type_v4sf },
  { "xmm9", &builtin_type_v4sf },
  { "xmm10", &builtin_type_v4sf },
  { "xmm11", &builtin_type_v4sf },
  { "xmm12", &builtin_type_v4sf },
  { "xmm13", &builtin_type_v4sf },
  { "xmm14", &builtin_type_v4sf },
  { "xmm15", &builtin_type_v4sf },
  { "mxcsr", &builtin_type_int32 }
};

/* Total number of registers.  */
#define AMD64_NUM_REGS \
  (sizeof (amd64_register_info) / sizeof (amd64_register_info[0]))
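
/* The table above works out to 57 registers in total: 16
   general-purpose registers, %rip, %eflags, six segment registers,
   eight x87 stack registers, eight x87 control/status registers,
   sixteen SSE registers and %mxcsr.  */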

/* Return the name of register REGNUM.  */

static const char *
amd64_register_name (int regnum)
{
  if (regnum >= 0 && regnum < AMD64_NUM_REGS)
    return amd64_register_info[regnum].name;

  return NULL;
}

/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  */

static struct type *
amd64_register_type (struct gdbarch *gdbarch, int regnum)
{
  gdb_assert (regnum >= 0 && regnum < AMD64_NUM_REGS);

  return *amd64_register_info[regnum].type;
}

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  X86_64_RAX_REGNUM, X86_64_RDX_REGNUM, 2, 1,
  4, X86_64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  X86_64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  X86_64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  X86_64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  X86_64_XMM0_REGNUM + 0, X86_64_XMM1_REGNUM,
  X86_64_XMM0_REGNUM + 2, X86_64_XMM0_REGNUM + 3,
  X86_64_XMM0_REGNUM + 4, X86_64_XMM0_REGNUM + 5,
  X86_64_XMM0_REGNUM + 6, X86_64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  X86_64_XMM0_REGNUM + 8, X86_64_XMM0_REGNUM + 9,
  X86_64_XMM0_REGNUM + 10, X86_64_XMM0_REGNUM + 11,
  X86_64_XMM0_REGNUM + 12, X86_64_XMM0_REGNUM + 13,
  X86_64_XMM0_REGNUM + 14, X86_64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  X86_64_ST0_REGNUM + 0, X86_64_ST0_REGNUM + 1,
  X86_64_ST0_REGNUM + 2, X86_64_ST0_REGNUM + 3,
  X86_64_ST0_REGNUM + 4, X86_64_ST0_REGNUM + 5,
  X86_64_ST0_REGNUM + 6, X86_64_ST0_REGNUM + 7
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (int reg)
{
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning ("Unmapped DWARF Register #%d encountered\n", reg);

  return regnum;
}
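
/* For example, with the mapping above DWARF register 0 resolves to
   %rax, 7 to %rsp, 16 (the return address column) to %rip, 17 to
   %xmm0 and 33 to %st0; anything outside the table yields -1 and the
   warning above.  */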

/* Return nonzero if a value of type TYPE stored in register REGNUM
   needs any special handling.  */

static int
amd64_convert_register_p (int regnum, struct type *type)
{
  return i386_fp_regnum_p (regnum);
}
\f

/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP or COMPLEX_X87,
     MEMORY is used as the class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
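
/* For example, merging INTEGER with SSE yields INTEGER by rule (d),
   and merging NO_CLASS with SSE yields SSE by rule (b).  */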

static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
     C++, is a non-POD structure or union type, or contains unaligned
     fields, it has class memory.  */
  if (len > 16)
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
        class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
                  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
        {
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
          enum amd64_reg_class subclass[2];

          /* Ignore static fields.  */
          if (TYPE_FIELD_STATIC (type, i))
            continue;

          gdb_assert (pos == 0 || pos == 1);

          amd64_classify (subtype, subclass);
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
          if (pos == 0)
            class[1] = amd64_merge_classes (class[1], subclass[1]);
        }
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
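
/* As an example of the rules above, a struct { long l; double d; }
   classifies as {INTEGER, SSE}: the first eightbyte holds the long
   and the second holds the double.  A struct larger than 16 bytes
   immediately classifies as MEMORY.  */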

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double and __m64 are in class SSE.  */
  else if (code == TYPE_CODE_FLT && (len == 4 || len == 8))
    /* FIXME: __m64.  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128 and __m128 are split into two
     halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  /* FIXME: __float128, __m128.  */

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
           || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *type,
                    struct regcache *regcache,
                    void *readbuf, const void *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { X86_64_RAX_REGNUM, X86_64_RDX_REGNUM };
  static int sse_regnum[] = { X86_64_XMM0_REGNUM, X86_64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In
     effect, this address becomes a hidden first argument.  */
  if (class[0] == AMD64_MEMORY)
    return RETURN_VALUE_STRUCT_CONVENTION;

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
        {
        case AMD64_INTEGER:
          /* 3. If the class is INTEGER, the next available register
             of the sequence %rax, %rdx is used.  */
          regnum = integer_regnum[integer_reg++];
          break;

        case AMD64_SSE:
          /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
          regnum = sse_regnum[sse_reg++];
          break;

        case AMD64_SSEUP:
          /* 5. If the class is SSEUP, the eightbyte is passed in the
             upper half of the last used SSE register.  */
          gdb_assert (sse_reg > 0);
          regnum = sse_regnum[sse_reg - 1];
          offset = 8;
          break;

        case AMD64_X87:
          /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
          regnum = X86_64_ST0_REGNUM;
          if (writebuf)
            i387_return_value (gdbarch, regcache);
          break;

        case AMD64_X87UP:
          /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  */
          gdb_assert (i > 0 && class[0] == AMD64_X87);
          regnum = X86_64_ST0_REGNUM;
          offset = 8;
          len = 2;
          break;

        case AMD64_NO_CLASS:
          continue;

        default:
          gdb_assert (!"Unexpected register class.");
        }

      gdb_assert (regnum != -1);

      if (readbuf)
        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
                                (char *) readbuf + i * 8);
      if (writebuf)
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
                                 (const char *) writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
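
/* Continuing the example above, a returned struct { long l; double d; }
   is assembled from %rax (first eightbyte) and %xmm0 (second
   eightbyte), while a long double comes back in %st0.  */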
\f

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
                      struct value **args, CORE_ADDR sp)
{
  static int integer_regnum[] =
  {
    X86_64_RDI_REGNUM, 4,       /* %rdi, %rsi */
    X86_64_RDX_REGNUM, 2,       /* %rdx, %rcx */
    8, 9                        /* %r8, %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    X86_64_XMM0_REGNUM + 0, X86_64_XMM1_REGNUM,
    X86_64_XMM0_REGNUM + 2, X86_64_XMM0_REGNUM + 3,
    X86_64_XMM0_REGNUM + 4, X86_64_XMM0_REGNUM + 5,
    X86_64_XMM0_REGNUM + 6, X86_64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = VALUE_TYPE (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
        {
          if (class[j] == AMD64_INTEGER)
            needed_integer_regs++;
          else if (class[j] == AMD64_SSE)
            needed_sse_regs++;
        }

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
          || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
          || (needed_integer_regs == 0 && needed_sse_regs == 0))
        {
          /* The argument will be passed on the stack.  */
          num_elements += ((len + 7) / 8);
          stack_args[num_stack_args++] = args[i];
        }
      else
        {
          /* The argument will be passed in registers.  */
          char *valbuf = VALUE_CONTENTS (args[i]);
          char buf[8];

          gdb_assert (len <= 16);

          for (j = 0; len > 0; j++, len -= 8)
            {
              int regnum = -1;
              int offset = 0;

              switch (class[j])
                {
                case AMD64_INTEGER:
                  regnum = integer_regnum[integer_reg++];
                  break;

                case AMD64_SSE:
                  regnum = sse_regnum[sse_reg++];
                  break;

                case AMD64_SSEUP:
                  gdb_assert (sse_reg > 0);
                  regnum = sse_regnum[sse_reg - 1];
                  offset = 8;
                  break;

                default:
                  gdb_assert (!"Unexpected register class.");
                }

              gdb_assert (regnum != -1);
              memset (buf, 0, sizeof buf);
              memcpy (buf, valbuf + j * 8, min (len, 8));
              regcache_raw_write_part (regcache, regnum, offset, 8, buf);
            }
        }
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = VALUE_TYPE (stack_args[i]);
      char *valbuf = VALUE_CONTENTS (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used."  */
  regcache_raw_write_unsigned (regcache, X86_64_RAX_REGNUM, sse_reg);
  return sp;
}
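
/* For example, for a call like f (1L, 2.0) the long is placed in
   %rdi, the double in %xmm0, and %rax is set to 1 because one SSE
   register was used; an argument classified as MEMORY (say, a
   24-byte struct) is instead written to the stack area reserved
   above.  */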

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, CORE_ADDR func_addr,
                       struct regcache *regcache, CORE_ADDR bp_addr,
                       int nargs, struct value **args, CORE_ADDR sp,
                       int struct_return, CORE_ADDR struct_addr)
{
  char buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, struct_addr);
      regcache_cooked_write (regcache, X86_64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, sp);
  regcache_cooked_write (regcache, X86_64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, X86_64_RBP_REGNUM, buf);

  return sp + 16;
}
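
/* Presumably SP + 16 is returned above so that it matches the
   "%rbp + 16" frame ID built by amd64_unwind_dummy_id below: the
   fake frame pointer was just set to SP, so both expressions name
   the same address for the dummy frame.  */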
\f

/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS X86_64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;
  int i;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);

  /* Base address.  */
  cache->base = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;

  return cache;
}

/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp         0x55
      movq %rsp, %rbp    0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
                        struct amd64_frame_cache *cache)
{
  static unsigned char proto[3] = { 0x48, 0x89, 0xe5 };
  unsigned char buf[3];
  unsigned char op;

  if (current_pc <= pc)
    return current_pc;

  op = read_memory_unsigned_integer (pc, 1);

  if (op == 0x55)               /* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[X86_64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)
        return pc + 1;

      /* OK, we actually have a frame.  */
      cache->frameless_p = 0;
      return pc + 4;
    }

  return pc;
}

/* Return PC of first real instruction.  */

static CORE_ADDR
amd64_skip_prologue (CORE_ADDR start_pc)
{
  struct amd64_frame_cache cache;
  CORE_ADDR pc;

  pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffff, &cache);
  if (cache.frameless_p)
    return start_pc;

  return pc;
}
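
/* For example, a function starting with the bytes 0x55 0x48 0x89 0xe5
   (pushq %rbp; movq %rsp, %rbp) has its prologue skipped by returning
   START_PC + 4; any other entry sequence leaves START_PC unchanged.  */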
\f

/* Normal frames.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = frame_func_unwind (next_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (cache->pc, frame_pc_unwind (next_frame), cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame, which means that CACHE->base
         currently holds the frame pointer for our calling frame.  If
         we're at the start of a function, or somewhere halfway through
         its prologue, the function's frame probably hasn't been fully
         set up yet.  Try to reconstruct the base address for the stack
         frame by looking at the stack pointer.  For truly "frameless"
         functions this might work too.  */

      frame_unwind_register (next_frame, X86_64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
    }
  else
    {
      frame_unwind_register (next_frame, X86_64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  */
  cache->saved_regs[X86_64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}

static void
amd64_frame_this_id (struct frame_info *next_frame, void **this_cache,
                     struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  /* This marks the outermost frame.  */
  if (cache->base == 0)
    return;

  (*this_id) = frame_id_build (cache->base + 16, cache->pc);
}

static void
amd64_frame_prev_register (struct frame_info *next_frame, void **this_cache,
                           int regnum, int *optimizedp,
                           enum lval_type *lvalp, CORE_ADDR *addrp,
                           int *realnump, void *valuep)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  gdb_assert (regnum >= 0);

  if (regnum == SP_REGNUM && cache->saved_sp)
    {
      *optimizedp = 0;
      *lvalp = not_lval;
      *addrp = 0;
      *realnump = -1;
      if (valuep)
        {
          /* Store the value.  */
          store_unsigned_integer (valuep, 8, cache->saved_sp);
        }
      return;
    }

  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    {
      *optimizedp = 0;
      *lvalp = lval_memory;
      *addrp = cache->saved_regs[regnum];
      *realnump = -1;
      if (valuep)
        {
          /* Read the value in from memory.  */
          read_memory (*addrp, valuep,
                       register_size (current_gdbarch, regnum));
        }
      return;
    }

  frame_register_unwind (next_frame, regnum,
                         optimizedp, lvalp, addrp, realnump, valuep);
}

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register
};

static const struct frame_unwind *
amd64_frame_sniffer (struct frame_info *next_frame)
{
  return &amd64_frame_unwind;
}
\f

/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
  CORE_ADDR addr;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  frame_unwind_register (next_frame, X86_64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8) - 8;

  addr = tdep->sigcontext_addr (next_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}

static void
amd64_sigtramp_frame_this_id (struct frame_info *next_frame,
                              void **this_cache, struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_sigtramp_frame_cache (next_frame, this_cache);

  (*this_id) = frame_id_build (cache->base + 16, frame_pc_unwind (next_frame));
}

static void
amd64_sigtramp_frame_prev_register (struct frame_info *next_frame,
                                    void **this_cache,
                                    int regnum, int *optimizedp,
                                    enum lval_type *lvalp, CORE_ADDR *addrp,
                                    int *realnump, void *valuep)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (next_frame, this_cache);

  amd64_frame_prev_register (next_frame, this_cache, regnum,
                             optimizedp, lvalp, addrp, realnump, valuep);
}

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register
};

static const struct frame_unwind *
amd64_sigtramp_frame_sniffer (struct frame_info *next_frame)
{
  CORE_ADDR pc = frame_pc_unwind (next_frame);
  char *name;

  find_pc_partial_function (pc, &name, NULL, NULL);
  if (PC_IN_SIGTRAMP (pc, name))
    {
      gdb_assert (gdbarch_tdep (current_gdbarch)->sigcontext_addr);

      return &amd64_sigtramp_frame_unwind;
    }

  return NULL;
}
\f

static CORE_ADDR
amd64_frame_base_address (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  return cache->base;
}

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};

static struct frame_id
amd64_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  char buf[8];
  CORE_ADDR fp;

  frame_unwind_register (next_frame, X86_64_RBP_REGNUM, buf);
  fp = extract_unsigned_integer (buf, 8);

  return frame_id_build (fp + 16, frame_pc_unwind (next_frame));
}

/* 16 byte align the SP per frame requirements.  */

static CORE_ADDR
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & -(CORE_ADDR)16;
}
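
/* For example, an incoming SP of 0x7fffffffe468 is rounded down to
   0x7fffffffe460.  */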
\f

/* Supply register REGNUM from the floating-point register set REGSET
   to register cache REGCACHE.  If REGNUM is -1, do this for all
   registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
                       int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = regset->descr;

  gdb_assert (len == tdep->sizeof_fpregset);
  x86_64_supply_fxsave (regcache, regnum, fpregs);
}

/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  */

static const struct regset *
amd64_regset_from_core_section (struct gdbarch *gdbarch,
                                const char *sect_name, size_t sect_size)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
    {
      if (tdep->fpregset == NULL)
        {
          tdep->fpregset = XMALLOC (struct regset);
          tdep->fpregset->descr = tdep;
          tdep->fpregset->supply_regset = amd64_supply_fpregset;
        }

      return tdep->fpregset;
    }

  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
}
\f

void
x86_64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = X86_64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, amd64_register_name);
  set_gdbarch_register_type (gdbarch, amd64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, X86_64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, X86_64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, X86_64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, X86_64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, amd64_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);
  /* Override, since this is handled by amd64_return_value.  */
  set_gdbarch_extract_struct_value_address (gdbarch, NULL);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  set_gdbarch_unwind_dummy_id (gdbarch, amd64_unwind_dummy_id);

  /* FIXME: kettenis/20021026: This is ELF-specific.  Fine for now,
     since all supported AMD64 targets are ELF, but that might change
     in the future.  */
  set_gdbarch_in_solib_call_trampoline (gdbarch, in_plt_section);

  frame_unwind_append_sniffer (gdbarch, amd64_sigtramp_frame_sniffer);
  frame_unwind_append_sniffer (gdbarch, amd64_frame_sniffer);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
                                          amd64_regset_from_core_section);
}
\f

#define I387_ST0_REGNUM X86_64_ST0_REGNUM

/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16-bits of the segment
   selector).  */

/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
x86_64_supply_fxsave (struct regcache *regcache, int regnum,
                      const void *fxsave)
{
  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave)
    {
      const char *regs = fxsave;

      if (regnum == -1 || regnum == I387_FISEG_REGNUM)
        regcache_raw_supply (regcache, I387_FISEG_REGNUM, regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM, regs + 20);
    }
}

/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value in GDB's register cache.  If REGNUM is -1, do
   this for all registers.  This function doesn't touch any of the
   reserved bits in *FXSAVE.  */

void
x86_64_fill_fxsave (char *fxsave, int regnum)
{
  i387_fill_fxsave (fxsave, regnum);

  if (regnum == -1 || regnum == I387_FISEG_REGNUM)
    regcache_collect (I387_FISEG_REGNUM, fxsave + 12);
  if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
    regcache_collect (I387_FOSEG_REGNUM, fxsave + 20);
}