/* gdb/x86-64-tdep.c (imported from the binutils-gdb repository).  */
1 /* Target-dependent code for the x86-64 for GDB, the GNU debugger.
2
3 Copyright 2001, 2002, 2003 Free Software Foundation, Inc.
4 Contributed by Jiri Smid, SuSE Labs.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23 #include "defs.h"
24 #include "arch-utils.h"
25 #include "block.h"
26 #include "dummy-frame.h"
27 #include "frame.h"
28 #include "frame-base.h"
29 #include "frame-unwind.h"
30 #include "inferior.h"
31 #include "gdbcmd.h"
32 #include "gdbcore.h"
33 #include "objfiles.h"
34 #include "regcache.h"
35 #include "regset.h"
36 #include "symfile.h"
37
38 #include "gdb_assert.h"
39
40 #include "x86-64-tdep.h"
41 #include "i387-tdep.h"
42
/* Register information.  */

struct x86_64_register_info
{
  char *name;			/* Register name, as presented to the user.  */
  struct type **type;		/* Pointer to the register's GDB type.  Kept
				   indirect because the builtin_type_*
				   objects are initialized at runtime.  */
};
50
/* Map from GDB register number to name and type.  The position of an
   entry in this table *is* its GDB register number: %rax is register 0
   and each following entry is the next number.  */

static struct x86_64_register_info x86_64_register_info[] =
{
  { "rax", &builtin_type_int64 },
  { "rbx", &builtin_type_int64 },
  { "rcx", &builtin_type_int64 },
  { "rdx", &builtin_type_int64 },
  { "rsi", &builtin_type_int64 },
  { "rdi", &builtin_type_int64 },
  { "rbp", &builtin_type_void_data_ptr },
  { "rsp", &builtin_type_void_data_ptr },

  /* %r8 is indeed register number 8.  */
  { "r8", &builtin_type_int64 },
  { "r9", &builtin_type_int64 },
  { "r10", &builtin_type_int64 },
  { "r11", &builtin_type_int64 },
  { "r12", &builtin_type_int64 },
  { "r13", &builtin_type_int64 },
  { "r14", &builtin_type_int64 },
  { "r15", &builtin_type_int64 },
  { "rip", &builtin_type_void_func_ptr },
  { "eflags", &builtin_type_int32 },

  /* Segment registers.  */
  { "ds", &builtin_type_int32 },
  { "es", &builtin_type_int32 },
  { "fs", &builtin_type_int32 },
  { "gs", &builtin_type_int32 },

  /* %st0 is register number 22.  */
  { "st0", &builtin_type_i387_ext },
  { "st1", &builtin_type_i387_ext },
  { "st2", &builtin_type_i387_ext },
  { "st3", &builtin_type_i387_ext },
  { "st4", &builtin_type_i387_ext },
  { "st5", &builtin_type_i387_ext },
  { "st6", &builtin_type_i387_ext },
  { "st7", &builtin_type_i387_ext },

  /* x87 FPU control/status registers.  */
  { "fctrl", &builtin_type_int32 },
  { "fstat", &builtin_type_int32 },
  { "ftag", &builtin_type_int32 },
  { "fiseg", &builtin_type_int32 },
  { "fioff", &builtin_type_int32 },
  { "foseg", &builtin_type_int32 },
  { "fooff", &builtin_type_int32 },
  { "fop", &builtin_type_int32 },

  /* %xmm0 is register number 38.  */
  { "xmm0", &builtin_type_v4sf },
  { "xmm1", &builtin_type_v4sf },
  { "xmm2", &builtin_type_v4sf },
  { "xmm3", &builtin_type_v4sf },
  { "xmm4", &builtin_type_v4sf },
  { "xmm5", &builtin_type_v4sf },
  { "xmm6", &builtin_type_v4sf },
  { "xmm7", &builtin_type_v4sf },
  { "xmm8", &builtin_type_v4sf },
  { "xmm9", &builtin_type_v4sf },
  { "xmm10", &builtin_type_v4sf },
  { "xmm11", &builtin_type_v4sf },
  { "xmm12", &builtin_type_v4sf },
  { "xmm13", &builtin_type_v4sf },
  { "xmm14", &builtin_type_v4sf },
  { "xmm15", &builtin_type_v4sf },
  { "mxcsr", &builtin_type_int32 }
};
115
/* Total number of registers: the element count of the table above.  */
#define X86_64_NUM_REGS \
  (sizeof (x86_64_register_info) / sizeof (x86_64_register_info[0]))
119
120 /* Return the name of register REGNUM. */
121
122 static const char *
123 x86_64_register_name (int regnum)
124 {
125 if (regnum >= 0 && regnum < X86_64_NUM_REGS)
126 return x86_64_register_info[regnum].name;
127
128 return NULL;
129 }
130
131 /* Return the GDB type object for the "standard" data type of data in
132 register REGNUM. */
133
134 static struct type *
135 x86_64_register_type (struct gdbarch *gdbarch, int regnum)
136 {
137 gdb_assert (regnum >= 0 && regnum < X86_64_NUM_REGS);
138
139 return *x86_64_register_info[regnum].type;
140 }
141
/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  Index is the DWARF register number; value is the GDB
   register number, or -1 when the DWARF register has no GDB
   counterpart.  The bare integers match positions in
   x86_64_register_info above.  */

static int x86_64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  X86_64_RAX_REGNUM, X86_64_RDX_REGNUM, 2, 1,	/* 2 = %rcx, 1 = %rbx.  */
  4, X86_64_RDI_REGNUM,				/* 4 = %rsi.  */

  /* Frame Pointer Register RBP.  */
  X86_64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  X86_64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Not mapped.  */
  -1,

  /* SSE Registers 0 - 7.  */
  X86_64_XMM0_REGNUM + 0, X86_64_XMM1_REGNUM,
  X86_64_XMM0_REGNUM + 2, X86_64_XMM0_REGNUM + 3,
  X86_64_XMM0_REGNUM + 4, X86_64_XMM0_REGNUM + 5,
  X86_64_XMM0_REGNUM + 6, X86_64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  X86_64_XMM0_REGNUM + 8, X86_64_XMM0_REGNUM + 9,
  X86_64_XMM0_REGNUM + 10, X86_64_XMM0_REGNUM + 11,
  X86_64_XMM0_REGNUM + 12, X86_64_XMM0_REGNUM + 13,
  X86_64_XMM0_REGNUM + 14, X86_64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  X86_64_ST0_REGNUM + 0, X86_64_ST0_REGNUM + 1,
  X86_64_ST0_REGNUM + 2, X86_64_ST0_REGNUM + 3,
  X86_64_ST0_REGNUM + 4, X86_64_ST0_REGNUM + 5,
  X86_64_ST0_REGNUM + 6, X86_64_ST0_REGNUM + 7
};

/* Number of DWARF register numbers the map above covers.  */
static const int x86_64_dwarf_regmap_len =
  (sizeof (x86_64_dwarf_regmap) / sizeof (x86_64_dwarf_regmap[0]));
184
185 /* Convert DWARF register number REG to the appropriate register
186 number used by GDB. */
187
188 static int
189 x86_64_dwarf_reg_to_regnum (int reg)
190 {
191 int regnum = -1;
192
193 if (reg >= 0 || reg < x86_64_dwarf_regmap_len)
194 regnum = x86_64_dwarf_regmap[reg];
195
196 if (regnum == -1)
197 warning ("Unmapped DWARF Register #%d encountered\n", reg);
198
199 return regnum;
200 }
201
/* Return nonzero if a value of type TYPE stored in register REGNUM
   needs any special handling.  */

static int
x86_64_convert_register_p (int regnum, struct type *type)
{
  /* Only the x87 floating-point registers need conversion (their
     contents are in i387 extended format); TYPE itself is not
     consulted here.  */
  return i386_fp_regnum_p (regnum);
}
210 \f
211
/* The returning of values is done according to the special algorithm.
   Some types are returned in registers and some (big structures) in
   memory.  See the System V psABI for details.  */

/* Maximum number of eightbyte words a single classified argument can
   occupy (structures larger than 16 bytes go to memory anyway, but
   the x87 and 16-byte integer cases use two entries).  */
#define MAX_CLASSES 4

/* Argument/return-value register classes, following the
   classification algorithm of the System V psABI.  */
enum x86_64_reg_class
{
  X86_64_NO_CLASS,		/* Eightbyte carries no data.  */
  X86_64_INTEGER_CLASS,		/* Full general-purpose register.  */
  X86_64_INTEGERSI_CLASS,	/* 32-bit value in a GPR.  */
  X86_64_SSE_CLASS,		/* SSE register.  */
  X86_64_SSESF_CLASS,		/* Single-precision float in SSE reg.  */
  X86_64_SSEDF_CLASS,		/* Double-precision float in SSE reg.  */
  X86_64_SSEUP_CLASS,		/* Upper half of an SSE register.  */
  X86_64_X87_CLASS,		/* x87 FP register (mantissa part).  */
  X86_64_X87UP_CLASS,		/* Upper (exponent) part of x87 value.  */
  X86_64_MEMORY_CLASS		/* Passed/returned in memory.  */
};
231
232 /* Return the union class of CLASS1 and CLASS2.
233 See the System V psABI for details. */
234
235 static enum x86_64_reg_class
236 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
237 {
238 /* Rule (a): If both classes are equal, this is the resulting class. */
239 if (class1 == class2)
240 return class1;
241
242 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
243 is the other class. */
244 if (class1 == X86_64_NO_CLASS)
245 return class2;
246 if (class2 == X86_64_NO_CLASS)
247 return class1;
248
249 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
250 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
251 return X86_64_MEMORY_CLASS;
252
253 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
254 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
255 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
256 return X86_64_INTEGERSI_CLASS;
257 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
258 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
259 return X86_64_INTEGER_CLASS;
260
261 /* Rule (e): If one of the classes is X87 or X87UP class, MEMORY is
262 used as class. */
263 if (class1 == X86_64_X87_CLASS || class1 == X86_64_X87UP_CLASS
264 || class2 == X86_64_X87_CLASS || class2 == X86_64_X87UP_CLASS)
265 return X86_64_MEMORY_CLASS;
266
267 /* Rule (f): Otherwise class SSE is used. */
268 return X86_64_SSE_CLASS;
269 }
270
/* Classify the argument type.  CLASSES will be filled by the register
   class used to pass each word of the operand.  The number of words
   is returned.  In case the parameter should be passed in memory, 0
   is returned.  As a special case for zero sized containers,
   classes[0] will be NO_CLASS and 1 is returned.

   See the System V psABI for details.  */

static int
classify_argument (struct type *type,
		   enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
{
  int bytes = TYPE_LENGTH (type);
  /* Number of eightbyte words needed to hold the value.  */
  int words = (bytes + 8 - 1) / 8;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_ARRAY:
    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int i;
	enum x86_64_reg_class subclasses[MAX_CLASSES];

	/* On x86-64 we pass structures larger than 16 bytes on the stack.  */
	if (bytes > 16)
	  return 0;

	for (i = 0; i < words; i++)
	  classes[i] = X86_64_NO_CLASS;

	/* Zero sized arrays or structures are NO_CLASS.  We return 0
	   to signalize memory class, so handle it as special case.  */
	if (!words)
	  {
	    classes[0] = X86_64_NO_CLASS;
	    return 1;
	  }
	switch (TYPE_CODE (type))
	  {
	  case TYPE_CODE_STRUCT:
	    {
	      int j;
	      for (j = 0; j < TYPE_NFIELDS (type); ++j)
		{
		  /* Classify each field at its bit offset within the
		     enclosing object, modulo 256 bits (32 bytes).  */
		  int num = classify_argument (TYPE_FIELDS (type)[j].type,
					       subclasses,
					       (TYPE_FIELDS (type)[j].loc.
						bitpos + bit_offset) % 256);
		  if (!num)
		    return 0;
		  for (i = 0; i < num; i++)
		    {
		      /* Index of the eightbyte this field starts in.  */
		      int pos =
			(TYPE_FIELDS (type)[j].loc.bitpos +
			 bit_offset) / 8 / 8;
		      classes[i + pos] =
			merge_classes (subclasses[i], classes[i + pos]);
		    }
		}
	    }
	    break;
	  case TYPE_CODE_ARRAY:
	    {
	      int num;

	      num = classify_argument (TYPE_TARGET_TYPE (type),
				       subclasses, bit_offset);
	      if (!num)
		return 0;

	      /* The partial classes are now full classes.  */
	      if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
		subclasses[0] = X86_64_SSE_CLASS;
	      if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
		subclasses[0] = X86_64_INTEGER_CLASS;

	      /* Replicate the element classification across all
		 eightbytes of the array.  */
	      for (i = 0; i < words; i++)
		classes[i] = subclasses[i % num];
	    }
	    break;
	  case TYPE_CODE_UNION:
	    {
	      int j;
	      {
		/* Every member starts at offset 0; merge the
		   classifications of all members.  */
		for (j = 0; j < TYPE_NFIELDS (type); ++j)
		  {
		    int num;
		    num = classify_argument (TYPE_FIELDS (type)[j].type,
					     subclasses, bit_offset);
		    if (!num)
		      return 0;
		    for (i = 0; i < num; i++)
		      classes[i] = merge_classes (subclasses[i], classes[i]);
		  }
	      }
	    }
	    break;
	  default:
	    break;
	  }
	/* Final merger cleanup.  */
	for (i = 0; i < words; i++)
	  {
	    /* If one class is MEMORY, everything should be passed in
	       memory.  */
	    if (classes[i] == X86_64_MEMORY_CLASS)
	      return 0;

	    /* The X86_64_SSEUP_CLASS should be always preceded by
	       X86_64_SSE_CLASS.  */
	    if (classes[i] == X86_64_SSEUP_CLASS
		&& (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
	      classes[i] = X86_64_SSE_CLASS;

	    /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS.  */
	    if (classes[i] == X86_64_X87UP_CLASS
		&& (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
	      classes[i] = X86_64_SSE_CLASS;
	  }
	return words;
      }
      break;
    case TYPE_CODE_FLT:
      switch (bytes)
	{
	case 4:
	  /* A float is SSESF only when aligned to an eightbyte
	     boundary; otherwise it shares the eightbyte as SSE.  */
	  if (!(bit_offset % 64))
	    classes[0] = X86_64_SSESF_CLASS;
	  else
	    classes[0] = X86_64_SSE_CLASS;
	  return 1;
	case 8:
	  classes[0] = X86_64_SSEDF_CLASS;
	  return 1;
	case 16:
	  /* long double: 64-bit mantissa in X87, exponent + padding
	     in X87UP.  */
	  classes[0] = X86_64_X87_CLASS;
	  classes[1] = X86_64_X87UP_CLASS;
	  return 2;
	}
      break;
    case TYPE_CODE_ENUM:
    case TYPE_CODE_REF:
    case TYPE_CODE_INT:
    case TYPE_CODE_PTR:
      switch (bytes)
	{
	case 1:
	case 2:
	case 4:
	case 8:
	  /* Values wholly contained in the low 32 bits of their
	     eightbyte get the SI flavor.  */
	  if (bytes * 8 + bit_offset <= 32)
	    classes[0] = X86_64_INTEGERSI_CLASS;
	  else
	    classes[0] = X86_64_INTEGER_CLASS;
	  return 1;
	case 16:
	  classes[0] = classes[1] = X86_64_INTEGER_CLASS;
	  return 2;
	default:
	  break;
	}
      /* NOTE(review): no `break' here -- an integer-like type of
	 unexpected size falls through to the VOID case below and is
	 reported as memory class.  Presumably intentional; confirm.  */
    case TYPE_CODE_VOID:
      return 0;
    default:			/* Avoid warning.  */
      break;
    }
  internal_error (__FILE__, __LINE__,
		  "classify_argument: unknown argument type");
}
441
442 /* Examine the argument and set *INT_NREGS and *SSE_NREGS to the
443 number of registers required based on the information passed in
444 CLASSES. Return 0 if parameter should be passed in memory. */
445
446 static int
447 examine_argument (enum x86_64_reg_class classes[MAX_CLASSES],
448 int n, int *int_nregs, int *sse_nregs)
449 {
450 *int_nregs = 0;
451 *sse_nregs = 0;
452 if (!n)
453 return 0;
454 for (n--; n >= 0; n--)
455 switch (classes[n])
456 {
457 case X86_64_INTEGER_CLASS:
458 case X86_64_INTEGERSI_CLASS:
459 (*int_nregs)++;
460 break;
461 case X86_64_SSE_CLASS:
462 case X86_64_SSESF_CLASS:
463 case X86_64_SSEDF_CLASS:
464 (*sse_nregs)++;
465 break;
466 case X86_64_NO_CLASS:
467 case X86_64_SSEUP_CLASS:
468 case X86_64_X87_CLASS:
469 case X86_64_X87UP_CLASS:
470 break;
471 case X86_64_MEMORY_CLASS:
472 internal_error (__FILE__, __LINE__,
473 "examine_argument: unexpected memory class");
474 }
475 return 1;
476 }
477
/* Number of general-purpose and SSE argument registers defined by the
   psABI calling convention.  */
#define INT_REGS 6
#define SSE_REGS 8

/* Write the arguments ARGS (NARGS of them) into registers and onto
   the stack according to their psABI classification, starting from
   stack pointer SP.  Return the adjusted stack pointer.  */

static CORE_ADDR
x86_64_push_arguments (struct regcache *regcache, int nargs,
		       struct value **args, CORE_ADDR sp)
{
  /* INTREG and SSEREG count in units of *half* a register: a full
     eightbyte register consumes two units, and they are rounded up to
     a register boundary after each argument (see the bottom of the
     word loop).  */
  int intreg = 0;
  int ssereg = 0;
  /* For varargs functions we have to pass the total number of SSE
     registers used in %rax.  So, let's count this number.  */
  int total_sse_args = 0;
  /* Once an SSE/int argument is passed on the stack, all subsequent
     arguments are passed there.  */
  int sse_stack = 0;
  int int_stack = 0;
  unsigned total_sp;
  int i;
  char buf[8];
  /* Argument-passing GPRs in psABI order; bare numbers are GDB
     register numbers (%rsi = 4, %rcx = 2, %r8 = 8, %r9 = 9).  */
  static int int_parameter_registers[INT_REGS] =
  {
    X86_64_RDI_REGNUM, 4,	/* %rdi, %rsi */
    X86_64_RDX_REGNUM, 2,	/* %rdx, %rcx */
    8, 9			/* %r8, %r9 */
  };
  /* %xmm0 - %xmm7 */
  static int sse_parameter_registers[SSE_REGS] =
  {
    X86_64_XMM0_REGNUM + 0, X86_64_XMM1_REGNUM,
    X86_64_XMM0_REGNUM + 2, X86_64_XMM0_REGNUM + 3,
    X86_64_XMM0_REGNUM + 4, X86_64_XMM0_REGNUM + 5,
    X86_64_XMM0_REGNUM + 6, X86_64_XMM0_REGNUM + 7,
  };
  /* Indices (into ARGS) of the arguments that go on the stack, in
     left-to-right order.  */
  int stack_values_count = 0;
  int *stack_values;
  stack_values = alloca (nargs * sizeof (int));

  for (i = 0; i < nargs; i++)
    {
      enum x86_64_reg_class class[MAX_CLASSES];
      int n = classify_argument (args[i]->type, class, 0);
      int needed_intregs;
      int needed_sseregs;

      if (!n ||
	  !examine_argument (class, n, &needed_intregs, &needed_sseregs))
	{			/* memory class */
	  stack_values[stack_values_count++] = i;
	}
      else
	{
	  int j;
	  int offset = 0;

	  /* If this argument would exhaust either register file, it
	     and everything after it of that kind goes on the stack.  */
	  if (intreg / 2 + needed_intregs > INT_REGS)
	    int_stack = 1;
	  if (ssereg / 2 + needed_sseregs > SSE_REGS)
	    sse_stack = 1;
	  if (!sse_stack)
	    total_sse_args += needed_sseregs;

	  /* Distribute the argument's eightbytes to registers.  */
	  for (j = 0; j < n; j++)
	    {
	      switch (class[j])
		{
		case X86_64_NO_CLASS:
		  break;
		case X86_64_INTEGER_CLASS:
		  if (int_stack)
		    stack_values[stack_values_count++] = i;
		  else
		    {
		      /* (intreg + 1) / 2 rounds up to the next free
			 full register when INTREG is odd.  */
		      regcache_cooked_write
			(regcache, int_parameter_registers[(intreg + 1) / 2],
			 VALUE_CONTENTS_ALL (args[i]) + offset);
		      offset += 8;
		      intreg += 2;
		    }
		  break;
		case X86_64_INTEGERSI_CLASS:
		  if (int_stack)
		    stack_values[stack_values_count++] = i;
		  else
		    {
		      /* Sign-extend the 32-bit value into the full
			 register.  */
		      LONGEST val = extract_signed_integer
			(VALUE_CONTENTS_ALL (args[i]) + offset, 4);
		      regcache_cooked_write_signed
			(regcache, int_parameter_registers[intreg / 2], val);

		      /* NOTE(review): INTREG advances by only one
			 half-unit here, so two consecutive INTEGERSI
			 words of the same argument would target the
			 same register (intreg / 2 unchanged after the
			 first increment).  Looks suspicious; verify
			 against the psABI.  */
		      offset += 8;
		      intreg++;
		    }
		  break;
		case X86_64_SSEDF_CLASS:
		case X86_64_SSESF_CLASS:
		case X86_64_SSE_CLASS:
		  if (sse_stack)
		    stack_values[stack_values_count++] = i;
		  else
		    {
		      regcache_cooked_write
			(regcache, sse_parameter_registers[(ssereg + 1) / 2],
			 VALUE_CONTENTS_ALL (args[i]) + offset);
		      offset += 8;
		      ssereg += 2;
		    }
		  break;
		case X86_64_SSEUP_CLASS:
		  /* NOTE(review): SSEUP data should occupy the upper
		     half of the previous SSE register, but this writes
		     a whole register at ssereg / 2 -- confirm.  */
		  if (sse_stack)
		    stack_values[stack_values_count++] = i;
		  else
		    {
		      regcache_cooked_write
			(regcache, sse_parameter_registers[ssereg / 2],
			 VALUE_CONTENTS_ALL (args[i]) + offset);
		      offset += 8;
		      ssereg++;
		    }
		  break;
		case X86_64_X87_CLASS:
		case X86_64_MEMORY_CLASS:
		  stack_values[stack_values_count++] = i;
		  break;
		case X86_64_X87UP_CLASS:
		  break;
		default:
		  internal_error (__FILE__, __LINE__,
				  "Unexpected argument class");
		}
	      /* Round both counters up to a full-register boundary
		 after each eightbyte.  */
	      intreg += intreg % 2;
	      ssereg += ssereg % 2;
	    }
	}
    }

  /* We have to make sure that the stack is 16-byte aligned after the
     setup.  Let's calculate size of arguments first, align stack and
     then fill in the arguments.  */
  total_sp = 0;
  for (i = 0; i < stack_values_count; i++)
    {
      struct value *arg = args[stack_values[i]];
      int len = TYPE_LENGTH (VALUE_ENCLOSING_TYPE (arg));
      total_sp += (len + 7) & ~7;
    }
  /* total_sp is now a multiple of 8, if it is not a multiple of 16,
     change the stack pointer so that it will be afterwards correctly
     aligned.  */
  if (total_sp & 15)
    sp -= 8;

  /* Push any remaining arguments onto the stack, last one lowest so
     they end up in left-to-right order at increasing addresses.  */
  while (--stack_values_count >= 0)
    {
      struct value *arg = args[stack_values[stack_values_count]];
      int len = TYPE_LENGTH (VALUE_ENCLOSING_TYPE (arg));

      /* Make sure the stack is 8-byte-aligned.  */
      sp -= (len + 7) & ~7;
      write_memory (sp, VALUE_CONTENTS_ALL (arg), len);
    }

  /* Write number of SSE type arguments to RAX to take care of varargs
     functions.  */
  store_unsigned_integer (buf, 8, total_sse_args);
  regcache_cooked_write (regcache, X86_64_RAX_REGNUM, buf);

  return sp;
}
647
/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,		/* General-purpose register.  */
  AMD64_SSE,			/* SSE register.  */
  AMD64_SSEUP,			/* Upper half of an SSE register.  */
  AMD64_X87,			/* x87 register (long double mantissa).  */
  AMD64_X87UP,			/* Upper part of an x87 value.  */
  AMD64_COMPLEX_X87,		/* complex long double (per the psABI).  */
  AMD64_NO_CLASS,		/* Eightbyte carries no data.  */
  AMD64_MEMORY			/* Passed/returned in memory.  */
};
661
662 /* Return the union class of CLASS1 and CLASS2. See the psABI for
663 details. */
664
665 static enum amd64_reg_class
666 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
667 {
668 /* Rule (a): If both classes are equal, this is the resulting class. */
669 if (class1 == class2)
670 return class1;
671
672 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
673 is the other class. */
674 if (class1 == AMD64_NO_CLASS)
675 return class2;
676 if (class2 == AMD64_NO_CLASS)
677 return class1;
678
679 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
680 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
681 return AMD64_MEMORY;
682
683 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
684 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
685 return AMD64_INTEGER;
686
687 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
688 MEMORY is used as class. */
689 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
690 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
691 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
692 return AMD64_MEMORY;
693
694 /* Rule (f): Otherwise class SSE is used. */
695 return AMD64_SSE;
696 }
697
698 static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
699
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  CLASS[0]
   describes the first eightbyte, CLASS[1] the second.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
     C++, is a non-POD structure or union type, or contains
     unaligned fields, it has class memory.  */
  if (len > 16)
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      /* An array spilling into the second eightbyte repeats the
	 element class there.  */
      if (len > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* Which eightbyte the field starts in.  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  /* A field starting in the first eightbyte may extend into
	     the second; merge its second-word class too.  */
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
771
772 /* Classify TYPE, and store the result in CLASS. */
773
774 static void
775 amd64_classify (struct type *type, enum amd64_reg_class class[2])
776 {
777 enum type_code code = TYPE_CODE (type);
778 int len = TYPE_LENGTH (type);
779
780 class[0] = class[1] = AMD64_NO_CLASS;
781
782 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
783 long, long long, and pointers are in the INTEGER class. */
784 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
785 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
786 && (len == 1 || len == 2 || len == 4 || len == 8))
787 class[0] = AMD64_INTEGER;
788
789 /* Arguments of types float, double and __m64 are in class SSE. */
790 else if (code == TYPE_CODE_FLT && (len == 4 || len == 8))
791 /* FIXME: __m64 . */
792 class[0] = AMD64_SSE;
793
794 /* Arguments of types __float128 and __m128 are split into two
795 halves. The least significant ones belong to class SSE, the most
796 significant one to class SSEUP. */
797 /* FIXME: __float128, __m128. */
798
799 /* The 64-bit mantissa of arguments of type long double belongs to
800 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
801 class X87UP. */
802 else if (code == TYPE_CODE_FLT && len == 16)
803 /* Class X87 and X87UP. */
804 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
805
806 /* Aggregates. */
807 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
808 || code == TYPE_CODE_UNION)
809 amd64_classify_aggregate (type, class);
810 }
811
/* Implement the "return_value" gdbarch method for a function
   returning TYPE: fetch the value into READBUF or store it from
   WRITEBUF, following the psABI classification.  At most one of
   READBUF/WRITEBUF is non-NULL.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *type,
		    struct regcache *regcache,
		    void *readbuf, const void *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { X86_64_RAX_REGNUM, X86_64_RDX_REGNUM };
  static int sse_regnum[] = { X86_64_XMM0_REGNUM, X86_64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In
     effect, this address becomes a hidden first argument.  */
  if (class[0] == AMD64_MEMORY)
    return RETURN_VALUE_STRUCT_CONVENTION;

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* One iteration per eightbyte of the value.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = X86_64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  Only the 2-byte
	     exponent remains; setting LEN to 2 makes min (len, 8)
	     copy exactly those bytes and ends the loop afterwards.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = X86_64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				(char *) readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 (const char *) writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
903 \f
904
905 static CORE_ADDR
906 x86_64_push_dummy_call (struct gdbarch *gdbarch, CORE_ADDR func_addr,
907 struct regcache *regcache, CORE_ADDR bp_addr,
908 int nargs, struct value **args, CORE_ADDR sp,
909 int struct_return, CORE_ADDR struct_addr)
910 {
911 char buf[8];
912
913 /* Pass arguments. */
914 sp = x86_64_push_arguments (regcache, nargs, args, sp);
915
916 /* Pass "hidden" argument". */
917 if (struct_return)
918 {
919 store_unsigned_integer (buf, 8, struct_addr);
920 regcache_cooked_write (regcache, X86_64_RDI_REGNUM, buf);
921 }
922
923 /* Store return address. */
924 sp -= 8;
925 store_unsigned_integer (buf, 8, bp_addr);
926 write_memory (sp, buf, 8);
927
928 /* Finally, update the stack pointer... */
929 store_unsigned_integer (buf, 8, sp);
930 regcache_cooked_write (regcache, X86_64_RSP_REGNUM, buf);
931
932 /* ...and fake a frame pointer. */
933 regcache_cooked_write (regcache, X86_64_RBP_REGNUM, buf);
934
935 return sp + 16;
936 }
937 \f
938
/* The maximum number of saved registers.  This should include %rip.  */
#define X86_64_NUM_SAVED_REGS X86_64_NUM_GREGS

/* Per-frame unwind state, built by x86_64_frame_cache.  */
struct x86_64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  /* Amount the prologue has moved %rsp below the frame base; starts
     at -8 (see x86_64_alloc_frame_cache) and grows as pushes are
     recognized.  */
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  Offsets relative to the frame base until
     adjusted into absolute addresses; -1 means "not saved".  */
  CORE_ADDR saved_regs[X86_64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;

  /* Do we have a frame?  */
  int frameless_p;
};
956
957 /* Allocate and initialize a frame cache. */
958
959 static struct x86_64_frame_cache *
960 x86_64_alloc_frame_cache (void)
961 {
962 struct x86_64_frame_cache *cache;
963 int i;
964
965 cache = FRAME_OBSTACK_ZALLOC (struct x86_64_frame_cache);
966
967 /* Base address. */
968 cache->base = 0;
969 cache->sp_offset = -8;
970 cache->pc = 0;
971
972 /* Saved registers. We initialize these to -1 since zero is a valid
973 offset (that's where %rbp is supposed to be stored). */
974 for (i = 0; i < X86_64_NUM_SAVED_REGS; i++)
975 cache->saved_regs[i] = -1;
976 cache->saved_sp = 0;
977
978 /* Frameless until proven otherwise. */
979 cache->frameless_p = 1;
980
981 return cache;
982 }
983
984 /* Do a limited analysis of the prologue at PC and update CACHE
985 accordingly. Bail out early if CURRENT_PC is reached. Return the
986 address where the analysis stopped.
987
988 We will handle only functions beginning with:
989
990 pushq %rbp 0x55
991 movq %rsp, %rbp 0x48 0x89 0xe5
992
993 Any function that doesn't start with this sequence will be assumed
994 to have no prologue and thus no valid frame pointer in %rbp. */
995
996 static CORE_ADDR
997 x86_64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
998 struct x86_64_frame_cache *cache)
999 {
1000 static unsigned char proto[3] = { 0x48, 0x89, 0xe5 };
1001 unsigned char buf[3];
1002 unsigned char op;
1003
1004 if (current_pc <= pc)
1005 return current_pc;
1006
1007 op = read_memory_unsigned_integer (pc, 1);
1008
1009 if (op == 0x55) /* pushq %rbp */
1010 {
1011 /* Take into account that we've executed the `pushq %rbp' that
1012 starts this instruction sequence. */
1013 cache->saved_regs[X86_64_RBP_REGNUM] = 0;
1014 cache->sp_offset += 8;
1015
1016 /* If that's all, return now. */
1017 if (current_pc <= pc + 1)
1018 return current_pc;
1019
1020 /* Check for `movq %rsp, %rbp'. */
1021 read_memory (pc + 1, buf, 3);
1022 if (memcmp (buf, proto, 3) != 0)
1023 return pc + 1;
1024
1025 /* OK, we actually have a frame. */
1026 cache->frameless_p = 0;
1027 return pc + 4;
1028 }
1029
1030 return pc;
1031 }
1032
1033 /* Return PC of first real instruction. */
1034
1035 static CORE_ADDR
1036 x86_64_skip_prologue (CORE_ADDR start_pc)
1037 {
1038 struct x86_64_frame_cache cache;
1039 CORE_ADDR pc;
1040
1041 pc = x86_64_analyze_prologue (start_pc, 0xffffffffffffffff, &cache);
1042 if (cache.frameless_p)
1043 return start_pc;
1044
1045 return pc;
1046 }
1047 \f
1048
/* Normal frames.  */

/* Build (or return the cached) unwind state for the frame "previous
   to" NEXT_FRAME, caching it in *THIS_CACHE.  */

static struct x86_64_frame_cache *
x86_64_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct x86_64_frame_cache *cache;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = x86_64_alloc_frame_cache ();
  *this_cache = cache;

  /* Analyze however much of the prologue has executed so far.  */
  cache->pc = frame_func_unwind (next_frame);
  if (cache->pc != 0)
    x86_64_analyze_prologue (cache->pc, frame_pc_unwind (next_frame), cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame, which means that CACHE->base
	 currently holds the frame pointer for our calling frame.  If
	 we're at the start of a function, or somewhere half-way its
	 prologue, the function's frame probably hasn't been fully
	 setup yet.  Try to reconstruct the base address for the stack
	 frame by looking at the stack pointer.  For truly "frameless"
	 functions this might work too.  */

      frame_unwind_register (next_frame, X86_64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
    }
  else
    {
      frame_unwind_register (next_frame, X86_64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  */
  cache->saved_regs[X86_64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < X86_64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}
1104
1105 static void
1106 x86_64_frame_this_id (struct frame_info *next_frame, void **this_cache,
1107 struct frame_id *this_id)
1108 {
1109 struct x86_64_frame_cache *cache =
1110 x86_64_frame_cache (next_frame, this_cache);
1111
1112 /* This marks the outermost frame. */
1113 if (cache->base == 0)
1114 return;
1115
1116 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1117 }
1118
1119 static void
1120 x86_64_frame_prev_register (struct frame_info *next_frame, void **this_cache,
1121 int regnum, int *optimizedp,
1122 enum lval_type *lvalp, CORE_ADDR *addrp,
1123 int *realnump, void *valuep)
1124 {
1125 struct x86_64_frame_cache *cache =
1126 x86_64_frame_cache (next_frame, this_cache);
1127
1128 gdb_assert (regnum >= 0);
1129
1130 if (regnum == SP_REGNUM && cache->saved_sp)
1131 {
1132 *optimizedp = 0;
1133 *lvalp = not_lval;
1134 *addrp = 0;
1135 *realnump = -1;
1136 if (valuep)
1137 {
1138 /* Store the value. */
1139 store_unsigned_integer (valuep, 8, cache->saved_sp);
1140 }
1141 return;
1142 }
1143
1144 if (regnum < X86_64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1145 {
1146 *optimizedp = 0;
1147 *lvalp = lval_memory;
1148 *addrp = cache->saved_regs[regnum];
1149 *realnump = -1;
1150 if (valuep)
1151 {
1152 /* Read the value in from memory. */
1153 read_memory (*addrp, valuep,
1154 register_size (current_gdbarch, regnum));
1155 }
1156 return;
1157 }
1158
1159 frame_register_unwind (next_frame, regnum,
1160 optimizedp, lvalp, addrp, realnump, valuep);
1161 }
1162
/* Unwinder for normal (%rbp-based or frameless) x86-64 frames.  */

static const struct frame_unwind x86_64_frame_unwind =
{
  NORMAL_FRAME,
  x86_64_frame_this_id,
  x86_64_frame_prev_register
};
1169
/* Frame sniffer that unconditionally claims the frame for the normal
   unwinder; registered last so more specific sniffers (e.g. the
   signal trampoline sniffer below) get first pick.  */

static const struct frame_unwind *
x86_64_frame_sniffer (struct frame_info *next_frame)
{
  return &x86_64_frame_unwind;
}
1175 \f
1176
1177 /* Signal trampolines. */
1178
1179 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1180 64-bit variants. This would require using identical frame caches
1181 on both platforms. */
1182
1183 static struct x86_64_frame_cache *
1184 x86_64_sigtramp_frame_cache (struct frame_info *next_frame, void **this_cache)
1185 {
1186 struct x86_64_frame_cache *cache;
1187 struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
1188 CORE_ADDR addr;
1189 char buf[8];
1190 int i;
1191
1192 if (*this_cache)
1193 return *this_cache;
1194
1195 cache = x86_64_alloc_frame_cache ();
1196
1197 frame_unwind_register (next_frame, X86_64_RSP_REGNUM, buf);
1198 cache->base = extract_unsigned_integer (buf, 8) - 8;
1199
1200 addr = tdep->sigcontext_addr (next_frame);
1201 gdb_assert (tdep->sc_reg_offset);
1202 gdb_assert (tdep->sc_num_regs <= X86_64_NUM_SAVED_REGS);
1203 for (i = 0; i < tdep->sc_num_regs; i++)
1204 if (tdep->sc_reg_offset[i] != -1)
1205 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
1206
1207 *this_cache = cache;
1208 return cache;
1209 }
1210
1211 static void
1212 x86_64_sigtramp_frame_this_id (struct frame_info *next_frame,
1213 void **this_cache, struct frame_id *this_id)
1214 {
1215 struct x86_64_frame_cache *cache =
1216 x86_64_sigtramp_frame_cache (next_frame, this_cache);
1217
1218 (*this_id) = frame_id_build (cache->base + 16, frame_pc_unwind (next_frame));
1219 }
1220
/* Implement the "prev_register" frame unwinder method for signal
   trampoline frames.  Once the sigtramp cache is populated (mapping
   each register to its address inside the signal context), the normal
   frame machinery can perform the actual lookup.  */

static void
x86_64_sigtramp_frame_prev_register (struct frame_info *next_frame,
				     void **this_cache,
				     int regnum, int *optimizedp,
				     enum lval_type *lvalp, CORE_ADDR *addrp,
				     int *realnump, void *valuep)
{
  /* Make sure we've initialized the cache.  */
  x86_64_sigtramp_frame_cache (next_frame, this_cache);

  x86_64_frame_prev_register (next_frame, this_cache, regnum,
			      optimizedp, lvalp, addrp, realnump, valuep);
}
1234
/* Unwinder for x86-64 signal trampoline frames.  */

static const struct frame_unwind x86_64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  x86_64_sigtramp_frame_this_id,
  x86_64_sigtramp_frame_prev_register
};
1241
1242 static const struct frame_unwind *
1243 x86_64_sigtramp_frame_sniffer (struct frame_info *next_frame)
1244 {
1245 CORE_ADDR pc = frame_pc_unwind (next_frame);
1246 char *name;
1247
1248 find_pc_partial_function (pc, &name, NULL, NULL);
1249 if (PC_IN_SIGTRAMP (pc, name))
1250 {
1251 gdb_assert (gdbarch_tdep (current_gdbarch)->sigcontext_addr);
1252
1253 return &x86_64_sigtramp_frame_unwind;
1254 }
1255
1256 return NULL;
1257 }
1258 \f
1259
1260 static CORE_ADDR
1261 x86_64_frame_base_address (struct frame_info *next_frame, void **this_cache)
1262 {
1263 struct x86_64_frame_cache *cache =
1264 x86_64_frame_cache (next_frame, this_cache);
1265
1266 return cache->base;
1267 }
1268
/* Default frame base: the frame, locals and arguments bases all
   coincide with the cached frame base address.  */

static const struct frame_base x86_64_frame_base =
{
  &x86_64_frame_unwind,
  x86_64_frame_base_address,
  x86_64_frame_base_address,
  x86_64_frame_base_address
};
1276
1277 static struct frame_id
1278 x86_64_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
1279 {
1280 char buf[8];
1281 CORE_ADDR fp;
1282
1283 frame_unwind_register (next_frame, X86_64_RBP_REGNUM, buf);
1284 fp = extract_unsigned_integer (buf, 8);
1285
1286 return frame_id_build (fp + 16, frame_pc_unwind (next_frame));
1287 }
1288
1289 /* 16 byte align the SP per frame requirements. */
1290
1291 static CORE_ADDR
1292 x86_64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1293 {
1294 return sp & -(CORE_ADDR)16;
1295 }
1296 \f
1297
/* Supply register REGNUM from the floating-point register set REGSET
   to register cache REGCACHE.  If REGNUM is -1, do this for all
   registers in REGSET.  */

static void
x86_64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
			int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = regset->descr;

  /* On the x86-64 the floating-point register set is the FXSAVE
     area, so hand it straight to the FXSAVE decoder.  */
  gdb_assert (len == tdep->sizeof_fpregset);
  x86_64_supply_fxsave (regcache, regnum, fpregs);
}
1311
1312 /* Return the appropriate register set for the core section identified
1313 by SECT_NAME and SECT_SIZE. */
1314
1315 static const struct regset *
1316 x86_64_regset_from_core_section (struct gdbarch *gdbarch,
1317 const char *sect_name, size_t sect_size)
1318 {
1319 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1320
1321 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
1322 {
1323 if (tdep->fpregset == NULL)
1324 {
1325 tdep->fpregset = XMALLOC (struct regset);
1326 tdep->fpregset->descr = tdep;
1327 tdep->fpregset->supply_regset = x86_64_supply_fpregset;
1328 }
1329
1330 return tdep->fpregset;
1331 }
1332
1333 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
1334 }
1335 \f
1336
/* Initialize the common x86-64 parts of GDBARCH: fundamental type
   sizes, register names/numbers, calling-convention and frame
   unwinding support.  OS-specific tdep code calls this and then
   layers its own settings (signal trampolines, register offsets) on
   top.  */

void
x86_64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = X86_64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on the x86-64 a `long double' actually
     takes up 128 bits, even though it's still based on the i387
     extended floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, X86_64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, x86_64_register_name);
  set_gdbarch_register_type (gdbarch, x86_64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, X86_64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, X86_64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, X86_64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, X86_64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for the x86-64 is
     referred to as the "DWARF Register Number Mapping" in the System
     V psABI.  The preferred debugging format for all known x86-64
     targets is actually DWARF2, and GCC doesn't seem to support DWARF
     (that is DWARF-1), but we provide the same mapping just in case.
     This mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, x86_64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf_reg_to_regnum (gdbarch, x86_64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, x86_64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported x86-64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, x86_64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, x86_64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, x86_64_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);
  /* Override, since this is handled by x86_64_extract_return_value.  */
  set_gdbarch_extract_struct_value_address (gdbarch, NULL);

  set_gdbarch_skip_prologue (gdbarch, x86_64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  set_gdbarch_unwind_dummy_id (gdbarch, x86_64_unwind_dummy_id);

  /* FIXME: kettenis/20021026: This is ELF-specific.  Fine for now,
     since all supported x86-64 targets are ELF, but that might change
     in the future.  */
  set_gdbarch_in_solib_call_trampoline (gdbarch, in_plt_section);

  /* The sigtramp sniffer is appended before the normal-frame sniffer,
     presumably so that signal trampolines are recognized before the
     catch-all normal unwinder claims the frame.  */
  frame_unwind_append_sniffer (gdbarch, x86_64_sigtramp_frame_sniffer);
  frame_unwind_append_sniffer (gdbarch, x86_64_frame_sniffer);
  frame_base_set_default (gdbarch, &x86_64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
					  x86_64_regset_from_core_section);
}
1418 \f
1419
1420 #define I387_ST0_REGNUM X86_64_ST0_REGNUM
1421
1422 /* The 64-bit FXSAVE format differs from the 32-bit format in the
1423 sense that the instruction pointer and data pointer are simply
1424 64-bit offsets into the code segment and the data segment instead
1425 of a selector offset pair. The functions below store the upper 32
1426 bits of these pointers (instead of just the 16-bits of the segment
1427 selector). */
1428
1429 /* Fill register REGNUM in REGCACHE with the appropriate
1430 floating-point or SSE register value from *FXSAVE. If REGNUM is
1431 -1, do this for all registers. This function masks off any of the
1432 reserved bits in *FXSAVE. */
1433
1434 void
1435 x86_64_supply_fxsave (struct regcache *regcache, int regnum,
1436 const void *fxsave)
1437 {
1438 i387_supply_fxsave (regcache, regnum, fxsave);
1439
1440 if (fxsave)
1441 {
1442 const char *regs = fxsave;
1443
1444 if (regnum == -1 || regnum == I387_FISEG_REGNUM)
1445 regcache_raw_supply (regcache, I387_FISEG_REGNUM, regs + 12);
1446 if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
1447 regcache_raw_supply (regcache, I387_FOSEG_REGNUM, regs + 20);
1448 }
1449 }
1450
/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value in GDB's register cache.  If REGNUM is -1, do
   this for all registers.  This function doesn't touch any of the
   reserved bits in *FXSAVE.  */

void
x86_64_fill_fxsave (char *fxsave, int regnum)
{
  i387_fill_fxsave (fxsave, regnum);

  /* Also write out the upper 32 bits of the FPU instruction and
     operand pointers, at the offsets where the 32-bit FXSAVE format
     keeps its segment selectors (see comment above).  */
  if (regnum == -1 || regnum == I387_FISEG_REGNUM)
    regcache_collect (I387_FISEG_REGNUM, fxsave + 12);
  if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
    regcache_collect (I387_FOSEG_REGNUM, fxsave + 20);
}
This page took 0.057601 seconds and 5 git commands to generate.