/* Target-dependent code for AMD64.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "opcode/i386.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"

#include "gdb_assert.h"

#include "amd64-tdep.h"
#include "i387-tdep.h"

#include "features/i386/amd64.c"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSD's have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Total number of registers.  */
#define AMD64_NUM_REGS	ARRAY_SIZE (amd64_register_names)

/* The registers used to pass integer arguments during a function call.  */
static int amd64_dummy_call_integer_regs[] =
{
  AMD64_RDI_REGNUM,		/* %rdi */
  AMD64_RSI_REGNUM,		/* %rsi */
  AMD64_RDX_REGNUM,		/* %rdx */
  AMD64_RCX_REGNUM,		/* %rcx */
  8,				/* %r8 */
  9				/* %r9 */
};

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning (_("Unmapped DWARF Register #%d encountered."), reg);

  return regnum;
}

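/* For example, DWARF register 0 maps to AMD64_RAX_REGNUM, DWARF
   register 7 maps to AMD64_RSP_REGNUM, and DWARF registers 17-24 map
   to %xmm0-%xmm7; the slots marked -1 above (such as the segment base
   registers) have no GDB equivalent and trigger the warning.  */
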
/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}

\f

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}

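/* For instance, merging INTEGER with SSE yields INTEGER (rule d),
   while merging SSE with X87 yields MEMORY (rule e); only mixes of
   SSE and SSEUP fall through to rule (f).  */
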
/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
     C++, is a non-POD structure or union type, or contains
     unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    class[1] = amd64_merge_classes (class[1], subclass[0]);
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}

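/* As an illustration, a 16-byte struct { long l; double d; } ends up
   with class[0] = INTEGER and class[1] = SSE, while a 24-byte struct
   falls under rule 1 and is classified entirely as MEMORY.  */
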
/* Classify TYPE, and store the result in CLASS.  */

void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}

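/* Examples: an `int' or a pointer gets class[0] = INTEGER, a `double'
   gets class[0] = SSE, and a 16-byte `long double' gets the X87/X87UP
   pair; class[1] stays NO_CLASS in the first two cases.  */
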
static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));
  gdb_assert (tdep->classify);

  /* 1. Classify the return type with the classification algorithm.  */
  tdep->classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
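
/* For example, a function returning struct { long l; double d; } has
   class {INTEGER, SSE}: the first eightbyte is transferred via %rax
   and the second via %xmm0, while a 32-byte struct is of class MEMORY
   and only %rax (holding the buffer address) is consulted.  */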
\f

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int *integer_regs = tdep->call_dummy_integer_regs;
  int num_integer_regs = tdep->call_dummy_num_integer_regs;

  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  /* An array that mirrors the stack_args array.  For all arguments
     that are passed by MEMORY, if that argument's address also needs
     to be stored in a register, the ARG_ADDR_REGNO array will contain
     that register number (or a negative value otherwise).  */
  int *arg_addr_regno = alloca (nargs * sizeof (int));
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (tdep->classify);

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      tdep->classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > num_integer_regs
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args] = args[i];
	  /* If this is an AMD64_MEMORY argument whose address must also
	     be passed in one of the integer registers, reserve that
	     register and associate this value to that register so that
	     we can store the argument address as soon as we know it.  */
	  if (class[0] == AMD64_MEMORY
	      && tdep->memory_args_by_pointer
	      && integer_reg < tdep->call_dummy_num_integer_regs)
	    arg_addr_regno[num_stack_args] =
	      tdep->call_dummy_integer_regs[integer_reg++];
	  else
	    arg_addr_regno[num_stack_args] = -1;
	  num_stack_args++;
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regs[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);
      CORE_ADDR arg_addr = sp + element * 8;

      write_memory (arg_addr, valbuf, len);
      if (arg_addr_regno[i] >= 0)
        {
          /* We also need to store the address of that argument in
             the given register.  */
          gdb_byte buf[8];
          enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

          store_unsigned_integer (buf, 8, byte_order, arg_addr);
          regcache_cooked_write (regcache, arg_addr_regno[i], buf);
        }
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}

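/* As a rough example, for a call f (1, 2.0, big_struct) under the
   SysV ABI register set, the integer argument goes in the first
   integer register (%rdi), the double in %xmm0, and the oversized
   struct on the stack; %rax is then set to 1, the number of SSE
   registers used.  */
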
static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
      /* The "hidden" argument is passed through the first argument
         register.  */
      const int arg_regnum = tdep->call_dummy_integer_regs[0];

      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, arg_regnum, buf);
    }

  /* Reserve some memory on the stack for the integer-parameter registers,
     if required by the ABI.  */
  if (tdep->integer_param_regs_saved_in_caller_frame)
    sp -= tdep->call_dummy_num_integer_regs * 8;

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
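
/* After this routine runs, the inferior's %rsp points at the fake
   return address (BP_ADDR) and %rbp mirrors %rsp; the value sp + 16
   returned here is what GDB uses as the stack address of the dummy
   frame's ID.  */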
\f
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};

/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}

/* fprintf-function for amd64_insn_length.
   This function is a nop, we don't want to print anything, we just want to
   compute the length of the insn.  */

static int ATTR_FORMAT (printf, 2, 3)
amd64_insn_length_fprintf (void *stream, const char *format, ...)
{
  return 0;
}

/* Initialize a struct disassemble_info for amd64_insn_length.  */

static void
amd64_insn_length_init_dis (struct gdbarch *gdbarch,
			    struct disassemble_info *di,
			    const gdb_byte *insn, int max_len,
			    CORE_ADDR addr)
{
  init_disassemble_info (di, NULL, amd64_insn_length_fprintf);

  /* init_disassemble_info installs buffer_read_memory, etc.
     so we don't need to do that here.
     The cast is necessary until disassemble_info is const-ified.  */
  di->buffer = (gdb_byte *) insn;
  di->buffer_length = max_len;
  di->buffer_vma = addr;

  di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
  di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
  di->endian = gdbarch_byte_order (gdbarch);
  di->endian_code = gdbarch_byte_order_for_code (gdbarch);

  disassemble_init_for_target (di);
}

/* Return the length in bytes of INSN.
   MAX_LEN is the size of the buffer containing INSN.
   libopcodes currently doesn't export a utility to compute the
   instruction length, so use the disassembler until then.  */

static int
amd64_insn_length (struct gdbarch *gdbarch,
		   const gdb_byte *insn, int max_len, CORE_ADDR addr)
{
  struct disassemble_info di;

  amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);

  return gdbarch_print_insn (gdbarch, addr, &di);
}

/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << index;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}

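/* For instance, for "add %esi, (%rbx)" the ModRM byte marks %rsi and
   %rbx as used; with %rax, %rdx and %rsp always excluded, the first
   free architectural register is 1, i.e. %rcx.  */
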
/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}

/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}

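/* For example, if the copied insn is "mov 0x200(%rip), %rax" and %rcx
   is free, the ModRM byte is rewritten to base+disp32 addressing on
   %rcx, %rcx is loaded with FROM + insn_length, and the 32-bit
   displacement is left untouched.  */
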
static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      struct displaced_step_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}

static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}

static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_closure *dsc,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf;
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: fixup (%s, %s), "
			"insn = 0x%02x 0x%02x ...\n",
			paddress (gdbarch, from), paddress (gdbarch, to),
			insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
			    dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures it's a nop; we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: syscall changed %%rip; "
				"not relocating\n");
	}
      else
	{
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: "
				"relocated %%rip from %s to %s\n",
				paddress (gdbarch, orig_rip),
				paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffUL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: relocated return addr at %s "
			    "to %s\n",
			    paddress (gdbarch, rsp),
			    paddress (gdbarch, retaddr));
    }
}
\f
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Initialize a frame cache.  */

static void
amd64_init_frame_cache (struct amd64_frame_cache *cache)
{
  int i;

  /* Base address.  */
  cache->base = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).
     The values start out as being offsets, and are later converted to
     addresses (at which point -1 is interpreted as an address, still meaning
     "invalid").  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;
}

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
  amd64_init_frame_cache (cache);
  return cache;
}

/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

     1. Use a caller-saved saved register:

	leaq  8(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     2. Use a callee-saved saved register:

	pushq %reg
	leaq  16(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}

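/* A concrete instance of sequence 1 as emitted by GCC 4.4:

      4c 8d 54 24 08          leaq   0x8(%rsp), %r10
      48 83 e4 f0             andq   $-16, %rsp
      41 ff 72 f8             pushq  -0x8(%r10)

   which makes us record %r10 in CACHE->saved_sp_reg, provided
   CURRENT_PC lies beyond the andq.  */
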
c4f35dd8
MK
1610/* Do a limited analysis of the prologue at PC and update CACHE
1611 accordingly. Bail out early if CURRENT_PC is reached. Return the
1612 address where the analysis stopped.
1613
1614 We will handle only functions beginning with:
1615
1616 pushq %rbp 0x55
1617 movq %rsp, %rbp 0x48 0x89 0xe5
1618
1619 Any function that doesn't start with this sequence will be assumed
1620 to have no prologue and thus no valid frame pointer in %rbp. */
1621
1622static CORE_ADDR
e17a4113
UW
1623amd64_analyze_prologue (struct gdbarch *gdbarch,
1624 CORE_ADDR pc, CORE_ADDR current_pc,
e53bef9f 1625 struct amd64_frame_cache *cache)
53e95fcf 1626{
e17a4113 1627 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
d8de1ef7
MK
1628 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1629 gdb_byte buf[3];
1630 gdb_byte op;
c4f35dd8
MK
1631
1632 if (current_pc <= pc)
1633 return current_pc;
1634
e0c62198
L
1635 pc = amd64_analyze_stack_align (pc, current_pc, cache);
1636
e17a4113 1637 op = read_memory_unsigned_integer (pc, 1, byte_order);
c4f35dd8
MK
1638
1639 if (op == 0x55) /* pushq %rbp */
1640 {
1641 /* Take into account that we've executed the `pushq %rbp' that
1642 starts this instruction sequence. */
90f90721 1643 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
c4f35dd8
MK
1644 cache->sp_offset += 8;
1645
1646 /* If that's all, return now. */
1647 if (current_pc <= pc + 1)
1648 return current_pc;
1649
1650 /* Check for `movq %rsp, %rbp'. */
1651 read_memory (pc + 1, buf, 3);
1652 if (memcmp (buf, proto, 3) != 0)
1653 return pc + 1;
1654
1655 /* OK, we actually have a frame. */
1656 cache->frameless_p = 0;
1657 return pc + 4;
1658 }
1659
1660 return pc;
53e95fcf
JS
1661}
1662
c4f35dd8
MK
1663/* Return PC of first real instruction. */
1664
1665static CORE_ADDR
6093d2eb 1666amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
53e95fcf 1667{
e53bef9f 1668 struct amd64_frame_cache cache;
c4f35dd8
MK
1669 CORE_ADDR pc;
1670
d2449ee8 1671 amd64_init_frame_cache (&cache);
e17a4113
UW
1672 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1673 &cache);
c4f35dd8
MK
1674 if (cache.frameless_p)
1675 return start_pc;
1676
1677 return pc;
53e95fcf 1678}
c4f35dd8 1679\f
53e95fcf 1680
c4f35dd8
MK
1681/* Normal frames. */
1682
e53bef9f 1683static struct amd64_frame_cache *
10458914 1684amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
6d686a84 1685{
e17a4113
UW
1686 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1687 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
e53bef9f 1688 struct amd64_frame_cache *cache;
d8de1ef7 1689 gdb_byte buf[8];
6d686a84 1690 int i;
6d686a84 1691
c4f35dd8
MK
1692 if (*this_cache)
1693 return *this_cache;
6d686a84 1694
e53bef9f 1695 cache = amd64_alloc_frame_cache ();
c4f35dd8
MK
1696 *this_cache = cache;
1697
10458914 1698 cache->pc = get_frame_func (this_frame);
c4f35dd8 1699 if (cache->pc != 0)
e17a4113
UW
1700 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
1701 cache);
c4f35dd8 1702
e0c62198
L
1703 if (cache->saved_sp_reg != -1)
1704 {
1705 /* Stack pointer has been saved. */
1706 get_frame_register (this_frame, cache->saved_sp_reg, buf);
e17a4113 1707 cache->saved_sp = extract_unsigned_integer(buf, 8, byte_order);
e0c62198
L
1708 }
1709
c4f35dd8
MK
1710 if (cache->frameless_p)
1711 {
4a28816e
MK
1712 /* We didn't find a valid frame. If we're at the start of a
1713 function, or somewhere half-way its prologue, the function's
1714 frame probably hasn't been fully setup yet. Try to
1715 reconstruct the base address for the stack frame by looking
1716 at the stack pointer. For truly "frameless" functions this
1717 might work too. */
c4f35dd8 1718
e0c62198
L
1719 if (cache->saved_sp_reg != -1)
1720 {
1721 /* We're halfway aligning the stack. */
1722 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1723 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1724
1725 /* This will be added back below. */
1726 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1727 }
1728 else
1729 {
1730 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
e17a4113
UW
1731 cache->base = extract_unsigned_integer (buf, 8, byte_order)
1732 + cache->sp_offset;
e0c62198 1733 }
c4f35dd8 1734 }
35883a3f
MK
1735 else
1736 {
10458914 1737 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
e17a4113 1738 cache->base = extract_unsigned_integer (buf, 8, byte_order);
35883a3f 1739 }
c4f35dd8
MK
1740
1741 /* Now that we have the base address for the stack frame we can
1742 calculate the value of %rsp in the calling frame. */
1743 cache->saved_sp = cache->base + 16;
1744
35883a3f
MK
1745 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1746 frame we find it at the same offset from the reconstructed base
e0c62198
L
 1747 address. If we're in the middle of aligning the stack, %rip is handled
1748 differently (see above). */
1749 if (!cache->frameless_p || cache->saved_sp_reg == -1)
1750 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
35883a3f 1751
c4f35dd8
MK
1752 /* Adjust all the saved registers such that they contain addresses
1753 instead of offsets. */
e53bef9f 1754 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
1755 if (cache->saved_regs[i] != -1)
1756 cache->saved_regs[i] += cache->base;
1757
1758 return cache;
6d686a84
ML
1759}
1760
c4f35dd8 1761static void
10458914 1762amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
e53bef9f 1763 struct frame_id *this_id)
c4f35dd8 1764{
e53bef9f 1765 struct amd64_frame_cache *cache =
10458914 1766 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
1767
1768 /* This marks the outermost frame. */
1769 if (cache->base == 0)
1770 return;
1771
1772 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1773}
e76e1718 1774
10458914
DJ
1775static struct value *
1776amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1777 int regnum)
53e95fcf 1778{
10458914 1779 struct gdbarch *gdbarch = get_frame_arch (this_frame);
e53bef9f 1780 struct amd64_frame_cache *cache =
10458914 1781 amd64_frame_cache (this_frame, this_cache);
e76e1718 1782
c4f35dd8 1783 gdb_assert (regnum >= 0);
b1ab997b 1784
2ae02b47 1785 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
10458914 1786 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
e76e1718 1787
e53bef9f 1788 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
10458914
DJ
1789 return frame_unwind_got_memory (this_frame, regnum,
1790 cache->saved_regs[regnum]);
e76e1718 1791
10458914 1792 return frame_unwind_got_register (this_frame, regnum, regnum);
c4f35dd8 1793}
e76e1718 1794
e53bef9f 1795static const struct frame_unwind amd64_frame_unwind =
c4f35dd8
MK
1796{
1797 NORMAL_FRAME,
e53bef9f 1798 amd64_frame_this_id,
10458914
DJ
1799 amd64_frame_prev_register,
1800 NULL,
1801 default_frame_sniffer
c4f35dd8 1802};
c4f35dd8 1803\f
e76e1718 1804
c4f35dd8
MK
1805/* Signal trampolines. */
1806
 1807/* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
1808 64-bit variants. This would require using identical frame caches
1809 on both platforms. */
1810
e53bef9f 1811static struct amd64_frame_cache *
10458914 1812amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
c4f35dd8 1813{
e17a4113
UW
1814 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1815 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1816 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
e53bef9f 1817 struct amd64_frame_cache *cache;
c4f35dd8 1818 CORE_ADDR addr;
d8de1ef7 1819 gdb_byte buf[8];
2b5e0749 1820 int i;
c4f35dd8
MK
1821
1822 if (*this_cache)
1823 return *this_cache;
1824
e53bef9f 1825 cache = amd64_alloc_frame_cache ();
c4f35dd8 1826
10458914 1827 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
e17a4113 1828 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
c4f35dd8 1829
10458914 1830 addr = tdep->sigcontext_addr (this_frame);
2b5e0749 1831 gdb_assert (tdep->sc_reg_offset);
e53bef9f 1832 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2b5e0749
MK
1833 for (i = 0; i < tdep->sc_num_regs; i++)
1834 if (tdep->sc_reg_offset[i] != -1)
1835 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
c4f35dd8
MK
1836
1837 *this_cache = cache;
1838 return cache;
53e95fcf
JS
1839}
1840
c4f35dd8 1841static void
10458914 1842amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
e53bef9f 1843 void **this_cache, struct frame_id *this_id)
c4f35dd8 1844{
e53bef9f 1845 struct amd64_frame_cache *cache =
10458914 1846 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 1847
10458914 1848 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
c4f35dd8
MK
1849}
1850
10458914
DJ
1851static struct value *
1852amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
1853 void **this_cache, int regnum)
c4f35dd8
MK
1854{
1855 /* Make sure we've initialized the cache. */
10458914 1856 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 1857
10458914 1858 return amd64_frame_prev_register (this_frame, this_cache, regnum);
c4f35dd8
MK
1859}
1860
10458914
DJ
1861static int
1862amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1863 struct frame_info *this_frame,
1864 void **this_cache)
c4f35dd8 1865{
10458914 1866 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
911bc6ee
MK
1867
1868 /* We shouldn't even bother if we don't have a sigcontext_addr
1869 handler. */
1870 if (tdep->sigcontext_addr == NULL)
10458914 1871 return 0;
911bc6ee
MK
1872
1873 if (tdep->sigtramp_p != NULL)
1874 {
10458914
DJ
1875 if (tdep->sigtramp_p (this_frame))
1876 return 1;
911bc6ee 1877 }
c4f35dd8 1878
911bc6ee 1879 if (tdep->sigtramp_start != 0)
1c3545ae 1880 {
10458914 1881 CORE_ADDR pc = get_frame_pc (this_frame);
1c3545ae 1882
911bc6ee
MK
1883 gdb_assert (tdep->sigtramp_end != 0);
1884 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
10458914 1885 return 1;
1c3545ae 1886 }
c4f35dd8 1887
10458914 1888 return 0;
c4f35dd8 1889}
10458914
DJ
1890
1891static const struct frame_unwind amd64_sigtramp_frame_unwind =
1892{
1893 SIGTRAMP_FRAME,
1894 amd64_sigtramp_frame_this_id,
1895 amd64_sigtramp_frame_prev_register,
1896 NULL,
1897 amd64_sigtramp_frame_sniffer
1898};
c4f35dd8
MK
1899\f
1900
1901static CORE_ADDR
10458914 1902amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
c4f35dd8 1903{
e53bef9f 1904 struct amd64_frame_cache *cache =
10458914 1905 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
1906
1907 return cache->base;
1908}
1909
e53bef9f 1910static const struct frame_base amd64_frame_base =
c4f35dd8 1911{
e53bef9f
MK
1912 &amd64_frame_unwind,
1913 amd64_frame_base_address,
1914 amd64_frame_base_address,
1915 amd64_frame_base_address
c4f35dd8
MK
1916};
1917
872761f4
MS
1918/* Normal frames, but in a function epilogue. */
1919
1920/* The epilogue is defined here as the 'ret' instruction, which will
 1921 follow any instruction such as 'leave' or 'pop %rbp' that destroys
1922 the function's stack frame. */
1923
1924static int
1925amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
1926{
1927 gdb_byte insn;
1928
1929 if (target_read_memory (pc, &insn, 1))
1930 return 0; /* Can't read memory at pc. */
1931
1932 if (insn != 0xc3) /* 'ret' instruction. */
1933 return 0;
1934
1935 return 1;
1936}
1937
1938static int
1939amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
1940 struct frame_info *this_frame,
1941 void **this_prologue_cache)
1942{
1943 if (frame_relative_level (this_frame) == 0)
1944 return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
1945 get_frame_pc (this_frame));
1946 else
1947 return 0;
1948}
1949
1950static struct amd64_frame_cache *
1951amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
1952{
1953 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1954 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1955 struct amd64_frame_cache *cache;
6c10c06b 1956 gdb_byte buf[8];
872761f4
MS
1957
1958 if (*this_cache)
1959 return *this_cache;
1960
1961 cache = amd64_alloc_frame_cache ();
1962 *this_cache = cache;
1963
 1964 /* Cache base will be %rsp plus cache->sp_offset (-8). */
1965 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1966 cache->base = extract_unsigned_integer (buf, 8,
1967 byte_order) + cache->sp_offset;
1968
1969 /* Cache pc will be the frame func. */
1970 cache->pc = get_frame_pc (this_frame);
1971
 1972 /* The saved %rsp will be at cache->base plus 16. */
1973 cache->saved_sp = cache->base + 16;
1974
 1975 /* The saved %rip will be at cache->base plus 8. */
1976 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
1977
1978 return cache;
1979}
1980
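/* Worked example (values made up): if the inferior is stopped on a
   `ret' with %rsp = 0x7fffffffe478, then %rsp points at the saved
   return address.  With an sp_offset of -8 the cache built above
   records base = 0x7fffffffe470, the saved %rip at base + 8 =
   0x7fffffffe478, and the caller's %rsp as base + 16 = 0x7fffffffe480,
   which is what %rsp will hold once the `ret' has popped the return
   address.  */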
1981static void
1982amd64_epilogue_frame_this_id (struct frame_info *this_frame,
1983 void **this_cache,
1984 struct frame_id *this_id)
1985{
1986 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
1987 this_cache);
1988
1989 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
1990}
1991
1992static const struct frame_unwind amd64_epilogue_frame_unwind =
1993{
1994 NORMAL_FRAME,
1995 amd64_epilogue_frame_this_id,
1996 amd64_frame_prev_register,
1997 NULL,
1998 amd64_epilogue_frame_sniffer
1999};
2000
166f4c7b 2001static struct frame_id
10458914 2002amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
166f4c7b 2003{
c4f35dd8
MK
2004 CORE_ADDR fp;
2005
10458914 2006 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
c4f35dd8 2007
10458914 2008 return frame_id_build (fp + 16, get_frame_pc (this_frame));
166f4c7b
ML
2009}
2010
8b148df9
AC
 2011/* Align the stack pointer to a 16-byte boundary, per the frame requirements. */
2012
2013static CORE_ADDR
e53bef9f 2014amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
8b148df9
AC
2015{
2016 return sp & -(CORE_ADDR)16;
2017}
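/* Illustrative check (values made up, not part of GDB) of the rounding
   performed by amd64_frame_align above: the mask clears the low four
   bits, so the stack pointer only ever moves down.  */

static void
example_frame_align_check (struct gdbarch *gdbarch)
{
  gdb_assert (amd64_frame_align (gdbarch, 0x7fffffffe468) == 0x7fffffffe460);
  gdb_assert (amd64_frame_align (gdbarch, 0x7fffffffe460) == 0x7fffffffe460);
}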
473f17b0
MK
2018\f
2019
593adc23
MK
2020/* Supply register REGNUM from the buffer specified by FPREGS and LEN
2021 in the floating-point register set REGSET to register cache
2022 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
473f17b0
MK
2023
2024static void
e53bef9f
MK
2025amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2026 int regnum, const void *fpregs, size_t len)
473f17b0 2027{
9ea75c57 2028 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
473f17b0
MK
2029
2030 gdb_assert (len == tdep->sizeof_fpregset);
90f90721 2031 amd64_supply_fxsave (regcache, regnum, fpregs);
473f17b0 2032}
8b148df9 2033
593adc23
MK
2034/* Collect register REGNUM from the register cache REGCACHE and store
2035 it in the buffer specified by FPREGS and LEN as described by the
2036 floating-point register set REGSET. If REGNUM is -1, do this for
2037 all registers in REGSET. */
2038
2039static void
2040amd64_collect_fpregset (const struct regset *regset,
2041 const struct regcache *regcache,
2042 int regnum, void *fpregs, size_t len)
2043{
2044 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2045
2046 gdb_assert (len == tdep->sizeof_fpregset);
2047 amd64_collect_fxsave (regcache, regnum, fpregs);
2048}
2049
c6b33596
MK
2050/* Return the appropriate register set for the core section identified
2051 by SECT_NAME and SECT_SIZE. */
2052
2053static const struct regset *
e53bef9f
MK
2054amd64_regset_from_core_section (struct gdbarch *gdbarch,
2055 const char *sect_name, size_t sect_size)
c6b33596
MK
2056{
2057 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2058
2059 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2060 {
2061 if (tdep->fpregset == NULL)
593adc23
MK
2062 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2063 amd64_collect_fpregset);
c6b33596
MK
2064
2065 return tdep->fpregset;
2066 }
2067
2068 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2069}
2070\f
2071
436675d3
PA
2072/* Figure out where the longjmp will land. Slurp the jmp_buf out of
2073 %rdi. We expect its value to be a pointer to the jmp_buf structure
2074 from which we extract the address that we will land at. This
2075 address is copied into PC. This routine returns non-zero on
2076 success. */
2077
2078static int
2079amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2080{
2081 gdb_byte buf[8];
2082 CORE_ADDR jb_addr;
2083 struct gdbarch *gdbarch = get_frame_arch (frame);
2084 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
0dfff4cb 2085 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2086
2087 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2088 longjmp will land. */
2089 if (jb_pc_offset == -1)
2090 return 0;
2091
2092 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
0dfff4cb
UW
 2093 jb_addr = extract_typed_address
2094 (buf, builtin_type (gdbarch)->builtin_data_ptr);
436675d3
PA
2095 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2096 return 0;
2097
0dfff4cb 2098 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2099
2100 return 1;
2101}
2102
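/* Usage sketch (hypothetical caller, not part of GDB): resolve the
   longjmp destination for a frame, falling back to zero on failure.  */

static CORE_ADDR
example_longjmp_destination (struct frame_info *frame)
{
  CORE_ADDR pc = 0;

  if (!amd64_get_longjmp_target (frame, &pc))
    return 0;

  return pc;
}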
cf648174
HZ
2103static const int amd64_record_regmap[] =
2104{
2105 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2106 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2107 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2108 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2109 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2110 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2111};
2112
2213a65d 2113void
90f90721 2114amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
53e95fcf 2115{
0c1a73d6 2116 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
90884b2b 2117 const struct target_desc *tdesc = info.target_desc;
53e95fcf 2118
473f17b0
MK
2119 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2120 floating-point registers. */
2121 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2122
90884b2b
L
2123 if (! tdesc_has_registers (tdesc))
2124 tdesc = tdesc_amd64;
2125 tdep->tdesc = tdesc;
2126
2127 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
2128 tdep->register_names = amd64_register_names;
2129
5716833c 2130 /* AMD64 has an FPU and 16 SSE registers. */
90f90721 2131 tdep->st0_regnum = AMD64_ST0_REGNUM;
0c1a73d6 2132 tdep->num_xmm_regs = 16;
53e95fcf 2133
0c1a73d6 2134 /* This is what all the fuss is about. */
53e95fcf
JS
2135 set_gdbarch_long_bit (gdbarch, 64);
2136 set_gdbarch_long_long_bit (gdbarch, 64);
2137 set_gdbarch_ptr_bit (gdbarch, 64);
2138
e53bef9f
MK
2139 /* In contrast to the i386, on AMD64 a `long double' actually takes
2140 up 128 bits, even though it's still based on the i387 extended
2141 floating-point format which has only 80 significant bits. */
b83b026c
MK
2142 set_gdbarch_long_double_bit (gdbarch, 128);
2143
e53bef9f 2144 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
b83b026c
MK
2145
2146 /* Register numbers of various important registers. */
90f90721
MK
2147 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
2148 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
2149 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
2150 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
b83b026c 2151
e53bef9f
MK
2152 /* The "default" register numbering scheme for AMD64 is referred to
2153 as the "DWARF Register Number Mapping" in the System V psABI.
2154 The preferred debugging format for all known AMD64 targets is
2155 actually DWARF2, and GCC doesn't seem to support DWARF (that is
2156 DWARF-1), but we provide the same mapping just in case. This
2157 mapping is also used for stabs, which GCC does support. */
2158 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
e53bef9f 2159 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
de220d0f 2160
c4f35dd8 2161 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
e53bef9f 2162 be in use on any of the supported AMD64 targets. */
53e95fcf 2163
c4f35dd8 2164 /* Call dummy code. */
e53bef9f
MK
2165 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
2166 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
8b148df9 2167 set_gdbarch_frame_red_zone_size (gdbarch, 128);
ba581dc1
JB
2168 tdep->call_dummy_num_integer_regs =
2169 ARRAY_SIZE (amd64_dummy_call_integer_regs);
2170 tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
2171 tdep->classify = amd64_classify;
53e95fcf 2172
83acabca 2173 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
d532c08f
MK
2174 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
2175 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
2176
efb1c01c 2177 set_gdbarch_return_value (gdbarch, amd64_return_value);
53e95fcf 2178
e53bef9f 2179 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
53e95fcf 2180
c4f35dd8 2181 /* Avoid wiring in the MMX registers for now. */
2213a65d 2182 set_gdbarch_num_pseudo_regs (gdbarch, 0);
5716833c 2183 tdep->mm0_regnum = -1;
2213a65d 2184
cf648174
HZ
2185 tdep->record_regmap = amd64_record_regmap;
2186
10458914 2187 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
53e95fcf 2188
872761f4
MS
2189 /* Hook the function epilogue frame unwinder. This unwinder is
 2190 prepended to the list, so that it supersedes the other
2191 unwinders in function epilogues. */
2192 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
2193
2194 /* Hook the prologue-based frame unwinders. */
10458914
DJ
2195 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
2196 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
e53bef9f 2197 frame_base_set_default (gdbarch, &amd64_frame_base);
c6b33596
MK
2198
2199 /* If we have a register mapping, enable the generic core file support. */
2200 if (tdep->gregset_reg_offset)
2201 set_gdbarch_regset_from_core_section (gdbarch,
e53bef9f 2202 amd64_regset_from_core_section);
436675d3
PA
2203
2204 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
c4f35dd8 2205}
90884b2b
L
2206
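/* Sketch (hypothetical, loosely modelled on the OS-specific amd64 tdep
   files): an OS ABI handler calls amd64_init_abi first and then fills
   in the OS-specific hooks.  The assignments below are placeholders,
   not real values.  */

static void
example_os_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  amd64_init_abi (info, gdbarch);

  tdep->sigtramp_p = NULL;	/* OS-specific sigtramp predicate.  */
  tdep->sigcontext_addr = NULL;	/* OS-specific sigcontext locator.  */
  tdep->jb_pc_offset = -1;	/* Offset of PC in jmp_buf; -1 = unknown.  */
}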
2207/* Provide a prototype to silence -Wmissing-prototypes. */
2208void _initialize_amd64_tdep (void);
2209
2210void
2211_initialize_amd64_tdep (void)
2212{
2213 initialize_tdesc_amd64 ();
2214}
c4f35dd8
MK
2215\f
2216
41d041d6
MK
2217/* The 64-bit FXSAVE format differs from the 32-bit format in the
2218 sense that the instruction pointer and data pointer are simply
2219 64-bit offsets into the code segment and the data segment instead
 2220 of a selector/offset pair. The functions below store the upper 32
 2221 bits of these pointers (instead of just the 16 bits of the segment
2222 selector). */
2223
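/* Partial layout sketch (illustrative only, not a definition GDB uses)
   of the start of the 64-bit FXSAVE area, showing why offsets 12 and
   20 below hold the upper halves of the two 64-bit pointers.  */

struct example_fxsave64_prefix
{
  gdb_byte fcw_fsw_ftw_fop[8];	/* Bytes 0-7: control, status, tag, opcode.  */
  gdb_byte fpu_ip[8];		/* Bytes 8-15: 64-bit instruction pointer.  */
  gdb_byte fpu_dp[8];		/* Bytes 16-23: 64-bit operand pointer.  */
  gdb_byte mxcsr[4];		/* Bytes 24-27: MXCSR.  */
};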
2224/* Fill register REGNUM in REGCACHE with the appropriate
0485f6ad
MK
2225 floating-point or SSE register value from *FXSAVE. If REGNUM is
2226 -1, do this for all registers. This function masks off any of the
2227 reserved bits in *FXSAVE. */
c4f35dd8
MK
2228
2229void
90f90721 2230amd64_supply_fxsave (struct regcache *regcache, int regnum,
20a6ec49 2231 const void *fxsave)
c4f35dd8 2232{
20a6ec49
MD
2233 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2234 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2235
41d041d6 2236 i387_supply_fxsave (regcache, regnum, fxsave);
c4f35dd8 2237
20a6ec49 2238 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
c4f35dd8 2239 {
d8de1ef7 2240 const gdb_byte *regs = fxsave;
41d041d6 2241
20a6ec49
MD
2242 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2243 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2244 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2245 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
c4f35dd8 2246 }
0c1a73d6
MK
2247}
2248
3c017e40
MK
2249/* Fill register REGNUM (if it is a floating-point or SSE register) in
2250 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
2251 all registers. This function doesn't touch any of the reserved
2252 bits in *FXSAVE. */
2253
2254void
2255amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2256 void *fxsave)
2257{
20a6ec49
MD
2258 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2259 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
d8de1ef7 2260 gdb_byte *regs = fxsave;
3c017e40
MK
2261
2262 i387_collect_fxsave (regcache, regnum, fxsave);
2263
20a6ec49 2264 if (gdbarch_ptr_bit (gdbarch) == 64)
f0ef85a5 2265 {
20a6ec49
MD
2266 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2267 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2268 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2269 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
f0ef85a5 2270 }
3c017e40 2271}