/* Target-dependent code for AMD64.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "opcode/i386.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"

#include "gdb_assert.h"

#include "amd64-tdep.h"
#include "i387-tdep.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSD's have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Total number of registers.  */
#define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_names)

/* Return the name of register REGNUM.  */

const char *
amd64_register_name (struct gdbarch *gdbarch, int regnum)
{
  if (regnum >= 0 && regnum < AMD64_NUM_REGS)
    return amd64_register_names[regnum];

  return NULL;
}

/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  */

struct type *
amd64_register_type (struct gdbarch *gdbarch, int regnum)
{
  if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_RDI_REGNUM)
    return builtin_type (gdbarch)->builtin_int64;
  if (regnum == AMD64_RBP_REGNUM || regnum == AMD64_RSP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  if (regnum >= AMD64_R8_REGNUM && regnum <= AMD64_R15_REGNUM)
    return builtin_type (gdbarch)->builtin_int64;
  if (regnum == AMD64_RIP_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  if (regnum == AMD64_EFLAGS_REGNUM)
    return i386_eflags_type (gdbarch);
  if (regnum >= AMD64_CS_REGNUM && regnum <= AMD64_GS_REGNUM)
    return builtin_type (gdbarch)->builtin_int32;
  if (regnum >= AMD64_ST0_REGNUM && regnum <= AMD64_ST0_REGNUM + 7)
    return i387_ext_type (gdbarch);
  if (regnum >= AMD64_FCTRL_REGNUM && regnum <= AMD64_FCTRL_REGNUM + 7)
    return builtin_type (gdbarch)->builtin_int32;
  if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
    return i386_sse_type (gdbarch);
  if (regnum == AMD64_MXCSR_REGNUM)
    return i386_mxcsr_type (gdbarch);

  internal_error (__FILE__, __LINE__, _("invalid regnum"));
}

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning (_("Unmapped DWARF Register #%d encountered."), reg);

  return regnum;
}

/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}

\f

/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}

static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);

/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte:  */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
        class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
                  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
        {
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
          enum amd64_reg_class subclass[2];

          /* Ignore static fields.  */
          if (field_is_static (&TYPE_FIELD (type, i)))
            continue;

          gdb_assert (pos == 0 || pos == 1);

          amd64_classify (subtype, subclass);
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
          if (pos == 0)
            class[1] = amd64_merge_classes (class[1], subclass[1]);
        }
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
           && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
           || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}

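/* Determine, per the psABI classification of TYPE, which registers hold
   a function's return value, and copy the value out of (READBUF) or
   into (WRITEBUF) those registers via REGCACHE.  */
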
static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
                    struct type *type, struct regcache *regcache,
                    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
        {
          ULONGEST addr;

          regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
          read_memory (addr, readbuf, TYPE_LENGTH (type));
        }

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
        {
        case AMD64_INTEGER:
          /* 3. If the class is INTEGER, the next available register
             of the sequence %rax, %rdx is used.  */
          regnum = integer_regnum[integer_reg++];
          break;

        case AMD64_SSE:
          /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
          regnum = sse_regnum[sse_reg++];
          break;

        case AMD64_SSEUP:
          /* 5. If the class is SSEUP, the eightbyte is passed in the
             upper half of the last used SSE register.  */
          gdb_assert (sse_reg > 0);
          regnum = sse_regnum[sse_reg - 1];
          offset = 8;
          break;

        case AMD64_X87:
          /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
          regnum = AMD64_ST0_REGNUM;
          if (writebuf)
            i387_return_value (gdbarch, regcache);
          break;

        case AMD64_X87UP:
          /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  */
          gdb_assert (i > 0 && class[0] == AMD64_X87);
          regnum = AMD64_ST0_REGNUM;
          offset = 8;
          len = 2;
          break;

        case AMD64_NO_CLASS:
          continue;

        default:
          gdb_assert (!"Unexpected register class.");
        }

      gdb_assert (regnum != -1);

      if (readbuf)
        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
                                readbuf + i * 8);
      if (writebuf)
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
                                 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
\f

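/* Push the arguments in ARGS onto the stack and into the argument
   registers selected by the psABI classification.  If STRUCT_RETURN is
   set, reserve the first integer register for the hidden return-value
   pointer.  Return the adjusted stack pointer.  */
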
static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
                      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    8,				/* %r8 */
    9				/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
        {
          if (class[j] == AMD64_INTEGER)
            needed_integer_regs++;
          else if (class[j] == AMD64_SSE)
            needed_sse_regs++;
        }

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
          || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
          || (needed_integer_regs == 0 && needed_sse_regs == 0))
        {
          /* The argument will be passed on the stack.  */
          num_elements += ((len + 7) / 8);
          stack_args[num_stack_args++] = args[i];
        }
      else
        {
          /* The argument will be passed in registers.  */
          const gdb_byte *valbuf = value_contents (args[i]);
          gdb_byte buf[8];

          gdb_assert (len <= 16);

          for (j = 0; len > 0; j++, len -= 8)
            {
              int regnum = -1;
              int offset = 0;

              switch (class[j])
                {
                case AMD64_INTEGER:
                  regnum = integer_regnum[integer_reg++];
                  break;

                case AMD64_SSE:
                  regnum = sse_regnum[sse_reg++];
                  break;

                case AMD64_SSEUP:
                  gdb_assert (sse_reg > 0);
                  regnum = sse_regnum[sse_reg - 1];
                  offset = 8;
                  break;

                default:
                  gdb_assert (!"Unexpected register class.");
                }

              gdb_assert (regnum != -1);
              memset (buf, 0, sizeof buf);
              memcpy (buf, valbuf + j * 8, min (len, 8));
              regcache_raw_write_part (regcache, regnum, offset, 8, buf);
            }
        }
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}

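/* Set up the inferior for a fake function call: push the arguments and
   the return address BP_ADDR, pass the hidden struct-return address in
   %rdi if needed, then update %rsp and fake a frame pointer in %rbp.  */
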
static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                       struct regcache *regcache, CORE_ADDR bp_addr,
                       int nargs, struct value **args, CORE_ADDR sp,
                       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
\f
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};

/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

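/* Return non-zero if PFX is a REX instruction prefix byte.  */
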
static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
        {
        case DATA_PREFIX_OPCODE:
        case ADDR_PREFIX_OPCODE:
        case CS_PREFIX_OPCODE:
        case DS_PREFIX_OPCODE:
        case ES_PREFIX_OPCODE:
        case FS_PREFIX_OPCODE:
        case GS_PREFIX_OPCODE:
        case SS_PREFIX_OPCODE:
        case LOCK_PREFIX_OPCODE:
        case REPE_PREFIX_OPCODE:
        case REPNE_PREFIX_OPCODE:
          ++insn;
          continue;
        default:
          break;
        }
      break;
    }

  return insn;
}

/* fprintf-function for amd64_insn_length.
   This function is a nop, we don't want to print anything, we just want to
   compute the length of the insn.  */

static int ATTR_FORMAT (printf, 2, 3)
amd64_insn_length_fprintf (void *stream, const char *format, ...)
{
  return 0;
}

/* Initialize a struct disassemble_info for amd64_insn_length.  */

static void
amd64_insn_length_init_dis (struct gdbarch *gdbarch,
                            struct disassemble_info *di,
                            const gdb_byte *insn, int max_len,
                            CORE_ADDR addr)
{
  init_disassemble_info (di, NULL, amd64_insn_length_fprintf);

  /* init_disassemble_info installs buffer_read_memory, etc.
     so we don't need to do that here.
     The cast is necessary until disassemble_info is const-ified.  */
  di->buffer = (gdb_byte *) insn;
  di->buffer_length = max_len;
  di->buffer_vma = addr;

  di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
  di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
  di->endian = gdbarch_byte_order (gdbarch);
  di->endian_code = gdbarch_byte_order_for_code (gdbarch);

  disassemble_init_for_target (di);
}

/* Return the length in bytes of INSN.
   MAX_LEN is the size of the buffer containing INSN.
   libopcodes currently doesn't export a utility to compute the
   instruction length, so use the disassembler until then.  */

static int
amd64_insn_length (struct gdbarch *gdbarch,
                   const gdb_byte *insn, int max_len, CORE_ADDR addr)
{
  struct disassemble_info di;

  amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);

  return gdbarch_print_insn (gdbarch, addr, &di);
}

/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
        {
          int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
          int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
          used_regs_mask |= 1 << base;
          used_regs_mask |= 1 << index;
        }
      else
        {
          used_regs_mask |= 1 << rm;
        }
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
        if (! (used_regs_mask & (1 << i)))
          return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}

/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
        {
        case 0x24:
        case 0x25:
        case 0x38:
        case 0x3a:
        case 0x7a:
        case 0x7b:
          ++insn;
          details->opcode_len = 3;
          break;
        default:
          details->opcode_len = 2;
          break;
        }
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}

/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
              CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
                        "displaced: using temp reg %d, old value %s, new value %s\n",
                        dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
                        paddress (gdbarch, rip_base));
}

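/* Adjust the displaced copy of the instruction described by DSC so
   that it can execute correctly at TO; currently this only means
   rewriting any %rip-relative operand via fixup_riprel.  */
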
static void
fixup_displaced_copy (struct gdbarch *gdbarch,
                      struct displaced_step_closure *dsc,
                      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
        {
          /* The insn uses rip-relative addressing.
             Deal with it.  */
          fixup_riprel (gdbarch, dsc, from, to, regs);
        }
    }
}

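/* Copy the instruction at FROM to TO for displaced stepping, patching
   it as needed (a trailing nop after a syscall, rewritten %rip-relative
   addressing), and return a closure recording what was done.  */
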
struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
                                CORE_ADDR from, CORE_ADDR to,
                                struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
                          paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}

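/* Return non-zero if the instruction described by DETAILS is an
   absolute (indirect) jump.  */
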
static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
        return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
        return 1;
    }

  return 0;
}

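/* Return non-zero if the instruction described by DETAILS is an
   absolute (indirect) call.  */
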
static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
        return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
        return 1;
    }

  return 0;
}

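/* Return non-zero if the instruction described by DETAILS is some
   flavor of return (near, far or iret).  */
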
static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}

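/* Return non-zero if the instruction described by DETAILS is a call,
   either absolute indirect or near relative.  */
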
static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
                            struct displaced_step_closure *dsc,
                            CORE_ADDR from, CORE_ADDR to,
                            struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf;
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
                        "displaced: fixup (%s, %s), "
                        "insn = 0x%02x 0x%02x ...\n",
                        paddress (gdbarch, from), paddress (gdbarch, to),
                        insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
                            dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
         execution of the main program after the signal handler has
         returned.  That makes them like 'return' instructions; we
         shouldn't relocate %rip.

         But most system calls don't, and we do need to relocate %rip.

         Our heuristic for distinguishing these cases: if stepping
         over the system call instruction left control directly after
         the instruction, then we relocate --- control almost certainly
         doesn't belong in the displaced copy.  Otherwise, we assume
         the instruction has put control where it belongs, and leave
         it unrelocated.  Goodness help us if there are PC-relative
         system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
          && orig_rip != to + insn_len
          /* GDB can get control back after the insn after the syscall.
             Presumably this is a kernel bug.
             Fixup ensures it's a nop, we add one to the length for it.  */
          && orig_rip != to + insn_len + 1)
        {
          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog,
                                "displaced: syscall changed %%rip; "
                                "not relocating\n");
        }
      else
        {
          ULONGEST rip = orig_rip - insn_offset;

          /* If we just stepped over a breakpoint insn, we don't backup
             the pc on purpose; this is to match behaviour without
             stepping.  */

          regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog,
                                "displaced: "
                                "relocated %%rip from %s to %s\n",
                                paddress (gdbarch, orig_rip),
                                paddress (gdbarch, rip));
        }
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffUL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog,
                            "displaced: relocated return addr at %s "
                            "to %s\n",
                            paddress (gdbarch, rsp),
                            paddress (gdbarch, retaddr));
    }
}
\f
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Initialize a frame cache.  */

static void
amd64_init_frame_cache (struct amd64_frame_cache *cache)
{
  int i;

  /* Base address.  */
  cache->base = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;
}

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
  amd64_init_frame_cache (cache);
  return cache;
}

/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
                           struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}

/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_memory_unsigned_integer (pc, 1, byte_order);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
	 starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
	return current_pc;

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)
	return pc + 1;

      /* OK, we actually have a frame.  */
      cache->frameless_p = 0;
      return pc + 4;
    }

  return pc;
}

/* Return PC of first real instruction.  */

static CORE_ADDR
amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
{
  struct amd64_frame_cache cache;
  CORE_ADDR pc;

  amd64_init_frame_cache (&cache);
  pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
			       &cache);
  if (cache.frameless_p)
    return start_pc;

  return pc;
}
\f

/* Normal frames.  */

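/* Build (or return the already cached) frame cache for THIS_FRAME,
   analyzing the prologue to find the frame base and the locations of
   the saved registers.  */
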
static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->saved_sp_reg != -1)
    {
      /* Stack pointer has been saved.  */
      get_frame_register (this_frame, cache->saved_sp_reg, buf);
      cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
    }

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully set up yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}

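/* Compute the frame ID of THIS_FRAME for the normal frame unwinder.  */
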
static void
amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
		     struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  /* This marks the outermost frame.  */
  if (cache->base == 0)
    return;

  (*this_id) = frame_id_build (cache->base + 16, cache->pc);
}

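/* Return the value of register REGNUM in the frame that called
   THIS_FRAME, for the normal frame unwinder.  */
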
10458914
DJ
1747static struct value *
1748amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1749 int regnum)
53e95fcf 1750{
10458914 1751 struct gdbarch *gdbarch = get_frame_arch (this_frame);
e53bef9f 1752 struct amd64_frame_cache *cache =
10458914 1753 amd64_frame_cache (this_frame, this_cache);
e76e1718 1754
c4f35dd8 1755 gdb_assert (regnum >= 0);
b1ab997b 1756
2ae02b47 1757 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
10458914 1758 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
e76e1718 1759
e53bef9f 1760 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
10458914
DJ
1761 return frame_unwind_got_memory (this_frame, regnum,
1762 cache->saved_regs[regnum]);
e76e1718 1763
10458914 1764 return frame_unwind_got_register (this_frame, regnum, regnum);
c4f35dd8 1765}
e76e1718 1766
e53bef9f 1767static const struct frame_unwind amd64_frame_unwind =
c4f35dd8
MK
1768{
1769 NORMAL_FRAME,
e53bef9f 1770 amd64_frame_this_id,
10458914
DJ
1771 amd64_frame_prev_register,
1772 NULL,
1773 default_frame_sniffer
c4f35dd8 1774};
c4f35dd8 1775\f
e76e1718 1776
/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

  addr = tdep->sigcontext_addr (this_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}

static void
amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
                              void **this_cache, struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_sigtramp_frame_cache (this_frame, this_cache);

  (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
}

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
                                    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}

static int
amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
                              struct frame_info *this_frame,
                              void **this_cache)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));

  /* We shouldn't even bother if we don't have a sigcontext_addr
     handler.  */
  if (tdep->sigcontext_addr == NULL)
    return 0;

  if (tdep->sigtramp_p != NULL)
    {
      if (tdep->sigtramp_p (this_frame))
        return 1;
    }

  if (tdep->sigtramp_start != 0)
    {
      CORE_ADDR pc = get_frame_pc (this_frame);

      gdb_assert (tdep->sigtramp_end != 0);
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
        return 1;
    }

  return 0;
}

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
\f
static CORE_ADDR
amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  return cache->base;
}

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};

/* Normal frames, but in a function epilogue.  */

/* The epilogue is defined here as the 'ret' instruction, which will
   follow any instruction such as 'leave' or 'pop %rbp' that destroys
   the function's stack frame.  */

static int
amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  gdb_byte insn;

  if (target_read_memory (pc, &insn, 1))
    return 0;   /* Can't read memory at pc.  */

  if (insn != 0xc3)   /* 'ret' instruction.  */
    return 0;

  return 1;
}
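/* Note that only the plain near `ret' opcode (0xc3) is recognized
   here; other return forms such as `ret imm16' (0xc2) are not
   treated as epilogue instructions.  */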
static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
                              struct frame_info *this_frame,
                              void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) == 0)
    return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
                                         get_frame_pc (this_frame));
  else
    return 0;
}

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8,
                                          byte_order) + cache->sp_offset;

  /* Cache pc will be the frame's current pc.  */
  cache->pc = get_frame_pc (this_frame);

  /* The caller's %rsp will be cache->base plus 16.  */
  cache->saved_sp = cache->base + 16;

  /* The saved %rip will be found at cache->base plus 8.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

  return cache;
}

static void
amd64_epilogue_frame_this_id (struct frame_info *this_frame,
                              void **this_cache,
                              struct frame_id *this_id)
{
  struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
                                                                this_cache);

  (*this_id) = frame_id_build (cache->base + 8, cache->pc);
}

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};

static struct frame_id
amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR fp;

  fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);

  return frame_id_build (fp + 16, get_frame_pc (this_frame));
}

/* 16-byte align the SP per frame requirements.  */

static CORE_ADDR
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & -(CORE_ADDR) 16;
}
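/* For example, with SP = 0x7fffffffe438 this yields 0x7fffffffe430,
   while a value that is already 16-byte aligned, such as
   0x7fffffffe440, is returned unchanged.  */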
\f

/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
                       int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}

/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  */

static void
amd64_collect_fpregset (const struct regset *regset,
                        const struct regcache *regcache,
                        int regnum, void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);
}

/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  */

static const struct regset *
amd64_regset_from_core_section (struct gdbarch *gdbarch,
                                const char *sect_name, size_t sect_size)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
    {
      if (tdep->fpregset == NULL)
        tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
                                       amd64_collect_fpregset);

      return tdep->fpregset;
    }

  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
}
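/* Core files traditionally carry the general-purpose registers in a
   ".reg" section and the floating-point/SSE state in ".reg2"; only
   the latter is handled here, anything else is delegated to
   i386_regset_from_core_section.  */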
\f

/* Figure out where the longjmp will land.  Slurp the jmp_buf out of
   %rdi.  We expect its value to be a pointer to the jmp_buf structure
   from which we extract the address that we will land at.  This
   address is copied into PC.  This routine returns non-zero on
   success.  */

static int
amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  gdb_byte buf[8];
  CORE_ADDR jb_addr;
  struct gdbarch *gdbarch = get_frame_arch (frame);
  int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
  int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);

  /* If JB_PC_OFFSET is -1, we have no way to find out where the
     longjmp will land.  */
  if (jb_pc_offset == -1)
    return 0;

  get_frame_register (frame, AMD64_RDI_REGNUM, buf);
  jb_addr = extract_typed_address
    (buf, builtin_type (gdbarch)->builtin_data_ptr);
  if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
    return 0;

  *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);

  return 1;
}
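/* The jmp_buf layout is OS-specific, so jb_pc_offset is filled in by
   the OS ABI code (the GNU/Linux and BSD tdep files, for example) and
   is left at -1 when the layout is unknown.  */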
static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, amd64_register_name);
  set_gdbarch_register_type (gdbarch, amd64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     prepended to the unwinder list, so that it supersedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
                                          amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
}
\f

/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16 bits of the segment
   selector).  */
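/* Concretely, in the 512-byte FXSAVE area the FPU instruction pointer
   occupies bytes 8-15 and the FPU operand (data) pointer bytes 16-23
   when the 64-bit format is used, so the upper halves of those
   pointers live at offsets 12 and 20, which are the offsets used
   below.  */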
/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
                     const void *fxsave)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
    {
      const gdb_byte *regs = fxsave;

      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
        regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}

/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
                      void *fxsave)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_ptr_bit (gdbarch) == 64)
    {
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
        regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
        regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}