* config.guess: Update to version 2011-02-02
[deliverable/binutils-gdb.git] / gdb / amd64-tdep.c
CommitLineData
e53bef9f 1/* Target-dependent code for AMD64.
ce0eebec 2
7b6bb8da
JB
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
5ae96ec1
MK
5
6 Contributed by Jiri Smid, SuSE Labs.
53e95fcf
JS
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
a9762ec7 12 the Free Software Foundation; either version 3 of the License, or
53e95fcf
JS
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
a9762ec7 21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
53e95fcf
JS
22
23#include "defs.h"
35669430
DE
24#include "opcode/i386.h"
25#include "dis-asm.h"
c4f35dd8
MK
26#include "arch-utils.h"
27#include "block.h"
28#include "dummy-frame.h"
29#include "frame.h"
30#include "frame-base.h"
31#include "frame-unwind.h"
53e95fcf 32#include "inferior.h"
53e95fcf 33#include "gdbcmd.h"
c4f35dd8
MK
34#include "gdbcore.h"
35#include "objfiles.h"
53e95fcf 36#include "regcache.h"
2c261fae 37#include "regset.h"
53e95fcf 38#include "symfile.h"
eda5a4d7 39#include "disasm.h"
82dbc5f7 40#include "gdb_assert.h"
c4f35dd8 41
9c1488cb 42#include "amd64-tdep.h"
c4f35dd8 43#include "i387-tdep.h"
53e95fcf 44
90884b2b 45#include "features/i386/amd64.c"
a055a187 46#include "features/i386/amd64-avx.c"
90884b2b 47
e53bef9f
MK
48/* Note that the AMD64 architecture was previously known as x86-64.
49 The latter is (forever) engraved into the canonical system name as
90f90721 50 returned by config.guess, and used as the name for the AMD64 port
e53bef9f
MK
51 of GNU/Linux. The BSD's have renamed their ports to amd64; they
52 don't like to shout. For GDB we prefer the amd64_-prefix over the
53 x86_64_-prefix since it's so much easier to type. */
54
402ecd56 55/* Register information. */
c4f35dd8 56
/* Names of the raw AMD64 registers, indexed by GDB register number.  */

static const char *amd64_register_names[] =
{
  /* General-purpose registers, numbers 0-7.  */
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",

  /* Instruction pointer, flags and segment registers, numbers 16-23.  */
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
74
a055a187
L
/* Names of the full-width AVX %ymm pseudo-registers.  */

static const char *amd64_ymm_names[] =
{
  "ymm0",  "ymm1",  "ymm2",  "ymm3",
  "ymm4",  "ymm5",  "ymm6",  "ymm7",
  "ymm8",  "ymm9",  "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};
82
/* Names of the raw registers holding the upper 128 bits of the
   corresponding %ymm registers.  */

static const char *amd64_ymmh_names[] =
{
  "ymm0h",  "ymm1h",  "ymm2h",  "ymm3h",
  "ymm4h",  "ymm5h",  "ymm6h",  "ymm7h",
  "ymm8h",  "ymm9h",  "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};
de220d0f 90
ba581dc1
JB
91/* The registers used to pass integer arguments during a function call. */
92static int amd64_dummy_call_integer_regs[] =
93{
94 AMD64_RDI_REGNUM, /* %rdi */
95 AMD64_RSI_REGNUM, /* %rsi */
96 AMD64_RDX_REGNUM, /* %rdx */
97 AMD64_RCX_REGNUM, /* %rcx */
98 8, /* %r8 */
99 9 /* %r9 */
100};
101
c4f35dd8
MK
102/* DWARF Register Number Mapping as defined in the System V psABI,
103 section 3.6. */
53e95fcf 104
e53bef9f 105static int amd64_dwarf_regmap[] =
0e04a514 106{
c4f35dd8 107 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
90f90721
MK
108 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
109 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
110 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
c4f35dd8
MK
111
112 /* Frame Pointer Register RBP. */
90f90721 113 AMD64_RBP_REGNUM,
c4f35dd8
MK
114
115 /* Stack Pointer Register RSP. */
90f90721 116 AMD64_RSP_REGNUM,
c4f35dd8
MK
117
118 /* Extended Integer Registers 8 - 15. */
119 8, 9, 10, 11, 12, 13, 14, 15,
120
59207364 121 /* Return Address RA. Mapped to RIP. */
90f90721 122 AMD64_RIP_REGNUM,
c4f35dd8
MK
123
124 /* SSE Registers 0 - 7. */
90f90721
MK
125 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
126 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
127 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
128 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
c4f35dd8
MK
129
130 /* Extended SSE Registers 8 - 15. */
90f90721
MK
131 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
132 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
133 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
134 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
c4f35dd8
MK
135
136 /* Floating Point Registers 0-7. */
90f90721
MK
137 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
138 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
139 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
c6f4c129
JB
140 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
141
142 /* Control and Status Flags Register. */
143 AMD64_EFLAGS_REGNUM,
144
145 /* Selector Registers. */
146 AMD64_ES_REGNUM,
147 AMD64_CS_REGNUM,
148 AMD64_SS_REGNUM,
149 AMD64_DS_REGNUM,
150 AMD64_FS_REGNUM,
151 AMD64_GS_REGNUM,
152 -1,
153 -1,
154
155 /* Segment Base Address Registers. */
156 -1,
157 -1,
158 -1,
159 -1,
160
161 /* Special Selector Registers. */
162 -1,
163 -1,
164
165 /* Floating Point Control Registers. */
166 AMD64_MXCSR_REGNUM,
167 AMD64_FCTRL_REGNUM,
168 AMD64_FSTAT_REGNUM
c4f35dd8 169};
0e04a514 170
e53bef9f
MK
171static const int amd64_dwarf_regmap_len =
172 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
0e04a514 173
c4f35dd8
MK
174/* Convert DWARF register number REG to the appropriate register
175 number used by GDB. */
26abbdc4 176
c4f35dd8 177static int
d3f73121 178amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
53e95fcf 179{
a055a187
L
180 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
181 int ymm0_regnum = tdep->ymm0_regnum;
c4f35dd8 182 int regnum = -1;
53e95fcf 183
16aff9a6 184 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
e53bef9f 185 regnum = amd64_dwarf_regmap[reg];
53e95fcf 186
c4f35dd8 187 if (regnum == -1)
8a3fe4f8 188 warning (_("Unmapped DWARF Register #%d encountered."), reg);
a055a187
L
189 else if (ymm0_regnum >= 0
190 && i386_xmm_regnum_p (gdbarch, regnum))
191 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
c4f35dd8
MK
192
193 return regnum;
53e95fcf 194}
d532c08f 195
35669430
DE
196/* Map architectural register numbers to gdb register numbers. */
197
198static const int amd64_arch_regmap[16] =
199{
200 AMD64_RAX_REGNUM, /* %rax */
201 AMD64_RCX_REGNUM, /* %rcx */
202 AMD64_RDX_REGNUM, /* %rdx */
203 AMD64_RBX_REGNUM, /* %rbx */
204 AMD64_RSP_REGNUM, /* %rsp */
205 AMD64_RBP_REGNUM, /* %rbp */
206 AMD64_RSI_REGNUM, /* %rsi */
207 AMD64_RDI_REGNUM, /* %rdi */
208 AMD64_R8_REGNUM, /* %r8 */
209 AMD64_R9_REGNUM, /* %r9 */
210 AMD64_R10_REGNUM, /* %r10 */
211 AMD64_R11_REGNUM, /* %r11 */
212 AMD64_R12_REGNUM, /* %r12 */
213 AMD64_R13_REGNUM, /* %r13 */
214 AMD64_R14_REGNUM, /* %r14 */
215 AMD64_R15_REGNUM /* %r15 */
216};
217
218static const int amd64_arch_regmap_len =
219 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
220
221/* Convert architectural register number REG to the appropriate register
222 number used by GDB. */
223
224static int
225amd64_arch_reg_to_regnum (int reg)
226{
227 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
228
229 return amd64_arch_regmap[reg];
230}
231
1ba53b71
L
/* Register names for byte pseudo-registers.  The first sixteen are
   the low bytes of the GPRs; the final four are the legacy
   high-byte registers %ah..%dh.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16
243
1ba53b71
L
/* Register names for word pseudo-registers.  NOTE(review): the empty
   string in the %sp slot appears deliberate upstream (no "sp" word
   pseudo-register is exposed) -- preserved as-is.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
251
/* Register names for dword (32-bit) pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
};
259
260/* Return the name of register REGNUM. */
261
262static const char *
263amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
264{
265 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
266 if (i386_byte_regnum_p (gdbarch, regnum))
267 return amd64_byte_names[regnum - tdep->al_regnum];
a055a187
L
268 else if (i386_ymm_regnum_p (gdbarch, regnum))
269 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
1ba53b71
L
270 else if (i386_word_regnum_p (gdbarch, regnum))
271 return amd64_word_names[regnum - tdep->ax_regnum];
272 else if (i386_dword_regnum_p (gdbarch, regnum))
273 return amd64_dword_names[regnum - tdep->eax_regnum];
274 else
275 return i386_pseudo_register_name (gdbarch, regnum);
276}
277
278static void
279amd64_pseudo_register_read (struct gdbarch *gdbarch,
280 struct regcache *regcache,
281 int regnum, gdb_byte *buf)
282{
283 gdb_byte raw_buf[MAX_REGISTER_SIZE];
284 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
285
286 if (i386_byte_regnum_p (gdbarch, regnum))
287 {
288 int gpnum = regnum - tdep->al_regnum;
289
290 /* Extract (always little endian). */
fe01d668
L
291 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
292 {
293 /* Special handling for AH, BH, CH, DH. */
294 regcache_raw_read (regcache,
295 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
296 memcpy (buf, raw_buf + 1, 1);
297 }
298 else
299 {
300 regcache_raw_read (regcache, gpnum, raw_buf);
301 memcpy (buf, raw_buf, 1);
302 }
1ba53b71
L
303 }
304 else if (i386_dword_regnum_p (gdbarch, regnum))
305 {
306 int gpnum = regnum - tdep->eax_regnum;
307 /* Extract (always little endian). */
308 regcache_raw_read (regcache, gpnum, raw_buf);
309 memcpy (buf, raw_buf, 4);
310 }
311 else
312 i386_pseudo_register_read (gdbarch, regcache, regnum, buf);
313}
314
315static void
316amd64_pseudo_register_write (struct gdbarch *gdbarch,
317 struct regcache *regcache,
318 int regnum, const gdb_byte *buf)
319{
320 gdb_byte raw_buf[MAX_REGISTER_SIZE];
321 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
322
323 if (i386_byte_regnum_p (gdbarch, regnum))
324 {
325 int gpnum = regnum - tdep->al_regnum;
326
fe01d668
L
327 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
328 {
329 /* Read ... AH, BH, CH, DH. */
330 regcache_raw_read (regcache,
331 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
332 /* ... Modify ... (always little endian). */
333 memcpy (raw_buf + 1, buf, 1);
334 /* ... Write. */
335 regcache_raw_write (regcache,
336 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
337 }
338 else
339 {
340 /* Read ... */
341 regcache_raw_read (regcache, gpnum, raw_buf);
342 /* ... Modify ... (always little endian). */
343 memcpy (raw_buf, buf, 1);
344 /* ... Write. */
345 regcache_raw_write (regcache, gpnum, raw_buf);
346 }
1ba53b71
L
347 }
348 else if (i386_dword_regnum_p (gdbarch, regnum))
349 {
350 int gpnum = regnum - tdep->eax_regnum;
351
352 /* Read ... */
353 regcache_raw_read (regcache, gpnum, raw_buf);
354 /* ... Modify ... (always little endian). */
355 memcpy (raw_buf, buf, 4);
356 /* ... Write. */
357 regcache_raw_write (regcache, gpnum, raw_buf);
358 }
359 else
360 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
361}
362
53e95fcf
JS
363\f
364
efb1c01c
MK
365/* Return the union class of CLASS1 and CLASS2. See the psABI for
366 details. */
367
368static enum amd64_reg_class
369amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
370{
371 /* Rule (a): If both classes are equal, this is the resulting class. */
372 if (class1 == class2)
373 return class1;
374
375 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
376 is the other class. */
377 if (class1 == AMD64_NO_CLASS)
378 return class2;
379 if (class2 == AMD64_NO_CLASS)
380 return class1;
381
382 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
383 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
384 return AMD64_MEMORY;
385
386 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
387 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
388 return AMD64_INTEGER;
389
390 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
391 MEMORY is used as class. */
392 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
393 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
394 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
395 return AMD64_MEMORY;
396
397 /* Rule (f): Otherwise class SSE is used. */
398 return AMD64_SSE;
399}
400
79b1ab3d
MK
401/* Return non-zero if TYPE is a non-POD structure or union type. */
402
403static int
404amd64_non_pod_p (struct type *type)
405{
406 /* ??? A class with a base class certainly isn't POD, but does this
407 catch all non-POD structure types? */
408 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
409 return 1;
410
411 return 0;
412}
413
efb1c01c
MK
414/* Classify TYPE according to the rules for aggregate (structures and
415 arrays) and union types, and store the result in CLASS. */
c4f35dd8
MK
416
417static void
efb1c01c 418amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
53e95fcf
JS
419{
420 int len = TYPE_LENGTH (type);
421
efb1c01c
MK
422 /* 1. If the size of an object is larger than two eightbytes, or in
423 C++, is a non-POD structure or union type, or contains
424 unaligned fields, it has class memory. */
79b1ab3d 425 if (len > 16 || amd64_non_pod_p (type))
53e95fcf 426 {
efb1c01c
MK
427 class[0] = class[1] = AMD64_MEMORY;
428 return;
53e95fcf 429 }
efb1c01c
MK
430
431 /* 2. Both eightbytes get initialized to class NO_CLASS. */
432 class[0] = class[1] = AMD64_NO_CLASS;
433
434 /* 3. Each field of an object is classified recursively so that
435 always two fields are considered. The resulting class is
436 calculated according to the classes of the fields in the
437 eightbyte: */
438
439 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
8ffd9b1b 440 {
efb1c01c
MK
441 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
442
443 /* All fields in an array have the same type. */
444 amd64_classify (subtype, class);
445 if (len > 8 && class[1] == AMD64_NO_CLASS)
446 class[1] = class[0];
8ffd9b1b 447 }
53e95fcf
JS
448 else
449 {
efb1c01c 450 int i;
53e95fcf 451
efb1c01c
MK
452 /* Structure or union. */
453 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
454 || TYPE_CODE (type) == TYPE_CODE_UNION);
455
456 for (i = 0; i < TYPE_NFIELDS (type); i++)
53e95fcf 457 {
efb1c01c
MK
458 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
459 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
460 enum amd64_reg_class subclass[2];
e4e2711a
JB
461 int bitsize = TYPE_FIELD_BITSIZE (type, i);
462 int endpos;
463
464 if (bitsize == 0)
465 bitsize = TYPE_LENGTH (subtype) * 8;
466 endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
efb1c01c 467
562c50c2 468 /* Ignore static fields. */
d6a843b5 469 if (field_is_static (&TYPE_FIELD (type, i)))
562c50c2
MK
470 continue;
471
efb1c01c
MK
472 gdb_assert (pos == 0 || pos == 1);
473
474 amd64_classify (subtype, subclass);
475 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
e4e2711a
JB
476 if (bitsize <= 64 && pos == 0 && endpos == 1)
477 /* This is a bit of an odd case: We have a field that would
478 normally fit in one of the two eightbytes, except that
479 it is placed in a way that this field straddles them.
480 This has been seen with a structure containing an array.
481
482 The ABI is a bit unclear in this case, but we assume that
483 this field's class (stored in subclass[0]) must also be merged
484 into class[1]. In other words, our field has a piece stored
485 in the second eight-byte, and thus its class applies to
486 the second eight-byte as well.
487
488 In the case where the field length exceeds 8 bytes,
489 it should not be necessary to merge the field class
490 into class[1]. As LEN > 8, subclass[1] is necessarily
491 different from AMD64_NO_CLASS. If subclass[1] is equal
492 to subclass[0], then the normal class[1]/subclass[1]
493 merging will take care of everything. For subclass[1]
494 to be different from subclass[0], I can only see the case
495 where we have a SSE/SSEUP or X87/X87UP pair, which both
496 use up all 16 bytes of the aggregate, and are already
497 handled just fine (because each portion sits on its own
498 8-byte). */
499 class[1] = amd64_merge_classes (class[1], subclass[0]);
efb1c01c
MK
500 if (pos == 0)
501 class[1] = amd64_merge_classes (class[1], subclass[1]);
53e95fcf 502 }
53e95fcf 503 }
efb1c01c
MK
504
505 /* 4. Then a post merger cleanup is done: */
506
507 /* Rule (a): If one of the classes is MEMORY, the whole argument is
508 passed in memory. */
509 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
510 class[0] = class[1] = AMD64_MEMORY;
511
512 /* Rule (b): If SSEUP is not preceeded by SSE, it is converted to
513 SSE. */
514 if (class[0] == AMD64_SSEUP)
515 class[0] = AMD64_SSE;
516 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
517 class[1] = AMD64_SSE;
518}
519
520/* Classify TYPE, and store the result in CLASS. */
521
ba581dc1 522void
efb1c01c
MK
523amd64_classify (struct type *type, enum amd64_reg_class class[2])
524{
525 enum type_code code = TYPE_CODE (type);
526 int len = TYPE_LENGTH (type);
527
528 class[0] = class[1] = AMD64_NO_CLASS;
529
530 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
5a7225ed
JB
531 long, long long, and pointers are in the INTEGER class. Similarly,
532 range types, used by languages such as Ada, are also in the INTEGER
533 class. */
efb1c01c 534 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
b929c77f 535 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
9db13498 536 || code == TYPE_CODE_CHAR
efb1c01c
MK
537 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
538 && (len == 1 || len == 2 || len == 4 || len == 8))
539 class[0] = AMD64_INTEGER;
540
5daa78cc
TJB
541 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
542 are in class SSE. */
543 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
544 && (len == 4 || len == 8))
efb1c01c
MK
545 /* FIXME: __m64 . */
546 class[0] = AMD64_SSE;
547
5daa78cc
TJB
548 /* Arguments of types __float128, _Decimal128 and __m128 are split into
549 two halves. The least significant ones belong to class SSE, the most
efb1c01c 550 significant one to class SSEUP. */
5daa78cc
TJB
551 else if (code == TYPE_CODE_DECFLOAT && len == 16)
552 /* FIXME: __float128, __m128. */
553 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
efb1c01c
MK
554
555 /* The 64-bit mantissa of arguments of type long double belongs to
556 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
557 class X87UP. */
558 else if (code == TYPE_CODE_FLT && len == 16)
559 /* Class X87 and X87UP. */
560 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
561
562 /* Aggregates. */
563 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
564 || code == TYPE_CODE_UNION)
565 amd64_classify_aggregate (type, class);
566}
567
568static enum return_value_convention
c055b101
CV
569amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
570 struct type *type, struct regcache *regcache,
42835c2b 571 gdb_byte *readbuf, const gdb_byte *writebuf)
efb1c01c 572{
ba581dc1 573 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
efb1c01c
MK
574 enum amd64_reg_class class[2];
575 int len = TYPE_LENGTH (type);
90f90721
MK
576 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
577 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
efb1c01c
MK
578 int integer_reg = 0;
579 int sse_reg = 0;
580 int i;
581
582 gdb_assert (!(readbuf && writebuf));
ba581dc1 583 gdb_assert (tdep->classify);
efb1c01c
MK
584
585 /* 1. Classify the return type with the classification algorithm. */
ba581dc1 586 tdep->classify (type, class);
efb1c01c
MK
587
588 /* 2. If the type has class MEMORY, then the caller provides space
6fa57a7d 589 for the return value and passes the address of this storage in
0963b4bd 590 %rdi as if it were the first argument to the function. In effect,
6fa57a7d
MK
591 this address becomes a hidden first argument.
592
593 On return %rax will contain the address that has been passed in
594 by the caller in %rdi. */
efb1c01c 595 if (class[0] == AMD64_MEMORY)
6fa57a7d
MK
596 {
597 /* As indicated by the comment above, the ABI guarantees that we
598 can always find the return value just after the function has
599 returned. */
600
601 if (readbuf)
602 {
603 ULONGEST addr;
604
605 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
606 read_memory (addr, readbuf, TYPE_LENGTH (type));
607 }
608
609 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
610 }
efb1c01c
MK
611
612 gdb_assert (class[1] != AMD64_MEMORY);
613 gdb_assert (len <= 16);
614
615 for (i = 0; len > 0; i++, len -= 8)
616 {
617 int regnum = -1;
618 int offset = 0;
619
620 switch (class[i])
621 {
622 case AMD64_INTEGER:
623 /* 3. If the class is INTEGER, the next available register
624 of the sequence %rax, %rdx is used. */
625 regnum = integer_regnum[integer_reg++];
626 break;
627
628 case AMD64_SSE:
629 /* 4. If the class is SSE, the next available SSE register
630 of the sequence %xmm0, %xmm1 is used. */
631 regnum = sse_regnum[sse_reg++];
632 break;
633
634 case AMD64_SSEUP:
635 /* 5. If the class is SSEUP, the eightbyte is passed in the
636 upper half of the last used SSE register. */
637 gdb_assert (sse_reg > 0);
638 regnum = sse_regnum[sse_reg - 1];
639 offset = 8;
640 break;
641
642 case AMD64_X87:
643 /* 6. If the class is X87, the value is returned on the X87
644 stack in %st0 as 80-bit x87 number. */
90f90721 645 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
646 if (writebuf)
647 i387_return_value (gdbarch, regcache);
648 break;
649
650 case AMD64_X87UP:
651 /* 7. If the class is X87UP, the value is returned together
652 with the previous X87 value in %st0. */
653 gdb_assert (i > 0 && class[0] == AMD64_X87);
90f90721 654 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
655 offset = 8;
656 len = 2;
657 break;
658
659 case AMD64_NO_CLASS:
660 continue;
661
662 default:
663 gdb_assert (!"Unexpected register class.");
664 }
665
666 gdb_assert (regnum != -1);
667
668 if (readbuf)
669 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
42835c2b 670 readbuf + i * 8);
efb1c01c
MK
671 if (writebuf)
672 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
42835c2b 673 writebuf + i * 8);
efb1c01c
MK
674 }
675
676 return RETURN_VALUE_REGISTER_CONVENTION;
53e95fcf
JS
677}
678\f
679
720aa428
MK
680static CORE_ADDR
681amd64_push_arguments (struct regcache *regcache, int nargs,
6470d250 682 struct value **args, CORE_ADDR sp, int struct_return)
720aa428 683{
80d19a06
JB
684 struct gdbarch *gdbarch = get_regcache_arch (regcache);
685 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
ba581dc1
JB
686 int *integer_regs = tdep->call_dummy_integer_regs;
687 int num_integer_regs = tdep->call_dummy_num_integer_regs;
688
720aa428
MK
689 static int sse_regnum[] =
690 {
691 /* %xmm0 ... %xmm7 */
90f90721
MK
692 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
693 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
694 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
695 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
720aa428
MK
696 };
697 struct value **stack_args = alloca (nargs * sizeof (struct value *));
80d19a06
JB
698 /* An array that mirrors the stack_args array. For all arguments
699 that are passed by MEMORY, if that argument's address also needs
700 to be stored in a register, the ARG_ADDR_REGNO array will contain
701 that register number (or a negative value otherwise). */
702 int *arg_addr_regno = alloca (nargs * sizeof (int));
720aa428
MK
703 int num_stack_args = 0;
704 int num_elements = 0;
705 int element = 0;
706 int integer_reg = 0;
707 int sse_reg = 0;
708 int i;
709
ba581dc1
JB
710 gdb_assert (tdep->classify);
711
6470d250
MK
712 /* Reserve a register for the "hidden" argument. */
713 if (struct_return)
714 integer_reg++;
715
720aa428
MK
716 for (i = 0; i < nargs; i++)
717 {
4991999e 718 struct type *type = value_type (args[i]);
720aa428
MK
719 int len = TYPE_LENGTH (type);
720 enum amd64_reg_class class[2];
721 int needed_integer_regs = 0;
722 int needed_sse_regs = 0;
723 int j;
724
725 /* Classify argument. */
ba581dc1 726 tdep->classify (type, class);
720aa428
MK
727
728 /* Calculate the number of integer and SSE registers needed for
729 this argument. */
730 for (j = 0; j < 2; j++)
731 {
732 if (class[j] == AMD64_INTEGER)
733 needed_integer_regs++;
734 else if (class[j] == AMD64_SSE)
735 needed_sse_regs++;
736 }
737
738 /* Check whether enough registers are available, and if the
739 argument should be passed in registers at all. */
ba581dc1 740 if (integer_reg + needed_integer_regs > num_integer_regs
720aa428
MK
741 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
742 || (needed_integer_regs == 0 && needed_sse_regs == 0))
743 {
744 /* The argument will be passed on the stack. */
745 num_elements += ((len + 7) / 8);
80d19a06
JB
746 stack_args[num_stack_args] = args[i];
747 /* If this is an AMD64_MEMORY argument whose address must also
748 be passed in one of the integer registers, reserve that
749 register and associate this value to that register so that
750 we can store the argument address as soon as we know it. */
751 if (class[0] == AMD64_MEMORY
752 && tdep->memory_args_by_pointer
753 && integer_reg < tdep->call_dummy_num_integer_regs)
754 arg_addr_regno[num_stack_args] =
755 tdep->call_dummy_integer_regs[integer_reg++];
756 else
757 arg_addr_regno[num_stack_args] = -1;
758 num_stack_args++;
720aa428
MK
759 }
760 else
761 {
762 /* The argument will be passed in registers. */
d8de1ef7
MK
763 const gdb_byte *valbuf = value_contents (args[i]);
764 gdb_byte buf[8];
720aa428
MK
765
766 gdb_assert (len <= 16);
767
768 for (j = 0; len > 0; j++, len -= 8)
769 {
770 int regnum = -1;
771 int offset = 0;
772
773 switch (class[j])
774 {
775 case AMD64_INTEGER:
ba581dc1 776 regnum = integer_regs[integer_reg++];
720aa428
MK
777 break;
778
779 case AMD64_SSE:
780 regnum = sse_regnum[sse_reg++];
781 break;
782
783 case AMD64_SSEUP:
784 gdb_assert (sse_reg > 0);
785 regnum = sse_regnum[sse_reg - 1];
786 offset = 8;
787 break;
788
789 default:
790 gdb_assert (!"Unexpected register class.");
791 }
792
793 gdb_assert (regnum != -1);
794 memset (buf, 0, sizeof buf);
795 memcpy (buf, valbuf + j * 8, min (len, 8));
796 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
797 }
798 }
799 }
800
801 /* Allocate space for the arguments on the stack. */
802 sp -= num_elements * 8;
803
804 /* The psABI says that "The end of the input argument area shall be
805 aligned on a 16 byte boundary." */
806 sp &= ~0xf;
807
808 /* Write out the arguments to the stack. */
809 for (i = 0; i < num_stack_args; i++)
810 {
4991999e 811 struct type *type = value_type (stack_args[i]);
d8de1ef7 812 const gdb_byte *valbuf = value_contents (stack_args[i]);
720aa428 813 int len = TYPE_LENGTH (type);
80d19a06
JB
814 CORE_ADDR arg_addr = sp + element * 8;
815
816 write_memory (arg_addr, valbuf, len);
817 if (arg_addr_regno[i] >= 0)
818 {
819 /* We also need to store the address of that argument in
820 the given register. */
821 gdb_byte buf[8];
822 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
823
824 store_unsigned_integer (buf, 8, byte_order, arg_addr);
825 regcache_cooked_write (regcache, arg_addr_regno[i], buf);
826 }
720aa428
MK
827 element += ((len + 7) / 8);
828 }
829
830 /* The psABI says that "For calls that may call functions that use
831 varargs or stdargs (prototype-less calls or calls to functions
832 containing ellipsis (...) in the declaration) %al is used as
833 hidden argument to specify the number of SSE registers used. */
90f90721 834 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
720aa428
MK
835 return sp;
836}
837
c4f35dd8 838static CORE_ADDR
7d9b040b 839amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
e53bef9f
MK
840 struct regcache *regcache, CORE_ADDR bp_addr,
841 int nargs, struct value **args, CORE_ADDR sp,
842 int struct_return, CORE_ADDR struct_addr)
53e95fcf 843{
e17a4113 844 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3af6ddfe 845 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
d8de1ef7 846 gdb_byte buf[8];
c4f35dd8
MK
847
848 /* Pass arguments. */
6470d250 849 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
c4f35dd8
MK
850
851 /* Pass "hidden" argument". */
852 if (struct_return)
853 {
ba581dc1
JB
854 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
855 /* The "hidden" argument is passed throught the first argument
856 register. */
857 const int arg_regnum = tdep->call_dummy_integer_regs[0];
858
e17a4113 859 store_unsigned_integer (buf, 8, byte_order, struct_addr);
ba581dc1 860 regcache_cooked_write (regcache, arg_regnum, buf);
c4f35dd8
MK
861 }
862
3af6ddfe
JB
863 /* Reserve some memory on the stack for the integer-parameter registers,
864 if required by the ABI. */
865 if (tdep->integer_param_regs_saved_in_caller_frame)
866 sp -= tdep->call_dummy_num_integer_regs * 8;
867
c4f35dd8
MK
868 /* Store return address. */
869 sp -= 8;
e17a4113 870 store_unsigned_integer (buf, 8, byte_order, bp_addr);
c4f35dd8
MK
871 write_memory (sp, buf, 8);
872
873 /* Finally, update the stack pointer... */
e17a4113 874 store_unsigned_integer (buf, 8, byte_order, sp);
90f90721 875 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
c4f35dd8
MK
876
877 /* ...and fake a frame pointer. */
90f90721 878 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
c4f35dd8 879
3e210248 880 return sp + 16;
53e95fcf 881}
c4f35dd8 882\f
35669430
DE
883/* Displaced instruction handling. */
884
885/* A partially decoded instruction.
886 This contains enough details for displaced stepping purposes. */
887
888struct amd64_insn
889{
890 /* The number of opcode bytes. */
891 int opcode_len;
892 /* The offset of the rex prefix or -1 if not present. */
893 int rex_offset;
894 /* The offset to the first opcode byte. */
895 int opcode_offset;
896 /* The offset to the modrm byte or -1 if not present. */
897 int modrm_offset;
898
899 /* The raw instruction. */
900 gdb_byte *raw_insn;
901};
902
903struct displaced_step_closure
904{
905 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
906 int tmp_used;
907 int tmp_regno;
908 ULONGEST tmp_save;
909
910 /* Details of the instruction. */
911 struct amd64_insn insn_details;
912
913 /* Amount of space allocated to insn_buf. */
914 int max_len;
915
916 /* The possibly modified insn.
917 This is a variable-length field. */
918 gdb_byte insn_buf[1];
919};
920
921/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
922 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
923 at which point delete these in favor of libopcodes' versions). */
924
/* ONEBYTE_HAS_MODRM[B] is non-zero if the one-byte opcode B is followed
   by a ModRM byte (consulted by amd64_get_insn_details). */
 925static const unsigned char onebyte_has_modrm[256] = {
 926  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
 927  /*       -------------------------------        */
 928  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
 929  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
 930  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
 931  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
 932  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
 933  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
 934  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
 935  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
 936  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
 937  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
 938  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
 939  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
 940  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
 941  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
 942  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
 943  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
 944  /*       -------------------------------        */
 945  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
 946};
947
/* TWOBYTE_HAS_MODRM[B] is non-zero if the two-byte opcode 0x0f B is
   followed by a ModRM byte; indexed by the byte after the 0x0f escape
   (consulted by amd64_get_insn_details). */
 948static const unsigned char twobyte_has_modrm[256] = {
 949  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
 950  /*       -------------------------------        */
 951  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
 952  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
 953  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
 954  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
 955  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
 956  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
 957  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
 958  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
 959  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
 960  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
 961  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
 962  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
 963  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
 964  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
 965  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
 966  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
 967  /*       -------------------------------        */
 968  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
 969};
970
971static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
972
973static int
974rex_prefix_p (gdb_byte pfx)
975{
976 return REX_PREFIX_P (pfx);
977}
978
979/* Skip the legacy instruction prefixes in INSN.
980 We assume INSN is properly sentineled so we don't have to worry
981 about falling off the end of the buffer. */
982
983static gdb_byte *
1903f0e6 984amd64_skip_prefixes (gdb_byte *insn)
35669430
DE
985{
986 while (1)
987 {
988 switch (*insn)
989 {
990 case DATA_PREFIX_OPCODE:
991 case ADDR_PREFIX_OPCODE:
992 case CS_PREFIX_OPCODE:
993 case DS_PREFIX_OPCODE:
994 case ES_PREFIX_OPCODE:
995 case FS_PREFIX_OPCODE:
996 case GS_PREFIX_OPCODE:
997 case SS_PREFIX_OPCODE:
998 case LOCK_PREFIX_OPCODE:
999 case REPE_PREFIX_OPCODE:
1000 case REPNE_PREFIX_OPCODE:
1001 ++insn;
1002 continue;
1003 default:
1004 break;
1005 }
1006 break;
1007 }
1008
1009 return insn;
1010}
1011
35669430
DE
1012/* Return an integer register (other than RSP) that is unused as an input
1013 operand in INSN.
1014 In order to not require adding a rex prefix if the insn doesn't already
1015 have one, the result is restricted to RAX ... RDI, sans RSP.
1016 The register numbering of the result follows architecture ordering,
1017 e.g. RDI = 7. */
1018
1019static int
1020amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1021{
1022 /* 1 bit for each reg */
1023 int used_regs_mask = 0;
1024
1025 /* There can be at most 3 int regs used as inputs in an insn, and we have
1026 7 to choose from (RAX ... RDI, sans RSP).
1027 This allows us to take a conservative approach and keep things simple.
1028 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1029 that implicitly specify RAX. */
1030
1031 /* Avoid RAX. */
1032 used_regs_mask |= 1 << EAX_REG_NUM;
1033 /* Similarily avoid RDX, implicit operand in divides. */
1034 used_regs_mask |= 1 << EDX_REG_NUM;
1035 /* Avoid RSP. */
1036 used_regs_mask |= 1 << ESP_REG_NUM;
1037
1038 /* If the opcode is one byte long and there's no ModRM byte,
1039 assume the opcode specifies a register. */
1040 if (details->opcode_len == 1 && details->modrm_offset == -1)
1041 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1042
1043 /* Mark used regs in the modrm/sib bytes. */
1044 if (details->modrm_offset != -1)
1045 {
1046 int modrm = details->raw_insn[details->modrm_offset];
1047 int mod = MODRM_MOD_FIELD (modrm);
1048 int reg = MODRM_REG_FIELD (modrm);
1049 int rm = MODRM_RM_FIELD (modrm);
1050 int have_sib = mod != 3 && rm == 4;
1051
1052 /* Assume the reg field of the modrm byte specifies a register. */
1053 used_regs_mask |= 1 << reg;
1054
1055 if (have_sib)
1056 {
1057 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1058 int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1059 used_regs_mask |= 1 << base;
1060 used_regs_mask |= 1 << index;
1061 }
1062 else
1063 {
1064 used_regs_mask |= 1 << rm;
1065 }
1066 }
1067
1068 gdb_assert (used_regs_mask < 256);
1069 gdb_assert (used_regs_mask != 255);
1070
1071 /* Finally, find a free reg. */
1072 {
1073 int i;
1074
1075 for (i = 0; i < 8; ++i)
1076 {
1077 if (! (used_regs_mask & (1 << i)))
1078 return i;
1079 }
1080
1081 /* We shouldn't get here. */
1082 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1083 }
1084}
1085
1086/* Extract the details of INSN that we need. */
1087
1088static void
1089amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1090{
1091 gdb_byte *start = insn;
1092 int need_modrm;
1093
1094 details->raw_insn = insn;
1095
1096 details->opcode_len = -1;
1097 details->rex_offset = -1;
1098 details->opcode_offset = -1;
1099 details->modrm_offset = -1;
1100
1101 /* Skip legacy instruction prefixes. */
1903f0e6 1102 insn = amd64_skip_prefixes (insn);
35669430
DE
1103
1104 /* Skip REX instruction prefix. */
1105 if (rex_prefix_p (*insn))
1106 {
1107 details->rex_offset = insn - start;
1108 ++insn;
1109 }
1110
1111 details->opcode_offset = insn - start;
1112
1113 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1114 {
1115 /* Two or three-byte opcode. */
1116 ++insn;
1117 need_modrm = twobyte_has_modrm[*insn];
1118
1119 /* Check for three-byte opcode. */
1903f0e6 1120 switch (*insn)
35669430 1121 {
1903f0e6
DE
1122 case 0x24:
1123 case 0x25:
1124 case 0x38:
1125 case 0x3a:
1126 case 0x7a:
1127 case 0x7b:
35669430
DE
1128 ++insn;
1129 details->opcode_len = 3;
1903f0e6
DE
1130 break;
1131 default:
1132 details->opcode_len = 2;
1133 break;
35669430 1134 }
35669430
DE
1135 }
1136 else
1137 {
1138 /* One-byte opcode. */
1139 need_modrm = onebyte_has_modrm[*insn];
1140 details->opcode_len = 1;
1141 }
1142
1143 if (need_modrm)
1144 {
1145 ++insn;
1146 details->modrm_offset = insn - start;
1147 }
1148}
1149
1150/* Update %rip-relative addressing in INSN.
1151
1152 %rip-relative addressing only uses a 32-bit displacement.
1153 32 bits is not enough to be guaranteed to cover the distance between where
1154 the real instruction is and where its copy is.
1155 Convert the insn to use base+disp addressing.
1156 We set base = pc + insn_length so we can leave disp unchanged. */
c4f35dd8 1157
35669430
DE
1158static void
1159fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1160 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1161{
e17a4113 1162 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
35669430
DE
1163 const struct amd64_insn *insn_details = &dsc->insn_details;
1164 int modrm_offset = insn_details->modrm_offset;
1165 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1166 CORE_ADDR rip_base;
1167 int32_t disp;
1168 int insn_length;
1169 int arch_tmp_regno, tmp_regno;
1170 ULONGEST orig_value;
1171
1172 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1173 ++insn;
1174
1175 /* Compute the rip-relative address. */
e17a4113 1176 disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
eda5a4d7
PA
1177 insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
1178 dsc->max_len, from);
35669430
DE
1179 rip_base = from + insn_length;
1180
1181 /* We need a register to hold the address.
1182 Pick one not used in the insn.
1183 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1184 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1185 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1186
1187 /* REX.B should be unset as we were using rip-relative addressing,
1188 but ensure it's unset anyway, tmp_regno is not r8-r15. */
1189 if (insn_details->rex_offset != -1)
1190 dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1191
1192 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1193 dsc->tmp_regno = tmp_regno;
1194 dsc->tmp_save = orig_value;
1195 dsc->tmp_used = 1;
1196
1197 /* Convert the ModRM field to be base+disp. */
1198 dsc->insn_buf[modrm_offset] &= ~0xc7;
1199 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1200
1201 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1202
1203 if (debug_displaced)
1204 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
5af949e3
UW
1205 "displaced: using temp reg %d, old value %s, new value %s\n",
1206 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1207 paddress (gdbarch, rip_base));
35669430
DE
1208}
1209
1210static void
1211fixup_displaced_copy (struct gdbarch *gdbarch,
1212 struct displaced_step_closure *dsc,
1213 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1214{
1215 const struct amd64_insn *details = &dsc->insn_details;
1216
1217 if (details->modrm_offset != -1)
1218 {
1219 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1220
1221 if ((modrm & 0xc7) == 0x05)
1222 {
1223 /* The insn uses rip-relative addressing.
1224 Deal with it. */
1225 fixup_riprel (gdbarch, dsc, from, to, regs);
1226 }
1227 }
1228}
1229
1230struct displaced_step_closure *
1231amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1232 CORE_ADDR from, CORE_ADDR to,
1233 struct regcache *regs)
1234{
1235 int len = gdbarch_max_insn_length (gdbarch);
1236 /* Extra space for sentinels so fixup_{riprel,displaced_copy don't have to
1237 continually watch for running off the end of the buffer. */
1238 int fixup_sentinel_space = len;
1239 struct displaced_step_closure *dsc =
1240 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1241 gdb_byte *buf = &dsc->insn_buf[0];
1242 struct amd64_insn *details = &dsc->insn_details;
1243
1244 dsc->tmp_used = 0;
1245 dsc->max_len = len + fixup_sentinel_space;
1246
1247 read_memory (from, buf, len);
1248
1249 /* Set up the sentinel space so we don't have to worry about running
1250 off the end of the buffer. An excessive number of leading prefixes
1251 could otherwise cause this. */
1252 memset (buf + len, 0, fixup_sentinel_space);
1253
1254 amd64_get_insn_details (buf, details);
1255
1256 /* GDB may get control back after the insn after the syscall.
1257 Presumably this is a kernel bug.
1258 If this is a syscall, make sure there's a nop afterwards. */
1259 {
1260 int syscall_length;
1261
1262 if (amd64_syscall_p (details, &syscall_length))
1263 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1264 }
1265
1266 /* Modify the insn to cope with the address where it will be executed from.
1267 In particular, handle any rip-relative addressing. */
1268 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1269
1270 write_memory (to, buf, len);
1271
1272 if (debug_displaced)
1273 {
5af949e3
UW
1274 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1275 paddress (gdbarch, from), paddress (gdbarch, to));
35669430
DE
1276 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1277 }
1278
1279 return dsc;
1280}
1281
1282static int
1283amd64_absolute_jmp_p (const struct amd64_insn *details)
1284{
1285 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1286
1287 if (insn[0] == 0xff)
1288 {
1289 /* jump near, absolute indirect (/4) */
1290 if ((insn[1] & 0x38) == 0x20)
1291 return 1;
1292
1293 /* jump far, absolute indirect (/5) */
1294 if ((insn[1] & 0x38) == 0x28)
1295 return 1;
1296 }
1297
1298 return 0;
1299}
1300
1301static int
1302amd64_absolute_call_p (const struct amd64_insn *details)
1303{
1304 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1305
1306 if (insn[0] == 0xff)
1307 {
1308 /* Call near, absolute indirect (/2) */
1309 if ((insn[1] & 0x38) == 0x10)
1310 return 1;
1311
1312 /* Call far, absolute indirect (/3) */
1313 if ((insn[1] & 0x38) == 0x18)
1314 return 1;
1315 }
1316
1317 return 0;
1318}
1319
1320static int
1321amd64_ret_p (const struct amd64_insn *details)
1322{
1323 /* NOTE: gcc can emit "repz ; ret". */
1324 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1325
1326 switch (insn[0])
1327 {
1328 case 0xc2: /* ret near, pop N bytes */
1329 case 0xc3: /* ret near */
1330 case 0xca: /* ret far, pop N bytes */
1331 case 0xcb: /* ret far */
1332 case 0xcf: /* iret */
1333 return 1;
1334
1335 default:
1336 return 0;
1337 }
1338}
1339
1340static int
1341amd64_call_p (const struct amd64_insn *details)
1342{
1343 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1344
1345 if (amd64_absolute_call_p (details))
1346 return 1;
1347
1348 /* call near, relative */
1349 if (insn[0] == 0xe8)
1350 return 1;
1351
1352 return 0;
1353}
1354
35669430
DE
1355/* Return non-zero if INSN is a system call, and set *LENGTHP to its
1356 length in bytes. Otherwise, return zero. */
1357
1358static int
1359amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1360{
1361 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1362
1363 if (insn[0] == 0x0f && insn[1] == 0x05)
1364 {
1365 *lengthp = 2;
1366 return 1;
1367 }
1368
1369 return 0;
1370}
1371
1372/* Fix up the state of registers and memory after having single-stepped
1373 a displaced instruction. */
1374
1375void
1376amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1377 struct displaced_step_closure *dsc,
1378 CORE_ADDR from, CORE_ADDR to,
1379 struct regcache *regs)
1380{
e17a4113 1381 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
35669430
DE
1382 /* The offset we applied to the instruction's address. */
1383 ULONGEST insn_offset = to - from;
1384 gdb_byte *insn = dsc->insn_buf;
1385 const struct amd64_insn *insn_details = &dsc->insn_details;
1386
1387 if (debug_displaced)
1388 fprintf_unfiltered (gdb_stdlog,
5af949e3 1389 "displaced: fixup (%s, %s), "
35669430 1390 "insn = 0x%02x 0x%02x ...\n",
5af949e3
UW
1391 paddress (gdbarch, from), paddress (gdbarch, to),
1392 insn[0], insn[1]);
35669430
DE
1393
1394 /* If we used a tmp reg, restore it. */
1395
1396 if (dsc->tmp_used)
1397 {
1398 if (debug_displaced)
5af949e3
UW
1399 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1400 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
35669430
DE
1401 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1402 }
1403
1404 /* The list of issues to contend with here is taken from
1405 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1406 Yay for Free Software! */
1407
1408 /* Relocate the %rip back to the program's instruction stream,
1409 if necessary. */
1410
1411 /* Except in the case of absolute or indirect jump or call
1412 instructions, or a return instruction, the new rip is relative to
1413 the displaced instruction; make it relative to the original insn.
1414 Well, signal handler returns don't need relocation either, but we use the
1415 value of %rip to recognize those; see below. */
1416 if (! amd64_absolute_jmp_p (insn_details)
1417 && ! amd64_absolute_call_p (insn_details)
1418 && ! amd64_ret_p (insn_details))
1419 {
1420 ULONGEST orig_rip;
1421 int insn_len;
1422
1423 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1424
1425 /* A signal trampoline system call changes the %rip, resuming
1426 execution of the main program after the signal handler has
1427 returned. That makes them like 'return' instructions; we
1428 shouldn't relocate %rip.
1429
1430 But most system calls don't, and we do need to relocate %rip.
1431
1432 Our heuristic for distinguishing these cases: if stepping
1433 over the system call instruction left control directly after
1434 the instruction, the we relocate --- control almost certainly
1435 doesn't belong in the displaced copy. Otherwise, we assume
1436 the instruction has put control where it belongs, and leave
1437 it unrelocated. Goodness help us if there are PC-relative
1438 system calls. */
1439 if (amd64_syscall_p (insn_details, &insn_len)
1440 && orig_rip != to + insn_len
1441 /* GDB can get control back after the insn after the syscall.
1442 Presumably this is a kernel bug.
1443 Fixup ensures its a nop, we add one to the length for it. */
1444 && orig_rip != to + insn_len + 1)
1445 {
1446 if (debug_displaced)
1447 fprintf_unfiltered (gdb_stdlog,
1448 "displaced: syscall changed %%rip; "
1449 "not relocating\n");
1450 }
1451 else
1452 {
1453 ULONGEST rip = orig_rip - insn_offset;
1454
1903f0e6
DE
1455 /* If we just stepped over a breakpoint insn, we don't backup
1456 the pc on purpose; this is to match behaviour without
1457 stepping. */
35669430
DE
1458
1459 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1460
1461 if (debug_displaced)
1462 fprintf_unfiltered (gdb_stdlog,
1463 "displaced: "
5af949e3
UW
1464 "relocated %%rip from %s to %s\n",
1465 paddress (gdbarch, orig_rip),
1466 paddress (gdbarch, rip));
35669430
DE
1467 }
1468 }
1469
1470 /* If the instruction was PUSHFL, then the TF bit will be set in the
1471 pushed value, and should be cleared. We'll leave this for later,
1472 since GDB already messes up the TF flag when stepping over a
1473 pushfl. */
1474
1475 /* If the instruction was a call, the return address now atop the
1476 stack is the address following the copied instruction. We need
1477 to make it the address following the original instruction. */
1478 if (amd64_call_p (insn_details))
1479 {
1480 ULONGEST rsp;
1481 ULONGEST retaddr;
1482 const ULONGEST retaddr_len = 8;
1483
1484 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
e17a4113 1485 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
35669430 1486 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
e17a4113 1487 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
35669430
DE
1488
1489 if (debug_displaced)
1490 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
1491 "displaced: relocated return addr at %s "
1492 "to %s\n",
1493 paddress (gdbarch, rsp),
1494 paddress (gdbarch, retaddr));
35669430
DE
1495 }
1496}
dde08ee1
PA
1497
1498/* If the instruction INSN uses RIP-relative addressing, return the
1499 offset into the raw INSN where the displacement to be adjusted is
1500 found. Returns 0 if the instruction doesn't use RIP-relative
1501 addressing. */
1502
1503static int
1504rip_relative_offset (struct amd64_insn *insn)
1505{
1506 if (insn->modrm_offset != -1)
1507 {
1508 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1509
1510 if ((modrm & 0xc7) == 0x05)
1511 {
1512 /* The displacement is found right after the ModRM byte. */
1513 return insn->modrm_offset + 1;
1514 }
1515 }
1516
1517 return 0;
1518}
1519
1520static void
1521append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1522{
1523 target_write_memory (*to, buf, len);
1524 *to += len;
1525}
1526
1527void
1528amd64_relocate_instruction (struct gdbarch *gdbarch,
1529 CORE_ADDR *to, CORE_ADDR oldloc)
1530{
1531 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1532 int len = gdbarch_max_insn_length (gdbarch);
1533 /* Extra space for sentinels. */
1534 int fixup_sentinel_space = len;
1535 gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
1536 struct amd64_insn insn_details;
1537 int offset = 0;
1538 LONGEST rel32, newrel;
1539 gdb_byte *insn;
1540 int insn_length;
1541
1542 read_memory (oldloc, buf, len);
1543
1544 /* Set up the sentinel space so we don't have to worry about running
1545 off the end of the buffer. An excessive number of leading prefixes
1546 could otherwise cause this. */
1547 memset (buf + len, 0, fixup_sentinel_space);
1548
1549 insn = buf;
1550 amd64_get_insn_details (insn, &insn_details);
1551
1552 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1553
1554 /* Skip legacy instruction prefixes. */
1555 insn = amd64_skip_prefixes (insn);
1556
1557 /* Adjust calls with 32-bit relative addresses as push/jump, with
1558 the address pushed being the location where the original call in
1559 the user program would return to. */
1560 if (insn[0] == 0xe8)
1561 {
1562 gdb_byte push_buf[16];
1563 unsigned int ret_addr;
1564
1565 /* Where "ret" in the original code will return to. */
1566 ret_addr = oldloc + insn_length;
0963b4bd 1567 push_buf[0] = 0x68; /* pushq $... */
dde08ee1
PA
1568 memcpy (&push_buf[1], &ret_addr, 4);
1569 /* Push the push. */
1570 append_insns (to, 5, push_buf);
1571
1572 /* Convert the relative call to a relative jump. */
1573 insn[0] = 0xe9;
1574
1575 /* Adjust the destination offset. */
1576 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1577 newrel = (oldloc - *to) + rel32;
f4a1794a
KY
1578 store_signed_integer (insn + 1, 4, byte_order, newrel);
1579
1580 if (debug_displaced)
1581 fprintf_unfiltered (gdb_stdlog,
1582 "Adjusted insn rel32=%s at %s to"
1583 " rel32=%s at %s\n",
1584 hex_string (rel32), paddress (gdbarch, oldloc),
1585 hex_string (newrel), paddress (gdbarch, *to));
dde08ee1
PA
1586
1587 /* Write the adjusted jump into its displaced location. */
1588 append_insns (to, 5, insn);
1589 return;
1590 }
1591
1592 offset = rip_relative_offset (&insn_details);
1593 if (!offset)
1594 {
1595 /* Adjust jumps with 32-bit relative addresses. Calls are
1596 already handled above. */
1597 if (insn[0] == 0xe9)
1598 offset = 1;
1599 /* Adjust conditional jumps. */
1600 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1601 offset = 2;
1602 }
1603
1604 if (offset)
1605 {
1606 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1607 newrel = (oldloc - *to) + rel32;
f4a1794a 1608 store_signed_integer (insn + offset, 4, byte_order, newrel);
dde08ee1
PA
1609 if (debug_displaced)
1610 fprintf_unfiltered (gdb_stdlog,
f4a1794a
KY
1611 "Adjusted insn rel32=%s at %s to"
1612 " rel32=%s at %s\n",
dde08ee1
PA
1613 hex_string (rel32), paddress (gdbarch, oldloc),
1614 hex_string (newrel), paddress (gdbarch, *to));
1615 }
1616
1617 /* Write the adjusted instruction into its displaced location. */
1618 append_insns (to, insn_length, buf);
1619}
1620
35669430 1621\f
c4f35dd8 1622/* The maximum number of saved registers. This should include %rip. */
90f90721 1623#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
c4f35dd8 1624
e53bef9f 1625struct amd64_frame_cache
c4f35dd8
MK
 1626{
 1627  /* Base address. */
 1628  CORE_ADDR base;
  /* Cumulative %rsp adjustment observed while scanning the prologue;
     starts at -8 to account for the pushed return address (see
     amd64_init_frame_cache and amd64_analyze_prologue). */
 1629  CORE_ADDR sp_offset;
  /* Start address of the function, as found by get_frame_func. */
 1630  CORE_ADDR pc;
 1631
 1632  /* Saved registers. */
e53bef9f 1633  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  /* Value of %rsp in the calling frame. */
c4f35dd8 1634  CORE_ADDR saved_sp;
  /* Register holding the pre-stack-realignment %rsp, or -1 if none
     was detected (see amd64_analyze_stack_align). */
e0c62198 1635  int saved_sp_reg;
c4f35dd8
MK
 1636
 1637  /* Do we have a frame? */
 1638  int frameless_p;
 1639};
8dda9770 1640
d2449ee8 1641/* Initialize a frame cache. */
c4f35dd8 1642
d2449ee8
DJ
1643static void
1644amd64_init_frame_cache (struct amd64_frame_cache *cache)
8dda9770 1645{
c4f35dd8
MK
1646 int i;
1647
c4f35dd8
MK
1648 /* Base address. */
1649 cache->base = 0;
1650 cache->sp_offset = -8;
1651 cache->pc = 0;
1652
1653 /* Saved registers. We initialize these to -1 since zero is a valid
bba66b87
DE
1654 offset (that's where %rbp is supposed to be stored).
1655 The values start out as being offsets, and are later converted to
1656 addresses (at which point -1 is interpreted as an address, still meaning
1657 "invalid"). */
e53bef9f 1658 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
1659 cache->saved_regs[i] = -1;
1660 cache->saved_sp = 0;
e0c62198 1661 cache->saved_sp_reg = -1;
c4f35dd8
MK
1662
1663 /* Frameless until proven otherwise. */
1664 cache->frameless_p = 1;
d2449ee8 1665}
c4f35dd8 1666
d2449ee8
DJ
1667/* Allocate and initialize a frame cache. */
1668
1669static struct amd64_frame_cache *
1670amd64_alloc_frame_cache (void)
1671{
1672 struct amd64_frame_cache *cache;
1673
1674 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1675 amd64_init_frame_cache (cache);
c4f35dd8 1676 return cache;
8dda9770 1677}
53e95fcf 1678
e0c62198
L
 1679/* GCC 4.4 and later, can put code in the prologue to realign the
 1680   stack pointer.  Check whether PC points to such code, and update
 1681   CACHE accordingly.  Return the first instruction after the code
 1682   sequence or CURRENT_PC, whichever is smaller.  If we don't
 1683   recognize the code, return PC.  */
 1684
 1685static CORE_ADDR
 1686amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
 1687			   struct amd64_frame_cache *cache)
 1688{
 1689  /* There are 2 code sequences to re-align stack before the frame
 1690     gets set up:
 1691
 1692	1. Use a caller-saved saved register:
 1693
 1694		leaq  8(%rsp), %reg
 1695		andq  $-XXX, %rsp
 1696		pushq -8(%reg)
 1697
 1698	2. Use a callee-saved saved register:
 1699
 1700		pushq %reg
 1701		leaq  16(%rsp), %reg
 1702		andq  $-XXX, %rsp
 1703		pushq -8(%reg)
 1704
 1705     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
 1706
 1707	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
 1708	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
 1709   */
 1710
 1711  gdb_byte buf[18];
 1712  int reg, r;
 1713  int offset, offset_and;
e0c62198
L
 1714
 1715  if (target_read_memory (pc, buf, sizeof buf))
 1716    return pc;
 1717
 1718  /* Check caller-saved saved register.  The first instruction has
 1719     to be "leaq 8(%rsp), %reg".  */
 1720  if ((buf[0] & 0xfb) == 0x48
 1721      && buf[1] == 0x8d
 1722      && buf[3] == 0x24
 1723      && buf[4] == 0x8)
 1724    {
 1725      /* MOD must be binary 10 and R/M must be binary 100.  */
 1726      if ((buf[2] & 0xc7) != 0x44)
 1727	return pc;
 1728
 1729      /* REG has register number.  */
 1730      reg = (buf[2] >> 3) & 7;
 1731
 1732      /* Check the REX.R bit.  */
 1733      if (buf[0] == 0x4c)
 1734	reg += 8;
 1735
 1736      offset = 5;
 1737    }
 1738  else
 1739    {
 1740      /* Check callee-saved saved register.  The first instruction
 1741	 has to be "pushq %reg".  */
 1742      reg = 0;
 1743      if ((buf[0] & 0xf8) == 0x50)
 1744	offset = 0;
 1745      else if ((buf[0] & 0xf6) == 0x40
 1746	       && (buf[1] & 0xf8) == 0x50)
 1747	{
 1748	  /* Check the REX.B bit.  */
 1749	  if ((buf[0] & 1) != 0)
 1750	    reg = 8;
 1751
 1752	  offset = 1;
 1753	}
 1754      else
 1755	return pc;
 1756
 1757      /* Get register.  */
 1758      reg += buf[offset] & 0x7;
 1759
 1760      offset++;
 1761
 1762      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
 1763      if ((buf[offset] & 0xfb) != 0x48
 1764	  || buf[offset + 1] != 0x8d
 1765	  || buf[offset + 3] != 0x24
 1766	  || buf[offset + 4] != 0x10)
 1767	return pc;
 1768
 1769      /* MOD must be binary 10 and R/M must be binary 100.  */
 1770      if ((buf[offset + 2] & 0xc7) != 0x44)
 1771	return pc;
 1772
 1773      /* REG has register number.  */
 1774      r = (buf[offset + 2] >> 3) & 7;
 1775
 1776      /* Check the REX.R bit.  */
 1777      if (buf[offset] == 0x4c)
 1778	r += 8;
 1779
 1780      /* Registers in pushq and leaq have to be the same.  */
 1781      if (reg != r)
 1782	return pc;
 1783
 1784      offset += 5;
 1785    }
 1786
 1787  /* Register can't be %rsp nor %rbp.  */
 1788  if (reg == 4 || reg == 5)
 1789    return pc;
 1790
 1791  /* The next instruction has to be "andq $-XXX, %rsp".  */
 1792  if (buf[offset] != 0x48
 1793      || buf[offset + 2] != 0xe4
 1794      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
 1795    return pc;
 1796
 1797  offset_and = offset;
 1798  offset += buf[offset + 1] == 0x81 ? 7 : 4;
 1799
 1800  /* The next instruction has to be "pushq -8(%reg)".  */
 1801  r = 0;
 1802  if (buf[offset] == 0xff)
 1803    offset++;
 1804  else if ((buf[offset] & 0xf6) == 0x40
 1805	   && buf[offset + 1] == 0xff)
 1806    {
 1807      /* Check the REX.B bit.  */
 1808      if ((buf[offset] & 0x1) != 0)
 1809	r = 8;
 1810      offset += 2;
 1811    }
 1812  else
 1813    return pc;
 1814
 1815  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
 1816     01.  */
 1817  if (buf[offset + 1] != 0xf8
 1818      || (buf[offset] & 0xf8) != 0x70)
 1819    return pc;
 1820
 1821  /* R/M has register.  */
 1822  r += buf[offset] & 7;
 1823
 1824  /* Registers in leaq and pushq have to be the same.  */
 1825  if (reg != r)
 1826    return pc;
 1827
  /* Only record the saved-SP register once execution has actually
     passed the "andq" (i.e. %rsp has really been realigned).  */
 1828  if (current_pc > pc + offset_and)
35669430 1829    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
e0c62198
L
 1830
  /* OFFSET + 2 skips the remaining ModRM byte and 8-bit displacement
     of the final "pushq -8(%reg)".  */
 1831  return min (pc + offset + 2, current_pc);
 1832}
1833
c4f35dd8
MK
1834/* Do a limited analysis of the prologue at PC and update CACHE
1835 accordingly. Bail out early if CURRENT_PC is reached. Return the
1836 address where the analysis stopped.
1837
1838 We will handle only functions beginning with:
1839
1840 pushq %rbp 0x55
1841 movq %rsp, %rbp 0x48 0x89 0xe5
1842
1843 Any function that doesn't start with this sequence will be assumed
1844 to have no prologue and thus no valid frame pointer in %rbp. */
1845
1846static CORE_ADDR
e17a4113
UW
1847amd64_analyze_prologue (struct gdbarch *gdbarch,
1848 CORE_ADDR pc, CORE_ADDR current_pc,
e53bef9f 1849 struct amd64_frame_cache *cache)
53e95fcf 1850{
e17a4113 1851 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
d8de1ef7
MK
1852 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1853 gdb_byte buf[3];
1854 gdb_byte op;
c4f35dd8
MK
1855
1856 if (current_pc <= pc)
1857 return current_pc;
1858
e0c62198
L
1859 pc = amd64_analyze_stack_align (pc, current_pc, cache);
1860
e17a4113 1861 op = read_memory_unsigned_integer (pc, 1, byte_order);
c4f35dd8
MK
1862
1863 if (op == 0x55) /* pushq %rbp */
1864 {
1865 /* Take into account that we've executed the `pushq %rbp' that
1866 starts this instruction sequence. */
90f90721 1867 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
c4f35dd8
MK
1868 cache->sp_offset += 8;
1869
1870 /* If that's all, return now. */
1871 if (current_pc <= pc + 1)
1872 return current_pc;
1873
1874 /* Check for `movq %rsp, %rbp'. */
1875 read_memory (pc + 1, buf, 3);
1876 if (memcmp (buf, proto, 3) != 0)
1877 return pc + 1;
1878
1879 /* OK, we actually have a frame. */
1880 cache->frameless_p = 0;
1881 return pc + 4;
1882 }
1883
1884 return pc;
53e95fcf
JS
1885}
1886
c4f35dd8
MK
1887/* Return PC of first real instruction. */
1888
1889static CORE_ADDR
6093d2eb 1890amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
53e95fcf 1891{
e53bef9f 1892 struct amd64_frame_cache cache;
c4f35dd8
MK
1893 CORE_ADDR pc;
1894
d2449ee8 1895 amd64_init_frame_cache (&cache);
e17a4113
UW
1896 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1897 &cache);
c4f35dd8
MK
1898 if (cache.frameless_p)
1899 return start_pc;
1900
1901 return pc;
53e95fcf 1902}
c4f35dd8 1903\f
53e95fcf 1904
c4f35dd8
MK
1905/* Normal frames. */
1906
e53bef9f 1907static struct amd64_frame_cache *
10458914 1908amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
6d686a84 1909{
e17a4113
UW
1910 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1911 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
e53bef9f 1912 struct amd64_frame_cache *cache;
d8de1ef7 1913 gdb_byte buf[8];
6d686a84 1914 int i;
6d686a84 1915
c4f35dd8
MK
1916 if (*this_cache)
1917 return *this_cache;
6d686a84 1918
e53bef9f 1919 cache = amd64_alloc_frame_cache ();
c4f35dd8
MK
1920 *this_cache = cache;
1921
10458914 1922 cache->pc = get_frame_func (this_frame);
c4f35dd8 1923 if (cache->pc != 0)
e17a4113
UW
1924 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
1925 cache);
c4f35dd8 1926
e0c62198
L
1927 if (cache->saved_sp_reg != -1)
1928 {
1929 /* Stack pointer has been saved. */
1930 get_frame_register (this_frame, cache->saved_sp_reg, buf);
e17a4113 1931 cache->saved_sp = extract_unsigned_integer(buf, 8, byte_order);
e0c62198
L
1932 }
1933
c4f35dd8
MK
1934 if (cache->frameless_p)
1935 {
4a28816e
MK
1936 /* We didn't find a valid frame. If we're at the start of a
1937 function, or somewhere half-way its prologue, the function's
1938 frame probably hasn't been fully setup yet. Try to
1939 reconstruct the base address for the stack frame by looking
1940 at the stack pointer. For truly "frameless" functions this
1941 might work too. */
c4f35dd8 1942
e0c62198
L
1943 if (cache->saved_sp_reg != -1)
1944 {
1945 /* We're halfway aligning the stack. */
1946 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1947 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1948
1949 /* This will be added back below. */
1950 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1951 }
1952 else
1953 {
1954 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
e17a4113
UW
1955 cache->base = extract_unsigned_integer (buf, 8, byte_order)
1956 + cache->sp_offset;
e0c62198 1957 }
c4f35dd8 1958 }
35883a3f
MK
1959 else
1960 {
10458914 1961 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
e17a4113 1962 cache->base = extract_unsigned_integer (buf, 8, byte_order);
35883a3f 1963 }
c4f35dd8
MK
1964
1965 /* Now that we have the base address for the stack frame we can
1966 calculate the value of %rsp in the calling frame. */
1967 cache->saved_sp = cache->base + 16;
1968
35883a3f
MK
1969 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1970 frame we find it at the same offset from the reconstructed base
e0c62198
L
1971 address. If we're halfway aligning the stack, %rip is handled
1972 differently (see above). */
1973 if (!cache->frameless_p || cache->saved_sp_reg == -1)
1974 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
35883a3f 1975
c4f35dd8
MK
1976 /* Adjust all the saved registers such that they contain addresses
1977 instead of offsets. */
e53bef9f 1978 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
1979 if (cache->saved_regs[i] != -1)
1980 cache->saved_regs[i] += cache->base;
1981
1982 return cache;
6d686a84
ML
1983}
1984
c4f35dd8 1985static void
10458914 1986amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
e53bef9f 1987 struct frame_id *this_id)
c4f35dd8 1988{
e53bef9f 1989 struct amd64_frame_cache *cache =
10458914 1990 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
1991
1992 /* This marks the outermost frame. */
1993 if (cache->base == 0)
1994 return;
1995
1996 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1997}
e76e1718 1998
10458914
DJ
/* Implement the "prev_register" frame_unwind method for normal
   frames: return the value REGNUM had in the frame that called
   THIS_FRAME.  */

static struct value *
amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
			   int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  gdb_assert (regnum >= 0);

  /* The previous %rsp is not stored anywhere; it was reconstructed
     from the frame base by amd64_frame_cache.  */
  if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
    return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);

  /* Registers the prologue analysis found saved on the stack are
     read back from memory at the recorded address.  */
  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    return frame_unwind_got_memory (this_frame, regnum,
				    cache->saved_regs[regnum]);

  /* Anything else is assumed unchanged across the call.  */
  return frame_unwind_got_register (this_frame, regnum, regnum);
}
e76e1718 2018
/* Unwinder for normal, prologue-analyzed AMD64 frames.  Uses the
   default sniffer, so it accepts any frame not claimed by an
   earlier unwinder.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  default_frame_sniffer
};
c4f35dd8 2027\f
e76e1718 2028
c4f35dd8
MK
2029/* Signal trampolines. */
2030
2031/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2032 64-bit variants. This would require using identical frame caches
2033 on both platforms. */
2034
/* Build the frame cache for a signal trampoline frame.  The saved
   register addresses are computed from the signal context, whose
   location and layout are supplied by the OS-specific tdep
   (sigcontext_addr / sc_reg_offset).  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  /* Base the frame on the current %rsp; there is no prologue to
     analyze in a signal trampoline.  */
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

  /* Record, for each register the OS saves in the signal context,
     the address where it can be found.  */
  addr = tdep->sigcontext_addr (this_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}
2064
c4f35dd8 2065static void
10458914 2066amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
e53bef9f 2067 void **this_cache, struct frame_id *this_id)
c4f35dd8 2068{
e53bef9f 2069 struct amd64_frame_cache *cache =
10458914 2070 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 2071
10458914 2072 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
c4f35dd8
MK
2073}
2074
10458914
DJ
/* Implement the "prev_register" frame_unwind method for
   signal-trampoline frames.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Populating the cache is all that is specific to sigtramp frames;
     the actual register lookup is shared with the normal unwinder.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
2084
10458914
DJ
/* Sniffer for signal-trampoline frames.  Returns non-zero if
   THIS_FRAME looks like a signal trampoline, using (in order) the
   OS-specific sigtramp_p predicate and then a fixed
   [sigtramp_start, sigtramp_end) address range.  */

static int
amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_cache)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));

  /* We shouldn't even bother if we don't have a sigcontext_addr
     handler.  */
  if (tdep->sigcontext_addr == NULL)
    return 0;

  if (tdep->sigtramp_p != NULL)
    {
      if (tdep->sigtramp_p (this_frame))
	return 1;
    }

  if (tdep->sigtramp_start != 0)
    {
      CORE_ADDR pc = get_frame_pc (this_frame);

      /* A start address without an end address would make the range
	 check meaningless.  */
      gdb_assert (tdep->sigtramp_end != 0);
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
	return 1;
    }

  return 0;
}
10458914
DJ
2114
/* Unwinder for AMD64 signal-trampoline frames, selected by
   amd64_sigtramp_frame_sniffer.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
c4f35dd8
MK
2123\f
2124
2125static CORE_ADDR
10458914 2126amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
c4f35dd8 2127{
e53bef9f 2128 struct amd64_frame_cache *cache =
10458914 2129 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
2130
2131 return cache->base;
2132}
2133
/* Frame base for normal frames: the same cached base address is used
   for the frame, locals and arguments bases.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
2141
872761f4
MS
2142/* Normal frames, but in a function epilogue. */
2143
2144/* The epilogue is defined here as the 'ret' instruction, which will
2145 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2146 the function's stack frame. */
2147
2148static int
2149amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2150{
2151 gdb_byte insn;
2152
2153 if (target_read_memory (pc, &insn, 1))
2154 return 0; /* Can't read memory at pc. */
2155
2156 if (insn != 0xc3) /* 'ret' instruction. */
2157 return 0;
2158
2159 return 1;
2160}
2161
/* Sniffer for epilogue frames: only the innermost frame can be
   stopped in an epilogue.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
				       get_frame_pc (this_frame));
}
2173
/* Build the frame cache for a frame stopped in a function epilogue.
   At the 'ret' instruction the frame has already been torn down, so
   everything is reconstructed directly from %rsp.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8,
					  byte_order) + cache->sp_offset;

  /* Cache pc will be the frame func.  */
  cache->pc = get_frame_pc (this_frame);

  /* The saved %rsp will be at cache->base plus 16.  */
  cache->saved_sp = cache->base + 16;

  /* The saved %rip will be at cache->base plus 8.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

  return cache;
}
2204
2205static void
2206amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2207 void **this_cache,
2208 struct frame_id *this_id)
2209{
2210 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2211 this_cache);
2212
2213 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2214}
2215
/* Unwinder for frames stopped in a function epilogue; prepended to
   the unwinder list so it takes precedence over the normal
   prologue-based unwinder.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};
2224
166f4c7b 2225static struct frame_id
10458914 2226amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
166f4c7b 2227{
c4f35dd8
MK
2228 CORE_ADDR fp;
2229
10458914 2230 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
c4f35dd8 2231
10458914 2232 return frame_id_build (fp + 16, get_frame_pc (this_frame));
166f4c7b
ML
2233}
2234
8b148df9
AC
2235/* 16 byte align the SP per frame requirements. */
2236
2237static CORE_ADDR
e53bef9f 2238amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
8b148df9
AC
2239{
2240 return sp & -(CORE_ADDR)16;
2241}
473f17b0
MK
2242\f
2243
593adc23
MK
2244/* Supply register REGNUM from the buffer specified by FPREGS and LEN
2245 in the floating-point register set REGSET to register cache
2246 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
473f17b0
MK
2247
/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
		       int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}
8b148df9 2257
593adc23
MK
2258/* Collect register REGNUM from the register cache REGCACHE and store
2259 it in the buffer specified by FPREGS and LEN as described by the
2260 floating-point register set REGSET. If REGNUM is -1, do this for
2261 all registers in REGSET. */
2262
/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  */

static void
amd64_collect_fpregset (const struct regset *regset,
			const struct regcache *regcache,
			int regnum, void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);
}
2273
a055a187
L
2274/* Similar to amd64_supply_fpregset, but use XSAVE extended state. */
2275
2276static void
2277amd64_supply_xstateregset (const struct regset *regset,
2278 struct regcache *regcache, int regnum,
2279 const void *xstateregs, size_t len)
2280{
a055a187
L
2281 amd64_supply_xsave (regcache, regnum, xstateregs);
2282}
2283
2284/* Similar to amd64_collect_fpregset, but use XSAVE extended state. */
2285
2286static void
2287amd64_collect_xstateregset (const struct regset *regset,
2288 const struct regcache *regcache,
2289 int regnum, void *xstateregs, size_t len)
2290{
a055a187
L
2291 amd64_collect_xsave (regcache, regnum, xstateregs, 1);
2292}
2293
c6b33596
MK
2294/* Return the appropriate register set for the core section identified
2295 by SECT_NAME and SECT_SIZE. */
2296
/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  Regsets are allocated lazily and
   cached in the tdep.  */

static const struct regset *
amd64_regset_from_core_section (struct gdbarch *gdbarch,
				const char *sect_name, size_t sect_size)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* ".reg2" holds the classic FXSAVE floating-point block.  */
  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
    {
      if (tdep->fpregset == NULL)
	tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
				       amd64_collect_fpregset);

      return tdep->fpregset;
    }

  /* ".reg-xstate" holds the XSAVE extended state (e.g. AVX).  */
  if (strcmp (sect_name, ".reg-xstate") == 0)
    {
      if (tdep->xstateregset == NULL)
	tdep->xstateregset = regset_alloc (gdbarch,
					   amd64_supply_xstateregset,
					   amd64_collect_xstateregset);

      return tdep->xstateregset;
    }

  /* Fall back to the common i386/amd64 handling (e.g. ".reg").  */
  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
}
2324\f
2325
436675d3
PA
2326/* Figure out where the longjmp will land. Slurp the jmp_buf out of
2327 %rdi. We expect its value to be a pointer to the jmp_buf structure
2328 from which we extract the address that we will land at. This
2329 address is copied into PC. This routine returns non-zero on
2330 success. */
2331
2332static int
2333amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2334{
2335 gdb_byte buf[8];
2336 CORE_ADDR jb_addr;
2337 struct gdbarch *gdbarch = get_frame_arch (frame);
2338 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
0dfff4cb 2339 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2340
2341 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2342 longjmp will land. */
2343 if (jb_pc_offset == -1)
2344 return 0;
2345
2346 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
0dfff4cb
UW
2347 jb_addr= extract_typed_address
2348 (buf, builtin_type (gdbarch)->builtin_data_ptr);
436675d3
PA
2349 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2350 return 0;
2351
0dfff4cb 2352 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2353
2354 return 1;
2355}
2356
cf648174
HZ
/* Register map used by the process-record support; installed as
   tdep->record_regmap in amd64_init_abi.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
2366
/* Initialize the AMD64-generic parts of GDBARCH.  Called by the
   OS-specific amd64 tdep files after the common i386 setup; INFO
   carries the target description (if any).  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* Fall back to the default amd64 target description if the target
     didn't supply one.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Enable the AVX upper-half (YMMnH) registers when the target
     description advertises the AVX feature.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  /* Pseudo-register counts: byte, word and dword views of the
     general-purpose registers.  */
  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read (gdbarch,
				    amd64_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);
  tdep->call_dummy_num_integer_regs =
    ARRAY_SIZE (amd64_dummy_call_integer_regs);
  tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
  tdep->classify = amd64_classify;

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file
     support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
					  amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
}
90884b2b
L
2478
2479/* Provide a prototype to silence -Wmissing-prototypes. */
2480void _initialize_amd64_tdep (void);
2481
/* Module initializer: register the built-in amd64 target
   descriptions.  */

void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
  initialize_tdesc_amd64_avx ();
}
c4f35dd8
MK
2488\f
2489
41d041d6
MK
2490/* The 64-bit FXSAVE format differs from the 32-bit format in the
2491 sense that the instruction pointer and data pointer are simply
2492 64-bit offsets into the code segment and the data segment instead
2493 of a selector offset pair. The functions below store the upper 32
2494 bits of these pointers (instead of just the 16-bits of the segment
2495 selector). */
2496
2497/* Fill register REGNUM in REGCACHE with the appropriate
0485f6ad
MK
2498 floating-point or SSE register value from *FXSAVE. If REGNUM is
2499 -1, do this for all registers. This function masks off any of the
2500 reserved bits in *FXSAVE. */
c4f35dd8
MK
2501
2502void
90f90721 2503amd64_supply_fxsave (struct regcache *regcache, int regnum,
20a6ec49 2504 const void *fxsave)
c4f35dd8 2505{
20a6ec49
MD
2506 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2507 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2508
41d041d6 2509 i387_supply_fxsave (regcache, regnum, fxsave);
c4f35dd8 2510
20a6ec49 2511 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
c4f35dd8 2512 {
d8de1ef7 2513 const gdb_byte *regs = fxsave;
41d041d6 2514
20a6ec49
MD
2515 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2516 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2517 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2518 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
c4f35dd8 2519 }
0c1a73d6
MK
2520}
2521
a055a187
L
2522/* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
2523
/* Similar to amd64_supply_fxsave, but use XSAVE extended state.  */

void
amd64_supply_xsave (struct regcache *regcache, int regnum,
		    const void *xsave)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  i387_supply_xsave (regcache, regnum, xsave);

  if (xsave && gdbarch_ptr_bit (gdbarch) == 64)
    {
      const gdb_byte *regs = xsave;

      /* Same fix-up as amd64_supply_fxsave: supply the upper halves
	 of the 64-bit FP instruction/data pointers.  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
			     regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
			     regs + 20);
    }
}
2545
3c017e40
MK
2546/* Fill register REGNUM (if it is a floating-point or SSE register) in
2547 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
2548 all registers. This function doesn't touch any of the reserved
2549 bits in *FXSAVE. */
2550
/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
		      void *fxsave)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_ptr_bit (gdbarch) == 64)
    {
      /* Write back the upper halves of the 64-bit FP
	 instruction/data pointers (offsets 12 and 20).  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}
a055a187
L
2569
/* Similar to amd64_collect_fxsave, but use XSAVE extended state.
   GCORE is passed through to i387_collect_xsave.  */

void
amd64_collect_xsave (const struct regcache *regcache, int regnum,
		     void *xsave, int gcore)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = xsave;

  i387_collect_xsave (regcache, regnum, xsave, gcore);

  if (gdbarch_ptr_bit (gdbarch) == 64)
    {
      /* Write back the upper halves of the 64-bit FP
	 instruction/data pointers (offsets 12 and 20).  */
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
			      regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
			      regs + 20);
    }
}
This page took 1.176275 seconds and 4 git commands to generate.