gdb/amd64-windows-tdep.c
1 /* Copyright (C) 2009-2021 Free Software Foundation, Inc.
2
3 This file is part of GDB.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>. */
17
18 #include "defs.h"
19 #include "osabi.h"
20 #include "amd64-tdep.h"
21 #include "gdbsupport/x86-xstate.h"
22 #include "gdbtypes.h"
23 #include "gdbcore.h"
24 #include "regcache.h"
25 #include "windows-tdep.h"
26 #include "frame.h"
27 #include "objfiles.h"
28 #include "frame-unwind.h"
29 #include "coff/internal.h"
30 #include "coff/i386.h"
31 #include "coff/pe.h"
32 #include "libcoff.h"
33 #include "value.h"
34 #include <algorithm>
35
36 /* The registers used to pass integer arguments during a function call. */
37 static int amd64_windows_dummy_call_integer_regs[] =
38 {
39 AMD64_RCX_REGNUM, /* %rcx */
40 AMD64_RDX_REGNUM, /* %rdx */
41 AMD64_R8_REGNUM, /* %r8 */
42 AMD64_R9_REGNUM /* %r9 */
43 };
44
45 /* This vector maps GDB's idea of a register's number into an offset into
46 the Windows API CONTEXT structure. */
47 static int amd64_windows_gregset_reg_offset[] =
48 {
49 120, /* Rax */
50 144, /* Rbx */
51 128, /* Rcx */
52 136, /* Rdx */
53 168, /* Rsi */
54 176, /* Rdi */
55 160, /* Rbp */
56 152, /* Rsp */
57 184, /* R8 */
58 192, /* R9 */
59 200, /* R10 */
60 208, /* R11 */
61 216, /* R12 */
62 224, /* R13 */
63 232, /* R14 */
64 240, /* R15 */
65 248, /* Rip */
66 68, /* EFlags */
67 56, /* SegCs */
68 66, /* SegSs */
69 58, /* SegDs */
70 60, /* SegEs */
71 62, /* SegFs */
72 64, /* SegGs */
73 288, /* FloatSave.FloatRegisters[0] */
74 304, /* FloatSave.FloatRegisters[1] */
75 320, /* FloatSave.FloatRegisters[2] */
76 336, /* FloatSave.FloatRegisters[3] */
77 352, /* FloatSave.FloatRegisters[4] */
78 368, /* FloatSave.FloatRegisters[5] */
79 384, /* FloatSave.FloatRegisters[6] */
80 400, /* FloatSave.FloatRegisters[7] */
81 256, /* FloatSave.ControlWord */
82 258, /* FloatSave.StatusWord */
83 260, /* FloatSave.TagWord */
84 268, /* FloatSave.ErrorSelector */
85 264, /* FloatSave.ErrorOffset */
86 276, /* FloatSave.DataSelector */
87 272, /* FloatSave.DataOffset */
88 268, /* FloatSave.ErrorSelector */
89 416, /* Xmm0 */
90 432, /* Xmm1 */
91 448, /* Xmm2 */
92 464, /* Xmm3 */
93 480, /* Xmm4 */
94 496, /* Xmm5 */
95 512, /* Xmm6 */
96 528, /* Xmm7 */
97 544, /* Xmm8 */
98 560, /* Xmm9 */
99 576, /* Xmm10 */
100 592, /* Xmm11 */
101 608, /* Xmm12 */
102 624, /* Xmm13 */
103 640, /* Xmm14 */
104 656, /* Xmm15 */
105 280, /* FloatSave.MxCsr */
106 };
107
108 #define AMD64_WINDOWS_SIZEOF_GREGSET 1232
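/* For illustration only (a sketch, not code from this port): the regset
   machinery ends up doing roughly

     regcache->raw_supply (regnum,
                           context_buf
                           + amd64_windows_gregset_reg_offset[regnum]);

   for each register, so e.g. %rax is taken from offset 120 (0x78) of a
   Win64 CONTEXT block and %rip from offset 248 (0xf8), and the whole
   block is AMD64_WINDOWS_SIZEOF_GREGSET (0x4d0) bytes -- the same layout
   used by GetThreadContext/SetThreadContext. */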
109
110 /* Return nonzero if an argument of type TYPE should be passed
111 via one of the integer registers. */
112
113 static int
114 amd64_windows_passed_by_integer_register (struct type *type)
115 {
116 switch (type->code ())
117 {
118 case TYPE_CODE_INT:
119 case TYPE_CODE_ENUM:
120 case TYPE_CODE_BOOL:
121 case TYPE_CODE_RANGE:
122 case TYPE_CODE_CHAR:
123 case TYPE_CODE_PTR:
124 case TYPE_CODE_REF:
125 case TYPE_CODE_RVALUE_REF:
126 case TYPE_CODE_STRUCT:
127 case TYPE_CODE_UNION:
128 case TYPE_CODE_COMPLEX:
129 return (TYPE_LENGTH (type) == 1
130 || TYPE_LENGTH (type) == 2
131 || TYPE_LENGTH (type) == 4
132 || TYPE_LENGTH (type) == 8);
133
134 default:
135 return 0;
136 }
137 }
138
139 /* Return nonzero if an argument of type TYPE should be passed
140 via one of the XMM registers. */
141
142 static int
143 amd64_windows_passed_by_xmm_register (struct type *type)
144 {
145 return ((type->code () == TYPE_CODE_FLT
146 || type->code () == TYPE_CODE_DECFLOAT)
147 && (TYPE_LENGTH (type) == 4 || TYPE_LENGTH (type) == 8));
148 }
149
150 /* Return non-zero iff an argument of the given TYPE should be passed
151 by pointer. */
152
153 static int
154 amd64_windows_passed_by_pointer (struct type *type)
155 {
156 if (amd64_windows_passed_by_integer_register (type))
157 return 0;
158
159 if (amd64_windows_passed_by_xmm_register (type))
160 return 0;
161
162 return 1;
163 }
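/* As a rough illustration of how the predicates above classify
   Microsoft x64 arguments (only sizes 1, 2, 4 and 8 travel in
   registers):

     int, long long, void *, an 8-byte struct -> integer register
     float, double                            -> XMM register
     a 16-byte struct, an x87 long double     -> copied to memory and
                                                 passed by pointer. */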
164
165 /* For each argument that should be passed by pointer, reserve some
166 stack space, store a copy of the argument on the stack, and replace
167 the argument by its address. Return the new Stack Pointer value.
168
169 NARGS is the number of arguments. ARGS is the array containing
170 the value of each argument. SP is value of the Stack Pointer. */
171
172 static CORE_ADDR
173 amd64_windows_adjust_args_passed_by_pointer (struct value **args,
174 int nargs, CORE_ADDR sp)
175 {
176 int i;
177
178 for (i = 0; i < nargs; i++)
179 if (amd64_windows_passed_by_pointer (value_type (args[i])))
180 {
181 struct type *type = value_type (args[i]);
182 const gdb_byte *valbuf = value_contents (args[i]);
183 const int len = TYPE_LENGTH (type);
184
185 /* Store a copy of that argument on the stack, aligned to
186 a 16-byte boundary, and then use the copy's address as
187 the argument. */
188
189 sp -= len;
190 sp &= ~0xf;
191 write_memory (sp, valbuf, len);
192
193 args[i]
194 = value_addr (value_from_contents_and_address (type, valbuf, sp));
195 }
196
197 return sp;
198 }
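/* Worked example with made-up numbers: for a 32-byte struct argument
   and SP = 0x1000, the loop above copies the struct to
   0x1000 - 32 = 0xfe0 (already 16-byte aligned), and the argument seen
   by the rest of the call sequence becomes the pointer value 0xfe0. */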
199
200 /* Store the value of ARG in register REGNO (right-justified).
201 REGCACHE is the register cache. */
202
203 static void
204 amd64_windows_store_arg_in_reg (struct regcache *regcache,
205 struct value *arg, int regno)
206 {
207 struct type *type = value_type (arg);
208 const gdb_byte *valbuf = value_contents (arg);
209 gdb_byte buf[8];
210
211 gdb_assert (TYPE_LENGTH (type) <= 8);
212 memset (buf, 0, sizeof buf);
213 memcpy (buf, valbuf, std::min (TYPE_LENGTH (type), (ULONGEST) 8));
214 regcache->cooked_write (regno, buf);
215 }
216
217 /* Push the arguments for an inferior function call, and return
218 the updated value of the SP (Stack Pointer).
219
220 All arguments are identical to the arguments used in
221 amd64_windows_push_dummy_call. */
222
223 static CORE_ADDR
224 amd64_windows_push_arguments (struct regcache *regcache, int nargs,
225 struct value **args, CORE_ADDR sp,
226 function_call_return_method return_method)
227 {
228 int reg_idx = 0;
229 int i;
230 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
231 int num_stack_args = 0;
232 int num_elements = 0;
233 int element = 0;
234
235 /* First, handle the arguments passed by pointer.
236
237 These arguments are replaced by pointers to a copy we are making
238 in inferior memory. So use a copy of the ARGS table, to avoid
239 modifying the original one. */
240 {
241 struct value **args1 = XALLOCAVEC (struct value *, nargs);
242
243 memcpy (args1, args, nargs * sizeof (struct value *));
244 sp = amd64_windows_adjust_args_passed_by_pointer (args1, nargs, sp);
245 args = args1;
246 }
247
248 /* Reserve a register for the "hidden" argument. */
249 if (return_method == return_method_struct)
250 reg_idx++;
251
252 for (i = 0; i < nargs; i++)
253 {
254 struct type *type = value_type (args[i]);
255 int len = TYPE_LENGTH (type);
256 int on_stack_p = 1;
257
258 if (reg_idx < ARRAY_SIZE (amd64_windows_dummy_call_integer_regs))
259 {
260 if (amd64_windows_passed_by_integer_register (type))
261 {
262 amd64_windows_store_arg_in_reg
263 (regcache, args[i],
264 amd64_windows_dummy_call_integer_regs[reg_idx]);
265 on_stack_p = 0;
266 reg_idx++;
267 }
268 else if (amd64_windows_passed_by_xmm_register (type))
269 {
270 amd64_windows_store_arg_in_reg
271 (regcache, args[i], AMD64_XMM0_REGNUM + reg_idx);
272 /* In case of varargs, these parameters must also be
273 passed via the integer registers. */
274 amd64_windows_store_arg_in_reg
275 (regcache, args[i],
276 amd64_windows_dummy_call_integer_regs[reg_idx]);
277 on_stack_p = 0;
278 reg_idx++;
279 }
280 }
281
282 if (on_stack_p)
283 {
284 num_elements += ((len + 7) / 8);
285 stack_args[num_stack_args++] = args[i];
286 }
287 }
288
289 /* Allocate space for the arguments on the stack, keeping it
290 aligned on a 16-byte boundary. */
291 sp -= num_elements * 8;
292 sp &= ~0xf;
293
294 /* Write out the arguments to the stack. */
295 for (i = 0; i < num_stack_args; i++)
296 {
297 struct type *type = value_type (stack_args[i]);
298 const gdb_byte *valbuf = value_contents (stack_args[i]);
299
300 write_memory (sp + element * 8, valbuf, TYPE_LENGTH (type));
301 element += ((TYPE_LENGTH (type) + 7) / 8);
302 }
303
304 return sp;
305 }
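/* Example of the resulting assignment for a hypothetical call
   f (1, 2.5, p, 4, 5) with no struct return:

     arg 0 (int)     -> %rcx
     arg 1 (double)  -> %xmm1 (and %rdx, for the varargs case)
     arg 2 (pointer) -> %r8
     arg 3 (int)     -> %r9
     arg 4 (int)     -> first 8-byte stack slot

   because reg_idx above counts argument positions, not just the
   integer registers actually used. */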
306
307 /* Implement the "push_dummy_call" gdbarch method. */
308
309 static CORE_ADDR
310 amd64_windows_push_dummy_call
311 (struct gdbarch *gdbarch, struct value *function,
312 struct regcache *regcache, CORE_ADDR bp_addr,
313 int nargs, struct value **args, CORE_ADDR sp,
314 function_call_return_method return_method, CORE_ADDR struct_addr)
315 {
316 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
317 gdb_byte buf[8];
318
319 /* Pass arguments. */
320 sp = amd64_windows_push_arguments (regcache, nargs, args, sp,
321 return_method);
322
323 /* Pass the "hidden" argument. */
324 if (return_method == return_method_struct)
325 {
326 /* The "hidden" argument is passed through the first argument
327 register. */
328 const int arg_regnum = amd64_windows_dummy_call_integer_regs[0];
329
330 store_unsigned_integer (buf, 8, byte_order, struct_addr);
331 regcache->cooked_write (arg_regnum, buf);
332 }
333
334 /* Reserve some memory on the stack for the integer-parameter
335 registers, as required by the ABI. */
336 sp -= ARRAY_SIZE (amd64_windows_dummy_call_integer_regs) * 8;
337
338 /* Store return address. */
339 sp -= 8;
340 store_unsigned_integer (buf, 8, byte_order, bp_addr);
341 write_memory (sp, buf, 8);
342
343 /* Update the stack pointer... */
344 store_unsigned_integer (buf, 8, byte_order, sp);
345 regcache->cooked_write (AMD64_RSP_REGNUM, buf);
346
347 /* ...and fake a frame pointer. */
348 regcache->cooked_write (AMD64_RBP_REGNUM, buf);
349
350 return sp + 16;
351 }
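/* Sketch of the dummy frame built above (addresses relative to the
   final SP):

     sp + 0  : return address (BP_ADDR)
     sp + 8  : 32 bytes of "home" space for %rcx/%rdx/%r8/%r9
     sp + 40 : stack arguments, with the by-pointer copies above them

   The returned value, sp + 16, is what the generic call machinery then
   uses when building the dummy frame's ID. */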
352
353 /* Implement the "return_value" gdbarch method for amd64-windows. */
354
355 static enum return_value_convention
356 amd64_windows_return_value (struct gdbarch *gdbarch, struct value *function,
357 struct type *type, struct regcache *regcache,
358 gdb_byte *readbuf, const gdb_byte *writebuf)
359 {
360 int len = TYPE_LENGTH (type);
361 int regnum = -1;
362
363 /* See if our value is returned through a register. If it is, then
364 store the associated register number in REGNUM. */
365 switch (type->code ())
366 {
367 case TYPE_CODE_FLT:
368 /* Floats and doubles are returned via XMM0. */
369 if (len == 4 || len == 8)
370 regnum = AMD64_XMM0_REGNUM;
371 break;
372 case TYPE_CODE_ARRAY:
373 /* __m128, __m128i and __m128d are returned via XMM0. */
374 if (type->is_vector () && len == 16)
375 {
376 enum type_code code = TYPE_TARGET_TYPE (type)->code ();
377 if (code == TYPE_CODE_INT || code == TYPE_CODE_FLT)
378 {
379 regnum = AMD64_XMM0_REGNUM;
380 break;
381 }
382 }
383 /* fall through */
384 default:
385 /* All other values that are 1, 2, 4 or 8 bytes long are returned
386 via RAX. */
387 if (len == 1 || len == 2 || len == 4 || len == 8)
388 regnum = AMD64_RAX_REGNUM;
389 else if (len == 16 && type->code () == TYPE_CODE_INT)
390 regnum = AMD64_XMM0_REGNUM;
391 break;
392 }
393
394 if (regnum < 0)
395 {
396 /* RAX contains the address where the return value has been stored. */
397 if (readbuf)
398 {
399 ULONGEST addr;
400
401 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
402 read_memory (addr, readbuf, TYPE_LENGTH (type));
403 }
404 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
405 }
406 else
407 {
408 /* Extract the return value from the register where it was stored. */
409 if (readbuf)
410 regcache->raw_read_part (regnum, 0, len, readbuf);
411 if (writebuf)
412 regcache->raw_write_part (regnum, 0, len, writebuf);
413 return RETURN_VALUE_REGISTER_CONVENTION;
414 }
415 }
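/* In short, the convention implemented above: float, double and the
   16-byte vector types come back in %xmm0; other values of size 1, 2,
   4 or 8 come back in %rax; anything else is written to caller-provided
   memory whose address the callee leaves in %rax. */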
416
417 /* Check that the code pointed to by PC corresponds to a call to
418 __main, and skip it if so. Return PC otherwise. */
419
420 static CORE_ADDR
421 amd64_skip_main_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
422 {
423 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
424 gdb_byte op;
425
426 target_read_memory (pc, &op, 1);
427 if (op == 0xe8)
428 {
429 gdb_byte buf[4];
430
431 if (target_read_memory (pc + 1, buf, sizeof buf) == 0)
432 {
433 struct bound_minimal_symbol s;
434 CORE_ADDR call_dest;
435
436 call_dest = pc + 5 + extract_signed_integer (buf, 4, byte_order);
437 s = lookup_minimal_symbol_by_pc (call_dest);
438 if (s.minsym != NULL
439 && s.minsym->linkage_name () != NULL
440 && strcmp (s.minsym->linkage_name (), "__main") == 0)
441 pc += 5;
442 }
443 }
444
445 return pc;
446 }
447
448 struct amd64_windows_frame_cache
449 {
450 /* ImageBase for the module. */
451 CORE_ADDR image_base;
452
453 /* Function start and end rva. */
454 CORE_ADDR start_rva;
455 CORE_ADDR end_rva;
456
457 /* Next instruction to be executed. */
458 CORE_ADDR pc;
459
460 /* Current sp. */
461 CORE_ADDR sp;
462
463 /* Address of saved integer and xmm registers. */
464 CORE_ADDR prev_reg_addr[16];
465 CORE_ADDR prev_xmm_addr[16];
466
467 /* The next two fields are set only for machine info frames. */
468
469 /* Likewise for RIP. */
470 CORE_ADDR prev_rip_addr;
471
472 /* Likewise for RSP. */
473 CORE_ADDR prev_rsp_addr;
474
475 /* Address of the previous frame. */
476 CORE_ADDR prev_sp;
477 };
478
479 /* Map a Windows register number to the corresponding GDB register number. */
480 static const enum amd64_regnum amd64_windows_w2gdb_regnum[] =
481 {
482 AMD64_RAX_REGNUM,
483 AMD64_RCX_REGNUM,
484 AMD64_RDX_REGNUM,
485 AMD64_RBX_REGNUM,
486 AMD64_RSP_REGNUM,
487 AMD64_RBP_REGNUM,
488 AMD64_RSI_REGNUM,
489 AMD64_RDI_REGNUM,
490 AMD64_R8_REGNUM,
491 AMD64_R9_REGNUM,
492 AMD64_R10_REGNUM,
493 AMD64_R11_REGNUM,
494 AMD64_R12_REGNUM,
495 AMD64_R13_REGNUM,
496 AMD64_R14_REGNUM,
497 AMD64_R15_REGNUM
498 };
499
500 /* Return TRUE iff PC is in the range of the function corresponding to
501 CACHE. */
502
503 static int
504 pc_in_range (CORE_ADDR pc, const struct amd64_windows_frame_cache *cache)
505 {
506 return (pc >= cache->image_base + cache->start_rva
507 && pc < cache->image_base + cache->end_rva);
508 }
509
510 /* Try to recognize and decode an epilogue sequence.
511
512 Return -1 if we fail to read the instructions for any reason.
513 Return 1 if an epilogue sequence was recognized, 0 otherwise. */
514
515 static int
516 amd64_windows_frame_decode_epilogue (struct frame_info *this_frame,
517 struct amd64_windows_frame_cache *cache)
518 {
519 /* According to MSDN an epilogue "must consist of either an add RSP,constant
520 or lea RSP,constant[FPReg], followed by a series of zero or more 8-byte
521 register pops and a return or a jmp".
522
523 Furthermore, according to RtlVirtualUnwind, the complete list of
524 epilog markers is:
525 - ret [c3]
526 - ret n [c2 imm16]
527 - rep ret [f3 c3]
528 - jmp imm8 | imm32 [eb rel8] or [e9 rel32]
529 - jmp qword ptr imm32 - not handled
530 - rex.w jmp reg [4X ff eY]
531 */
532
533 CORE_ADDR pc = cache->pc;
534 CORE_ADDR cur_sp = cache->sp;
535 struct gdbarch *gdbarch = get_frame_arch (this_frame);
536 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
537 gdb_byte op;
538 gdb_byte rex;
539
540 /* We don't care about the instruction deallocating the frame:
541 if it hasn't been executed, the pc is still in the body,
542 if it has been executed, the following epilog decoding will work. */
543
544 /* First decode:
545 - pop reg [41 58-5f] or [58-5f]. */
546
547 while (1)
548 {
549 /* Read opcode. */
550 if (target_read_memory (pc, &op, 1) != 0)
551 return -1;
552
553 if (op >= 0x40 && op <= 0x4f)
554 {
555 /* REX prefix. */
556 rex = op;
557
558 /* Read opcode. */
559 if (target_read_memory (pc + 1, &op, 1) != 0)
560 return -1;
561 }
562 else
563 rex = 0;
564
565 if (op >= 0x58 && op <= 0x5f)
566 {
567 /* pop reg */
568 gdb_byte reg = (op & 0x07) | ((rex & 1) << 3);
569
570 cache->prev_reg_addr[amd64_windows_w2gdb_regnum[reg]] = cur_sp;
571 cur_sp += 8;
572 pc += rex ? 2 : 1;
573 }
574 else
575 break;
576
577 /* Allow the user to break this loop. This shouldn't happen as the
578 number of consecutive pops should be small. */
579 QUIT;
580 }
581
582 /* Then decode the marker. */
583
584 /* Read opcode. */
585 if (target_read_memory (pc, &op, 1) != 0)
586 return -1;
587
588 switch (op)
589 {
590 case 0xc3:
591 /* Ret. */
592 cache->prev_rip_addr = cur_sp;
593 cache->prev_sp = cur_sp + 8;
594 return 1;
595
596 case 0xeb:
597 {
598 /* jmp rel8 */
599 gdb_byte rel8;
600 CORE_ADDR npc;
601
602 if (target_read_memory (pc + 1, &rel8, 1) != 0)
603 return -1;
604 npc = pc + 2 + (signed char) rel8;
605
606 /* If the jump is within the function, then this is not a marker,
607 otherwise this is a tail-call. */
608 return !pc_in_range (npc, cache);
609 }
610
611 case 0xe9:
612 {
613 /* jmp rel32 */
614 gdb_byte rel32[4];
615 CORE_ADDR npc;
616
617 if (target_read_memory (pc + 1, rel32, 4) != 0)
618 return -1;
619 npc = pc + 5 + extract_signed_integer (rel32, 4, byte_order);
620
621 /* If the jump is within the function, then this is not a marker,
622 otherwise this is a tail-call. */
623 return !pc_in_range (npc, cache);
624 }
625
626 case 0xc2:
627 {
628 /* ret n */
629 gdb_byte imm16[2];
630
631 if (target_read_memory (pc + 1, imm16, 2) != 0)
632 return -1;
633 cache->prev_rip_addr = cur_sp;
634 cache->prev_sp = cur_sp + 8
635 + extract_unsigned_integer (imm16, 2, byte_order);
636 return 1;
637 }
638
639 case 0xf3:
640 {
641 /* rep; ret */
642 gdb_byte op1;
643
644 if (target_read_memory (pc + 1, &op1, 1) != 0)
645 return -1;
646 if (op1 != 0xc3)
647 return 0;
648
649 cache->prev_rip_addr = cur_sp;
650 cache->prev_sp = cur_sp + 8;
651 return 1;
652 }
653
654 case 0x40:
655 case 0x41:
656 case 0x42:
657 case 0x43:
658 case 0x44:
659 case 0x45:
660 case 0x46:
661 case 0x47:
662 case 0x48:
663 case 0x49:
664 case 0x4a:
665 case 0x4b:
666 case 0x4c:
667 case 0x4d:
668 case 0x4e:
669 case 0x4f:
670 /* Got a REX prefix, read next byte. */
671 rex = op;
672 if (target_read_memory (pc + 1, &op, 1) != 0)
673 return -1;
674
675 if (op == 0xff)
676 {
677 /* rex jmp reg */
678 gdb_byte op1;
679
680 if (target_read_memory (pc + 2, &op1, 1) != 0)
681 return -1;
682 return (op1 & 0xf8) == 0xe0;
683 }
684 else
685 return 0;
686
687 default:
688 /* Not REX, so unknown. */
689 return 0;
690 }
691 }
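/* An example epilogue the decoder above accepts, assuming the PC stops
   at the first pop (a preceding "add $0x28,%rsp" is treated as body,
   per the comment at the top of the function):

     pop  %rbx       5b    -> %rbx saved at cur_sp
     pop  %rbp       5d    -> %rbp saved at cur_sp + 8
     ret             c3    -> prev_rip_addr = cur_sp + 16,
                              prev_sp = cur_sp + 24  */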
692
693 /* Decode and execute unwind insns at UNWIND_INFO. */
694
695 static void
696 amd64_windows_frame_decode_insns (struct frame_info *this_frame,
697 struct amd64_windows_frame_cache *cache,
698 CORE_ADDR unwind_info)
699 {
700 CORE_ADDR save_addr = 0;
701 CORE_ADDR cur_sp = cache->sp;
702 struct gdbarch *gdbarch = get_frame_arch (this_frame);
703 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
704 int first = 1;
705
706 /* There are at least 3 possibilities to share an unwind info entry:
707 1. Two different runtime_function entries (in .pdata) can point to the
708 same unwind info entry. There is no such indication while unwinding,
709 so we don't really care about that case. We suppose this scheme is
710 used to save memory when the unwind entries are exactly the same.
711 2. Chained unwind_info entries, with no unwind codes (no prologue).
712 There is a major difference with the previous case: the pc range for
713 the function is different (in case 1, the pc range comes from the
714 runtime_function entry; in case 2, the pc range for the chained entry
715 comes from the first unwind entry). Case 1 cannot be used instead as
716 the pc is not in the prologue. This case is officially documented.
717 (There might be unwind code in the first unwind entry to handle
718 additional unwinding). GCC (at least until gcc 5.0) doesn't chain
719 entries.
720 3. Undocumented unwind info redirection. Hard to know the exact purpose,
721 so it is considered as a memory optimization of case 2.
722 */
723
724 if (unwind_info & 1)
725 {
726 /* Unofficially documented unwind info redirection, when UNWIND_INFO
727 address is odd (http://www.codemachine.com/article_x64deepdive.html).
728 */
729 struct external_pex64_runtime_function d;
730
731 if (target_read_memory (cache->image_base + (unwind_info & ~1),
732 (gdb_byte *) &d, sizeof (d)) != 0)
733 return;
734
735 cache->start_rva
736 = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
737 cache->end_rva
738 = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
739 unwind_info
740 = extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
741 }
742
743 while (1)
744 {
745 struct external_pex64_unwind_info ex_ui;
746 /* There are at most 256 16-bit unwind insns. */
747 gdb_byte insns[2 * 256];
748 gdb_byte *p;
749 gdb_byte *end_insns;
750 unsigned char codes_count;
751 unsigned char frame_reg;
752 CORE_ADDR start;
753
754 /* Read and decode header. */
755 if (target_read_memory (cache->image_base + unwind_info,
756 (gdb_byte *) &ex_ui, sizeof (ex_ui)) != 0)
757 return;
758
759 if (frame_debug)
760 fprintf_unfiltered
761 (gdb_stdlog,
762 "amd64_windows_frame_decodes_insn: "
763 "%s: ver: %02x, plgsz: %02x, cnt: %02x, frame: %02x\n",
764 paddress (gdbarch, unwind_info),
765 ex_ui.Version_Flags, ex_ui.SizeOfPrologue,
766 ex_ui.CountOfCodes, ex_ui.FrameRegisterOffset);
767
768 /* Check version. */
769 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) != 1
770 && PEX64_UWI_VERSION (ex_ui.Version_Flags) != 2)
771 return;
772
773 start = cache->image_base + cache->start_rva;
774 if (first
775 && !(cache->pc >= start && cache->pc < start + ex_ui.SizeOfPrologue))
776 {
777 /* We want to detect if the PC points to an epilogue. This needs
778 to be checked only once, and an epilogue can be anywhere but in
779 the prologue. If so, the epilogue detection+decoding function is
780 sufficient. Otherwise, the unwinder will consider that the PC
781 is in the body of the function and will need to decode unwind
782 info. */
783 if (amd64_windows_frame_decode_epilogue (this_frame, cache) == 1)
784 return;
785
786 /* Not in an epilog. Clear possible side effects. */
787 memset (cache->prev_reg_addr, 0, sizeof (cache->prev_reg_addr));
788 }
789
790 codes_count = ex_ui.CountOfCodes;
791 frame_reg = PEX64_UWI_FRAMEREG (ex_ui.FrameRegisterOffset);
792
793 if (frame_reg != 0)
794 {
795 /* According to msdn:
796 If an FP reg is used, then any unwind code taking an offset must
797 only be used after the FP reg is established in the prolog. */
798 gdb_byte buf[8];
799 int frreg = amd64_windows_w2gdb_regnum[frame_reg];
800
801 get_frame_register (this_frame, frreg, buf);
802 save_addr = extract_unsigned_integer (buf, 8, byte_order);
803
804 if (frame_debug)
805 fprintf_unfiltered (gdb_stdlog, " frame_reg=%s, val=%s\n",
806 gdbarch_register_name (gdbarch, frreg),
807 paddress (gdbarch, save_addr));
808 }
809
810 /* Read opcodes. */
811 if (codes_count != 0
812 && target_read_memory (cache->image_base + unwind_info
813 + sizeof (ex_ui),
814 insns, codes_count * 2) != 0)
815 return;
816
817 end_insns = &insns[codes_count * 2];
818 p = insns;
819
820 /* Skip the undocumented opcode 6 entries emitted by version 2 info. */
821 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) == 2)
822 {
823 for (; p < end_insns; p += 2)
824 if (PEX64_UNWCODE_CODE (p[1]) != 6)
825 break;
826 }
827
828 for (; p < end_insns; p += 2)
829 {
830 int reg;
831
832 /* Virtually execute the operation if the pc is after the
833 corresponding instruction (that does matter in case of break
834 within the prologue). Note that for chained info (!first), the
835 prologue has been fully executed. */
836 if (cache->pc >= start + p[0] || cache->pc < start)
837 {
838 if (frame_debug)
839 fprintf_unfiltered
840 (gdb_stdlog, " op #%u: off=0x%02x, insn=0x%02x\n",
841 (unsigned) (p - insns), p[0], p[1]);
842
843 /* If no frame register is defined, the current value of
844 rsp is used instead. */
845 if (frame_reg == 0)
846 save_addr = cur_sp;
847
848 reg = -1;
849
850 switch (PEX64_UNWCODE_CODE (p[1]))
851 {
852 case UWOP_PUSH_NONVOL:
853 /* Push pre-decrements RSP. */
854 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
855 cache->prev_reg_addr[reg] = cur_sp;
856 cur_sp += 8;
857 break;
858 case UWOP_ALLOC_LARGE:
859 if (PEX64_UNWCODE_INFO (p[1]) == 0)
860 cur_sp +=
861 8 * extract_unsigned_integer (p + 2, 2, byte_order);
862 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
863 cur_sp += extract_unsigned_integer (p + 2, 4, byte_order);
864 else
865 return;
866 break;
867 case UWOP_ALLOC_SMALL:
868 cur_sp += 8 + 8 * PEX64_UNWCODE_INFO (p[1]);
869 break;
870 case UWOP_SET_FPREG:
871 cur_sp = save_addr
872 - PEX64_UWI_FRAMEOFF (ex_ui.FrameRegisterOffset) * 16;
873 break;
874 case UWOP_SAVE_NONVOL:
875 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
876 cache->prev_reg_addr[reg] = save_addr
877 + 8 * extract_unsigned_integer (p + 2, 2, byte_order);
878 break;
879 case UWOP_SAVE_NONVOL_FAR:
880 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
881 cache->prev_reg_addr[reg] = save_addr
882 + 8 * extract_unsigned_integer (p + 2, 4, byte_order);
883 break;
884 case UWOP_SAVE_XMM128:
885 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
886 save_addr
887 - 16 * extract_unsigned_integer (p + 2, 2, byte_order);
888 break;
889 case UWOP_SAVE_XMM128_FAR:
890 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
891 save_addr
892 - 16 * extract_unsigned_integer (p + 2, 4, byte_order);
893 break;
894 case UWOP_PUSH_MACHFRAME:
895 if (PEX64_UNWCODE_INFO (p[1]) == 0)
896 {
897 cache->prev_rip_addr = cur_sp + 0;
898 cache->prev_rsp_addr = cur_sp + 24;
899 cur_sp += 40;
900 }
901 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
902 {
903 cache->prev_rip_addr = cur_sp + 8;
904 cache->prev_rsp_addr = cur_sp + 32;
905 cur_sp += 48;
906 }
907 else
908 return;
909 break;
910 default:
911 return;
912 }
913
914 /* Display address where the register was saved. */
915 if (frame_debug && reg >= 0)
916 fprintf_unfiltered
917 (gdb_stdlog, " [reg %s at %s]\n",
918 gdbarch_register_name (gdbarch, reg),
919 paddress (gdbarch, cache->prev_reg_addr[reg]));
920 }
921
922 /* Adjust with the length of the opcode. */
923 switch (PEX64_UNWCODE_CODE (p[1]))
924 {
925 case UWOP_PUSH_NONVOL:
926 case UWOP_ALLOC_SMALL:
927 case UWOP_SET_FPREG:
928 case UWOP_PUSH_MACHFRAME:
929 break;
930 case UWOP_ALLOC_LARGE:
931 if (PEX64_UNWCODE_INFO (p[1]) == 0)
932 p += 2;
933 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
934 p += 4;
935 else
936 return;
937 break;
938 case UWOP_SAVE_NONVOL:
939 case UWOP_SAVE_XMM128:
940 p += 2;
941 break;
942 case UWOP_SAVE_NONVOL_FAR:
943 case UWOP_SAVE_XMM128_FAR:
944 p += 4;
945 break;
946 default:
947 return;
948 }
949 }
950 if (PEX64_UWI_FLAGS (ex_ui.Version_Flags) != UNW_FLAG_CHAININFO)
951 {
952 /* End of unwind info. */
953 break;
954 }
955 else
956 {
957 /* Read the chained unwind info. */
958 struct external_pex64_runtime_function d;
959 CORE_ADDR chain_vma;
960
961 /* No longer the first entry. */
962 first = 0;
963
964 /* Stay aligned on word boundary. */
965 chain_vma = cache->image_base + unwind_info
966 + sizeof (ex_ui) + ((codes_count + 1) & ~1) * 2;
967
968 if (target_read_memory (chain_vma, (gdb_byte *) &d, sizeof (d)) != 0)
969 return;
970
971 /* Decode begin/end. This may be different from .pdata index, as
972 an unwind info may be shared by several functions (in particular
973 if many functions have the same prolog and handler). */
974 cache->start_rva =
975 extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
976 cache->end_rva =
977 extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
978 unwind_info =
979 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
980
981 if (frame_debug)
982 fprintf_unfiltered
983 (gdb_stdlog,
984 "amd64_windows_frame_decodes_insn (next in chain):"
985 " unwind_data=%s, start_rva=%s, end_rva=%s\n",
986 paddress (gdbarch, unwind_info),
987 paddress (gdbarch, cache->start_rva),
988 paddress (gdbarch, cache->end_rva));
989 }
990
991 /* Allow the user to break this loop. */
992 QUIT;
993 }
994 /* PC is saved by the call. */
995 if (cache->prev_rip_addr == 0)
996 cache->prev_rip_addr = cur_sp;
997 cache->prev_sp = cur_sp + 8;
998
999 if (frame_debug)
1000 fprintf_unfiltered (gdb_stdlog, " prev_sp: %s, prev_pc @%s\n",
1001 paddress (gdbarch, cache->prev_sp),
1002 paddress (gdbarch, cache->prev_rip_addr));
1003 }
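/* For reference, a sketch of the data decoded above (layout as declared
   in coff/pe.h and in the Microsoft x64 unwind documentation):

     UNWIND_INFO:
       +0  Version (low 3 bits) / Flags (high 5 bits)
       +1  SizeOfProlog
       +2  CountOfCodes
       +3  FrameRegister (low 4 bits) / FrameOffset (high 4 bits, x16)
       +4  CountOfCodes 2-byte UNWIND_CODE slots, each holding a
           prologue offset byte plus an opcode/info byte
       ... optionally a chained RUNTIME_FUNCTION, aligned to an even
           slot count

   For instance a prologue "push %rbp; sub $0x20,%rsp; lea 0x20(%rsp),%rbp"
   is described by UWOP_SET_FPREG, UWOP_ALLOC_SMALL (info = 3) and
   UWOP_PUSH_NONVOL (%rbp), and the loop above virtually undoes each code
   whose prologue offset the PC has already passed. */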
1004
1005 /* Find SEH unwind info for PC, returning 0 on success.
1006
1007 UNWIND_INFO is set to the RVA of the unwind info, IMAGE_BASE to
1008 the base address of the corresponding image, and START_RVA and
1009 END_RVA to the RVAs of the start and end of the function containing PC. */
1010
1011 static int
1012 amd64_windows_find_unwind_info (struct gdbarch *gdbarch, CORE_ADDR pc,
1013 CORE_ADDR *unwind_info,
1014 CORE_ADDR *image_base,
1015 CORE_ADDR *start_rva,
1016 CORE_ADDR *end_rva)
1017 {
1018 struct obj_section *sec;
1019 pe_data_type *pe;
1020 IMAGE_DATA_DIRECTORY *dir;
1021 struct objfile *objfile;
1022 unsigned long lo, hi;
1023 CORE_ADDR base;
1024 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1025
1026 /* Get the corresponding exception directory. */
1027 sec = find_pc_section (pc);
1028 if (sec == NULL)
1029 return -1;
1030 objfile = sec->objfile;
1031 pe = pe_data (sec->objfile->obfd);
1032 dir = &pe->pe_opthdr.DataDirectory[PE_EXCEPTION_TABLE];
1033
1034 base = pe->pe_opthdr.ImageBase + objfile->text_section_offset ();
1035 *image_base = base;
1036
1037 /* Find the entry.
1038
1039 Note: This does not handle dynamically added entries (for JIT
1040 engines). For this, we would need to ask the kernel directly,
1041 which means getting some info from the native layer. For the
1042 rest of the code, however, it's probably faster to search
1043 the entry ourselves. */
1044 lo = 0;
1045 hi = dir->Size / sizeof (struct external_pex64_runtime_function);
1046 *unwind_info = 0;
1047 while (lo <= hi)
1048 {
1049 unsigned long mid = lo + (hi - lo) / 2;
1050 struct external_pex64_runtime_function d;
1051 CORE_ADDR sa, ea;
1052
1053 if (target_read_memory (base + dir->VirtualAddress + mid * sizeof (d),
1054 (gdb_byte *) &d, sizeof (d)) != 0)
1055 return -1;
1056
1057 sa = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
1058 ea = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
1059 if (pc < base + sa)
1060 hi = mid - 1;
1061 else if (pc >= base + ea)
1062 lo = mid + 1;
1063 else if (pc >= base + sa && pc < base + ea)
1064 {
1065 /* Got it. */
1066 *start_rva = sa;
1067 *end_rva = ea;
1068 *unwind_info =
1069 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
1070 break;
1071 }
1072 else
1073 break;
1074 }
1075
1076 if (frame_debug)
1077 fprintf_unfiltered
1078 (gdb_stdlog,
1079 "amd64_windows_find_unwind_data: image_base=%s, unwind_data=%s\n",
1080 paddress (gdbarch, base), paddress (gdbarch, *unwind_info));
1081
1082 return 0;
1083 }
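/* Each .pdata entry searched above is 12 bytes, three little-endian
   RVAs (see external_pex64_runtime_function, declared in coff/pe.h):

     +0  rva_BeginAddress   start of the function
     +4  rva_EndAddress     one past the end of the function
     +8  rva_UnwindData     address of the UNWIND_INFO block

   The entries are sorted by BeginAddress, which is what makes the
   binary search above valid. */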
1084
1085 /* Fill THIS_CACHE using the native amd64-windows unwinding data
1086 for THIS_FRAME. */
1087
1088 static struct amd64_windows_frame_cache *
1089 amd64_windows_frame_cache (struct frame_info *this_frame, void **this_cache)
1090 {
1091 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1092 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1093 struct amd64_windows_frame_cache *cache;
1094 gdb_byte buf[8];
1095 CORE_ADDR pc;
1096 CORE_ADDR unwind_info = 0;
1097
1098 if (*this_cache)
1099 return (struct amd64_windows_frame_cache *) *this_cache;
1100
1101 cache = FRAME_OBSTACK_ZALLOC (struct amd64_windows_frame_cache);
1102 *this_cache = cache;
1103
1104 /* Get current PC and SP. */
1105 pc = get_frame_pc (this_frame);
1106 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1107 cache->sp = extract_unsigned_integer (buf, 8, byte_order);
1108 cache->pc = pc;
1109
1110 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1111 &cache->image_base,
1112 &cache->start_rva,
1113 &cache->end_rva))
1114 return cache;
1115
1116 if (unwind_info == 0)
1117 {
1118 /* Assume a leaf function. */
1119 cache->prev_sp = cache->sp + 8;
1120 cache->prev_rip_addr = cache->sp;
1121 }
1122 else
1123 {
1124 /* Decode unwind insns to compute saved addresses. */
1125 amd64_windows_frame_decode_insns (this_frame, cache, unwind_info);
1126 }
1127 return cache;
1128 }
1129
1130 /* Implement the "prev_register" method of struct frame_unwind
1131 using the standard Windows x64 SEH info. */
1132
1133 static struct value *
1134 amd64_windows_frame_prev_register (struct frame_info *this_frame,
1135 void **this_cache, int regnum)
1136 {
1137 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1138 struct amd64_windows_frame_cache *cache =
1139 amd64_windows_frame_cache (this_frame, this_cache);
1140 CORE_ADDR prev;
1141
1142 if (frame_debug)
1143 fprintf_unfiltered (gdb_stdlog,
1144 "amd64_windows_frame_prev_register %s for sp=%s\n",
1145 gdbarch_register_name (gdbarch, regnum),
1146 paddress (gdbarch, cache->prev_sp));
1147
1148 if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
1149 prev = cache->prev_xmm_addr[regnum - AMD64_XMM0_REGNUM];
1150 else if (regnum == AMD64_RSP_REGNUM)
1151 {
1152 prev = cache->prev_rsp_addr;
1153 if (prev == 0)
1154 return frame_unwind_got_constant (this_frame, regnum, cache->prev_sp);
1155 }
1156 else if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_R15_REGNUM)
1157 prev = cache->prev_reg_addr[regnum - AMD64_RAX_REGNUM];
1158 else if (regnum == AMD64_RIP_REGNUM)
1159 prev = cache->prev_rip_addr;
1160 else
1161 prev = 0;
1162
1163 if (prev && frame_debug)
1164 fprintf_unfiltered (gdb_stdlog, " -> at %s\n", paddress (gdbarch, prev));
1165
1166 if (prev)
1167 {
1168 /* Register was saved. */
1169 return frame_unwind_got_memory (this_frame, regnum, prev);
1170 }
1171 else
1172 {
1173 /* Register is either volatile or not modified. */
1174 return frame_unwind_got_register (this_frame, regnum, regnum);
1175 }
1176 }
1177
1178 /* Implement the "this_id" method of struct frame_unwind using
1179 the standard Windows x64 SEH info. */
1180
1181 static void
1182 amd64_windows_frame_this_id (struct frame_info *this_frame, void **this_cache,
1183 struct frame_id *this_id)
1184 {
1185 struct amd64_windows_frame_cache *cache =
1186 amd64_windows_frame_cache (this_frame, this_cache);
1187
1188 *this_id = frame_id_build (cache->prev_sp,
1189 cache->image_base + cache->start_rva);
1190 }
1191
1192 /* Windows x64 SEH unwinder. */
1193
1194 static const struct frame_unwind amd64_windows_frame_unwind =
1195 {
1196 NORMAL_FRAME,
1197 default_frame_unwind_stop_reason,
1198 &amd64_windows_frame_this_id,
1199 &amd64_windows_frame_prev_register,
1200 NULL,
1201 default_frame_sniffer
1202 };
1203
1204 /* Implement the "skip_prologue" gdbarch method. */
1205
1206 static CORE_ADDR
1207 amd64_windows_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1208 {
1209 CORE_ADDR func_addr;
1210 CORE_ADDR unwind_info = 0;
1211 CORE_ADDR image_base, start_rva, end_rva;
1212 struct external_pex64_unwind_info ex_ui;
1213
1214 /* Use prologue size from unwind info. */
1215 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1216 &image_base, &start_rva, &end_rva) == 0)
1217 {
1218 if (unwind_info == 0)
1219 {
1220 /* Leaf function. */
1221 return pc;
1222 }
1223 else if (target_read_memory (image_base + unwind_info,
1224 (gdb_byte *) &ex_ui, sizeof (ex_ui)) == 0
1225 && PEX64_UWI_VERSION (ex_ui.Version_Flags) == 1)
1226 return std::max (pc, image_base + start_rva + ex_ui.SizeOfPrologue);
1227 }
1228
1229 /* See if we can determine the end of the prologue via the symbol
1230 table. If so, then return either the PC, or the PC after
1231 the prologue, whichever is greater. */
1232 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1233 {
1234 CORE_ADDR post_prologue_pc
1235 = skip_prologue_using_sal (gdbarch, func_addr);
1236
1237 if (post_prologue_pc != 0)
1238 return std::max (pc, post_prologue_pc);
1239 }
1240
1241 return pc;
1242 }
1243
1244 /* Check Win64 DLL jmp trampolines and find jump destination. */
1245
1246 static CORE_ADDR
1247 amd64_windows_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
1248 {
1249 CORE_ADDR destination = 0;
1250 struct gdbarch *gdbarch = get_frame_arch (frame);
1251 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1252
1253 /* Check for jmp *<offset>(%rip) (jump near, absolute indirect (/4)). */
1254 if (pc && read_memory_unsigned_integer (pc, 2, byte_order) == 0x25ff)
1255 {
1256 /* Get opcode offset and see if we can find a reference in our data. */
1257 ULONGEST offset
1258 = read_memory_unsigned_integer (pc + 2, 4, byte_order);
1259
1260 /* Get address of function pointer at end of pc. */
1261 CORE_ADDR indirect_addr = pc + offset + 6;
1262
1263 struct minimal_symbol *indsym
1264 = (indirect_addr
1265 ? lookup_minimal_symbol_by_pc (indirect_addr).minsym
1266 : NULL);
1267 const char *symname = indsym ? indsym->linkage_name () : NULL;
1268
1269 if (symname)
1270 {
1271 if (startswith (symname, "__imp_")
1272 || startswith (symname, "_imp_"))
1273 destination
1274 = read_memory_unsigned_integer (indirect_addr, 8, byte_order);
1275 }
1276 }
1277
1278 return destination;
1279 }
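/* The import trampolines recognized above look like

     ff 25 xx xx xx xx        jmp *offset(%rip)

   where the 32-bit offset is relative to the end of the 6-byte
   instruction (hence the pc + offset + 6 above), and the target is an
   IAT slot whose minimal symbol is named __imp_<function> (or
   _imp_<function>) and holds the 8-byte address of the function in the
   exporting DLL. */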
1280
1281 /* Implement the "auto_wide_charset" gdbarch method. */
1282
1283 static const char *
1284 amd64_windows_auto_wide_charset (void)
1285 {
1286 return "UTF-16";
1287 }
1288
1289 /* Common parts for gdbarch initialization for Windows and Cygwin on AMD64. */
1290
1291 static void
1292 amd64_windows_init_abi_common (gdbarch_info info, struct gdbarch *gdbarch)
1293 {
1294 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1295
1296 /* The dwarf2 unwinder (appended very early by i386_gdbarch_init) is
1297 preferred over the SEH one. The reasons are:
1298 - binaries without SEH but with dwarf2 debug info are correctly handled
1299 (although they aren't ABI compliant, gcc before 4.7 didn't emit SEH
1300 info).
1301 - dwarf3 DW_OP_call_frame_cfa is correctly handled (it can only be
1302 handled if the dwarf2 unwinder is used).
1303
1304 The call to amd64_init_abi appends default unwinders, that aren't
1305 compatible with the SEH one.
1306 */
1307 frame_unwind_append_unwinder (gdbarch, &amd64_windows_frame_unwind);
1308
1309 amd64_init_abi (info, gdbarch,
1310 amd64_target_description (X86_XSTATE_SSE_MASK, false));
1311
1312 /* Function calls. */
1313 set_gdbarch_push_dummy_call (gdbarch, amd64_windows_push_dummy_call);
1314 set_gdbarch_return_value (gdbarch, amd64_windows_return_value);
1315 set_gdbarch_skip_main_prologue (gdbarch, amd64_skip_main_prologue);
1316 set_gdbarch_skip_trampoline_code (gdbarch,
1317 amd64_windows_skip_trampoline_code);
1318
1319 set_gdbarch_skip_prologue (gdbarch, amd64_windows_skip_prologue);
1320
1321 tdep->gregset_reg_offset = amd64_windows_gregset_reg_offset;
1322 tdep->gregset_num_regs = ARRAY_SIZE (amd64_windows_gregset_reg_offset);
1323 tdep->sizeof_gregset = AMD64_WINDOWS_SIZEOF_GREGSET;
1324 tdep->sizeof_fpregset = 0;
1325
1326 /* Core file support. */
1327 set_gdbarch_core_xfer_shared_libraries
1328 (gdbarch, windows_core_xfer_shared_libraries);
1329 set_gdbarch_core_pid_to_str (gdbarch, windows_core_pid_to_str);
1330
1331 set_gdbarch_auto_wide_charset (gdbarch, amd64_windows_auto_wide_charset);
1332 }
1333
1334 /* gdbarch initialization for Windows on AMD64. */
1335
1336 static void
1337 amd64_windows_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1338 {
1339 amd64_windows_init_abi_common (info, gdbarch);
1340 windows_init_abi (info, gdbarch);
1341
1342 /* On Windows, "long"s are only 32-bit. */
1343 set_gdbarch_long_bit (gdbarch, 32);
1344 }
1345
1346 /* gdbarch initialization for Cygwin on AMD64. */
1347
1348 static void
1349 amd64_cygwin_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1350 {
1351 amd64_windows_init_abi_common (info, gdbarch);
1352 cygwin_init_abi (info, gdbarch);
1353 }
1354
1355 static gdb_osabi
1356 amd64_windows_osabi_sniffer (bfd *abfd)
1357 {
1358 const char *target_name = bfd_get_target (abfd);
1359
1360 if (!streq (target_name, "pei-x86-64"))
1361 return GDB_OSABI_UNKNOWN;
1362
1363 if (is_linked_with_cygwin_dll (abfd))
1364 return GDB_OSABI_CYGWIN;
1365
1366 return GDB_OSABI_WINDOWS;
1367 }
1368
1369 static enum gdb_osabi
1370 amd64_cygwin_core_osabi_sniffer (bfd *abfd)
1371 {
1372 const char *target_name = bfd_get_target (abfd);
1373
1374 /* Cygwin uses ELF core dumps. Do not claim all ELF executables;
1375 check whether there is a .reg section of proper size. */
1376 if (strcmp (target_name, "elf64-x86-64") == 0)
1377 {
1378 asection *section = bfd_get_section_by_name (abfd, ".reg");
1379 if (section != nullptr
1380 && bfd_section_size (section) == AMD64_WINDOWS_SIZEOF_GREGSET)
1381 return GDB_OSABI_CYGWIN;
1382 }
1383
1384 return GDB_OSABI_UNKNOWN;
1385 }
1386
1387 void _initialize_amd64_windows_tdep ();
1388 void
1389 _initialize_amd64_windows_tdep ()
1390 {
1391 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_WINDOWS,
1392 amd64_windows_init_abi);
1393 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_CYGWIN,
1394 amd64_cygwin_init_abi);
1395
1396 gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_coff_flavour,
1397 amd64_windows_osabi_sniffer);
1398
1399 /* Cygwin uses elf core dumps. */
1400 gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_elf_flavour,
1401 amd64_cygwin_core_osabi_sniffer);
1402
1403 }