1 /* Copyright (C) 2009-2020 Free Software Foundation, Inc.
2
3 This file is part of GDB.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>. */
17
18 #include "defs.h"
19 #include "osabi.h"
20 #include "amd64-tdep.h"
21 #include "gdbsupport/x86-xstate.h"
22 #include "gdbtypes.h"
23 #include "gdbcore.h"
24 #include "regcache.h"
25 #include "windows-tdep.h"
26 #include "frame.h"
27 #include "objfiles.h"
28 #include "frame-unwind.h"
29 #include "coff/internal.h"
30 #include "coff/i386.h"
31 #include "coff/pe.h"
32 #include "libcoff.h"
33 #include "value.h"
34 #include <algorithm>
35
36 /* The registers used to pass integer arguments during a function call. */
37 static int amd64_windows_dummy_call_integer_regs[] =
38 {
39 AMD64_RCX_REGNUM, /* %rcx */
40 AMD64_RDX_REGNUM, /* %rdx */
41 AMD64_R8_REGNUM, /* %r8 */
42 AMD64_R9_REGNUM /* %r9 */
43 };
44
45 /* Return nonzero if an argument of type TYPE should be passed
46 via one of the integer registers. */
47
48 static int
49 amd64_windows_passed_by_integer_register (struct type *type)
50 {
51 switch (TYPE_CODE (type))
52 {
53 case TYPE_CODE_INT:
54 case TYPE_CODE_ENUM:
55 case TYPE_CODE_BOOL:
56 case TYPE_CODE_RANGE:
57 case TYPE_CODE_CHAR:
58 case TYPE_CODE_PTR:
59 case TYPE_CODE_REF:
60 case TYPE_CODE_RVALUE_REF:
61 case TYPE_CODE_STRUCT:
62 case TYPE_CODE_UNION:
63 return (TYPE_LENGTH (type) == 1
64 || TYPE_LENGTH (type) == 2
65 || TYPE_LENGTH (type) == 4
66 || TYPE_LENGTH (type) == 8);
67
68 default:
69 return 0;
70 }
71 }
72
73 /* Return nonzero if an argument of type TYPE should be passed
74 via one of the XMM registers. */
75
76 static int
77 amd64_windows_passed_by_xmm_register (struct type *type)
78 {
79 return ((TYPE_CODE (type) == TYPE_CODE_FLT
80 || TYPE_CODE (type) == TYPE_CODE_DECFLOAT)
81 && (TYPE_LENGTH (type) == 4 || TYPE_LENGTH (type) == 8));
82 }
83
84 /* Return non-zero iff an argument of the given TYPE should be passed
85 by pointer. */
86
87 static int
88 amd64_windows_passed_by_pointer (struct type *type)
89 {
90 if (amd64_windows_passed_by_integer_register (type))
91 return 0;
92
93 if (amd64_windows_passed_by_xmm_register (type))
94 return 0;
95
96 return 1;
97 }
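
/* For illustration, given the three predicates above:
     - an int, a pointer, or an 8-byte struct is passed in one of the
       four integer registers;
     - a float or a double is passed in an XMM register;
     - struct { char c[3]; } (length 3, not 1/2/4/8) or any aggregate
       larger than 8 bytes is copied to the stack and passed by pointer
       (see amd64_windows_adjust_args_passed_by_pointer below).  */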
98
99 /* For each argument that should be passed by pointer, reserve some
100 stack space, store a copy of the argument on the stack, and replace
101 the argument by its address. Return the new Stack Pointer value.
102
103 NARGS is the number of arguments. ARGS is the array containing
104 the value of each argument. SP is value of the Stack Pointer. */
105
106 static CORE_ADDR
107 amd64_windows_adjust_args_passed_by_pointer (struct value **args,
108 int nargs, CORE_ADDR sp)
109 {
110 int i;
111
112 for (i = 0; i < nargs; i++)
113 if (amd64_windows_passed_by_pointer (value_type (args[i])))
114 {
115 struct type *type = value_type (args[i]);
116 const gdb_byte *valbuf = value_contents (args[i]);
117 const int len = TYPE_LENGTH (type);
118
119 /* Store a copy of that argument on the stack, aligned to
120 a 16-byte boundary, and then use the copy's address as
121 the argument. */
122
123 sp -= len;
124 sp &= ~0xf;
125 write_memory (sp, valbuf, len);
126
127 args[i]
128 = value_addr (value_from_contents_and_address (type, valbuf, sp));
129 }
130
131 return sp;
132 }
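
/* For instance, with SP initially 16-byte aligned and a single 24-byte
   struct argument, the code above lowers SP by 24, rounds it down to the
   next 16-byte boundary, copies the struct there, and rewrites ARGS[0]
   to be a pointer to that copy.  */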
133
134 /* Store the value of ARG in register REGNO (right-justified).
135 REGCACHE is the register cache. */
136
137 static void
138 amd64_windows_store_arg_in_reg (struct regcache *regcache,
139 struct value *arg, int regno)
140 {
141 struct type *type = value_type (arg);
142 const gdb_byte *valbuf = value_contents (arg);
143 gdb_byte buf[8];
144
145 gdb_assert (TYPE_LENGTH (type) <= 8);
146 memset (buf, 0, sizeof buf);
147 memcpy (buf, valbuf, std::min (TYPE_LENGTH (type), (ULONGEST) 8));
148 regcache->cooked_write (regno, buf);
149 }
150
151 /* Push the arguments for an inferior function call, and return
152 the updated value of the SP (Stack Pointer).
153
154 All arguments are identical to the arguments used in
155 amd64_windows_push_dummy_call. */
156
157 static CORE_ADDR
158 amd64_windows_push_arguments (struct regcache *regcache, int nargs,
159 struct value **args, CORE_ADDR sp,
160 function_call_return_method return_method)
161 {
162 int reg_idx = 0;
163 int i;
164 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
165 int num_stack_args = 0;
166 int num_elements = 0;
167 int element = 0;
168
169 /* First, handle the arguments passed by pointer.
170
171 These arguments are replaced by pointers to a copy we are making
172 in inferior memory. So use a copy of the ARGS table, to avoid
173 modifying the original one. */
174 {
175 struct value **args1 = XALLOCAVEC (struct value *, nargs);
176
177 memcpy (args1, args, nargs * sizeof (struct value *));
178 sp = amd64_windows_adjust_args_passed_by_pointer (args1, nargs, sp);
179 args = args1;
180 }
181
182 /* Reserve a register for the "hidden" argument. */
183 if (return_method == return_method_struct)
184 reg_idx++;
185
186 for (i = 0; i < nargs; i++)
187 {
188 struct type *type = value_type (args[i]);
189 int len = TYPE_LENGTH (type);
190 int on_stack_p = 1;
191
192 if (reg_idx < ARRAY_SIZE (amd64_windows_dummy_call_integer_regs))
193 {
194 if (amd64_windows_passed_by_integer_register (type))
195 {
196 amd64_windows_store_arg_in_reg
197 (regcache, args[i],
198 amd64_windows_dummy_call_integer_regs[reg_idx]);
199 on_stack_p = 0;
200 reg_idx++;
201 }
202 else if (amd64_windows_passed_by_xmm_register (type))
203 {
204 amd64_windows_store_arg_in_reg
205 (regcache, args[i], AMD64_XMM0_REGNUM + reg_idx);
206 /* In case of varargs, these parameters must also be
207 passed via the integer registers. */
208 amd64_windows_store_arg_in_reg
209 (regcache, args[i],
210 amd64_windows_dummy_call_integer_regs[reg_idx]);
211 on_stack_p = 0;
212 reg_idx++;
213 }
214 }
215
216 if (on_stack_p)
217 {
218 num_elements += ((len + 7) / 8);
219 stack_args[num_stack_args++] = args[i];
220 }
221 }
222
223 /* Allocate space for the arguments on the stack, keeping it
224 aligned on a 16-byte boundary. */
225 sp -= num_elements * 8;
226 sp &= ~0xf;
227
228 /* Write out the arguments to the stack. */
229 for (i = 0; i < num_stack_args; i++)
230 {
231 struct type *type = value_type (stack_args[i]);
232 const gdb_byte *valbuf = value_contents (stack_args[i]);
233
234 write_memory (sp + element * 8, valbuf, TYPE_LENGTH (type));
235 element += ((TYPE_LENGTH (type) + 7) / 8);
236 }
237
238 return sp;
239 }
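
/* For illustration, assuming the rules above and no struct-return slot,
   a call such as f (1, 2.5, big, 4), where BIG is a 32-byte struct,
   would be set up as:

       %rcx    1
       %xmm1   2.5  (also copied to %rdx, in case the callee is a
                     varargs function)
       %r8     address of a stack copy of BIG
       %r9     4

   Any further arguments would be written to 8-byte stack slots.  */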
240
241 /* Implement the "push_dummy_call" gdbarch method. */
242
243 static CORE_ADDR
244 amd64_windows_push_dummy_call
245 (struct gdbarch *gdbarch, struct value *function,
246 struct regcache *regcache, CORE_ADDR bp_addr,
247 int nargs, struct value **args, CORE_ADDR sp,
248 function_call_return_method return_method, CORE_ADDR struct_addr)
249 {
250 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
251 gdb_byte buf[8];
252
253 /* Pass arguments. */
254 sp = amd64_windows_push_arguments (regcache, nargs, args, sp,
255 return_method);
256
257 /* Pass the "hidden" argument. */
258 if (return_method == return_method_struct)
259 {
260 /* The "hidden" argument is passed through the first argument
261 register. */
262 const int arg_regnum = amd64_windows_dummy_call_integer_regs[0];
263
264 store_unsigned_integer (buf, 8, byte_order, struct_addr);
265 regcache->cooked_write (arg_regnum, buf);
266 }
267
268 /* Reserve some memory on the stack for the integer-parameter
269 registers, as required by the ABI. */
270 sp -= ARRAY_SIZE (amd64_windows_dummy_call_integer_regs) * 8;
271
272 /* Store return address. */
273 sp -= 8;
274 store_unsigned_integer (buf, 8, byte_order, bp_addr);
275 write_memory (sp, buf, 8);
276
277 /* Update the stack pointer... */
278 store_unsigned_integer (buf, 8, byte_order, sp);
279 regcache->cooked_write (AMD64_RSP_REGNUM, buf);
280
281 /* ...and fake a frame pointer. */
282 regcache->cooked_write (AMD64_RBP_REGNUM, buf);
283
284 return sp + 16;
285 }
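
/* Sketch of the frame laid out by the code above, where SP is the value
   written to %rsp (and also to %rbp, as a fake frame pointer):

       SP + 0    return address (BP_ADDR)
       SP + 8    home space reserved for %rcx
       SP + 16   home space reserved for %rdx
       SP + 24   home space reserved for %r8
       SP + 32   home space reserved for %r9
       SP + 40   stack arguments and by-pointer copies, if any.  */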
286
287 /* Implement the "return_value" gdbarch method for amd64-windows. */
288
289 static enum return_value_convention
290 amd64_windows_return_value (struct gdbarch *gdbarch, struct value *function,
291 struct type *type, struct regcache *regcache,
292 gdb_byte *readbuf, const gdb_byte *writebuf)
293 {
294 int len = TYPE_LENGTH (type);
295 int regnum = -1;
296
297 /* See if our value is returned through a register. If it is, then
298 store the associated register number in REGNUM. */
299 switch (TYPE_CODE (type))
300 {
301 case TYPE_CODE_FLT:
302 case TYPE_CODE_DECFLOAT:
303 /* __m128, __m128i, __m128d, floats, and doubles are returned
304 via XMM0. */
305 if (len == 4 || len == 8 || len == 16)
306 regnum = AMD64_XMM0_REGNUM;
307 break;
308 default:
309 /* All other values that are 1, 2, 4 or 8 bytes long are returned
310 via RAX. */
311 if (len == 1 || len == 2 || len == 4 || len == 8)
312 regnum = AMD64_RAX_REGNUM;
313 break;
314 }
315
316 if (regnum < 0)
317 {
318 /* RAX contains the address where the return value has been stored. */
319 if (readbuf)
320 {
321 ULONGEST addr;
322
323 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
324 read_memory (addr, readbuf, TYPE_LENGTH (type));
325 }
326 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
327 }
328 else
329 {
330 /* Extract the return value from the register where it was stored. */
331 if (readbuf)
332 regcache->raw_read_part (regnum, 0, len, readbuf);
333 if (writebuf)
334 regcache->raw_write_part (regnum, 0, len, writebuf);
335 return RETURN_VALUE_REGISTER_CONVENTION;
336 }
337 }
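
/* Examples of the convention implemented above:
     - a float or a double is returned in %xmm0;
     - integers, pointers and 1/2/4/8-byte aggregates are returned in %rax;
     - larger aggregates are returned in memory: the caller passes a hidden
       pointer (see amd64_windows_push_dummy_call) and %rax holds that
       address on return, hence RETURN_VALUE_ABI_RETURNS_ADDRESS.  */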
338
339 /* Check that the code pointed to by PC corresponds to a call to
340 __main, and skip it if so. Return PC otherwise. */
341
342 static CORE_ADDR
343 amd64_skip_main_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
344 {
345 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
346 gdb_byte op;
347
348 target_read_memory (pc, &op, 1);
349 if (op == 0xe8)
350 {
351 gdb_byte buf[4];
352
353 if (target_read_memory (pc + 1, buf, sizeof buf) == 0)
354 {
355 struct bound_minimal_symbol s;
356 CORE_ADDR call_dest;
357
358 call_dest = pc + 5 + extract_signed_integer (buf, 4, byte_order);
359 s = lookup_minimal_symbol_by_pc (call_dest);
360 if (s.minsym != NULL
361 && s.minsym->linkage_name () != NULL
362 && strcmp (s.minsym->linkage_name (), "__main") == 0)
363 pc += 5;
364 }
365 }
366
367 return pc;
368 }
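
/* The pattern recognized above is the 5-byte near call that GCC-built
   MinGW programs typically emit at the start of main, for example:

       e8 xx xx xx xx    call   __main

   where the rel32 displacement is taken relative to the end of the call
   instruction (PC + 5).  */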
369
370 struct amd64_windows_frame_cache
371 {
372 /* ImageBase for the module. */
373 CORE_ADDR image_base;
374
375 /* Function start and end rva. */
376 CORE_ADDR start_rva;
377 CORE_ADDR end_rva;
378
379 /* Next instruction to be executed. */
380 CORE_ADDR pc;
381
382 /* Current sp. */
383 CORE_ADDR sp;
384
385 /* Address of saved integer and xmm registers. */
386 CORE_ADDR prev_reg_addr[16];
387 CORE_ADDR prev_xmm_addr[16];
388
389 /* The next two fields are set only for machine info frames. */
390
391 /* Likewise for RIP. */
392 CORE_ADDR prev_rip_addr;
393
394 /* Likewise for RSP. */
395 CORE_ADDR prev_rsp_addr;
396
397 /* Address of the previous frame. */
398 CORE_ADDR prev_sp;
399 };
400
401 /* Convert a Windows register number to a GDB register number. */
402 static const enum amd64_regnum amd64_windows_w2gdb_regnum[] =
403 {
404 AMD64_RAX_REGNUM,
405 AMD64_RCX_REGNUM,
406 AMD64_RDX_REGNUM,
407 AMD64_RBX_REGNUM,
408 AMD64_RSP_REGNUM,
409 AMD64_RBP_REGNUM,
410 AMD64_RSI_REGNUM,
411 AMD64_RDI_REGNUM,
412 AMD64_R8_REGNUM,
413 AMD64_R9_REGNUM,
414 AMD64_R10_REGNUM,
415 AMD64_R11_REGNUM,
416 AMD64_R12_REGNUM,
417 AMD64_R13_REGNUM,
418 AMD64_R14_REGNUM,
419 AMD64_R15_REGNUM
420 };
421
422 /* Return TRUE iff PC is in the range of the function corresponding to
423 CACHE. */
424
425 static int
426 pc_in_range (CORE_ADDR pc, const struct amd64_windows_frame_cache *cache)
427 {
428 return (pc >= cache->image_base + cache->start_rva
429 && pc < cache->image_base + cache->end_rva);
430 }
431
432 /* Try to recognize and decode an epilogue sequence.
433
434 Return -1 if we fail to read the instructions for any reason.
435 Return 1 if an epilogue sequence was recognized, 0 otherwise. */
436
437 static int
438 amd64_windows_frame_decode_epilogue (struct frame_info *this_frame,
439 struct amd64_windows_frame_cache *cache)
440 {
441 /* According to MSDN an epilogue "must consist of either an add RSP,constant
442 or lea RSP,constant[FPReg], followed by a series of zero or more 8-byte
443 register pops and a return or a jmp".
444
445 Furthermore, according to RtlVirtualUnwind, the complete list of
446 epilogue markers is:
447 - ret [c3]
448 - ret n [c2 imm16]
449 - rep ret [f3 c3]
450 - jmp imm8 | imm32 [eb rel8] or [e9 rel32]
451 - jmp qword ptr imm32 - not handled
452 - rex.w jmp reg [4X ff eY]
453 */
454
455 CORE_ADDR pc = cache->pc;
456 CORE_ADDR cur_sp = cache->sp;
457 struct gdbarch *gdbarch = get_frame_arch (this_frame);
458 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
459 gdb_byte op;
460 gdb_byte rex;
461
462 /* We don't care about the instruction deallocating the frame:
463 if it hasn't been executed, the pc is still in the body;
464 if it has been executed, the epilogue decoding below will work. */
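
 /* For example, given an epilogue such as

        add    $0x28, %rsp          48 83 c4 28
        pop    %rbx                 5b
        ret                         c3

    once the add has executed and PC points at the pop, the loop below
    records the %rbx save slot at the current SP, and the final c3 byte
    yields the saved return address at SP + 8.  */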
465
466 /* First decode:
467 - pop reg [41 58-5f] or [58-5f]. */
468
469 while (1)
470 {
471 /* Read opcode. */
472 if (target_read_memory (pc, &op, 1) != 0)
473 return -1;
474
475 if (op >= 0x40 && op <= 0x4f)
476 {
477 /* REX prefix. */
478 rex = op;
479
480 /* Read opcode. */
481 if (target_read_memory (pc + 1, &op, 1) != 0)
482 return -1;
483 }
484 else
485 rex = 0;
486
487 if (op >= 0x58 && op <= 0x5f)
488 {
489 /* pop reg */
490 gdb_byte reg = (op & 0x0f) | ((rex & 1) << 3);
491
492 cache->prev_reg_addr[amd64_windows_w2gdb_regnum[reg]] = cur_sp;
493 cur_sp += 8;
494 pc += rex ? 2 : 1;
495 }
496 else
497 break;
498
499 /* Allow the user to break this loop. This shouldn't happen as the
500 number of consecutive pops should be small. */
501 QUIT;
502 }
503
504 /* Then decode the marker. */
505
506 /* Read opcode. */
507 if (target_read_memory (pc, &op, 1) != 0)
508 return -1;
509
510 switch (op)
511 {
512 case 0xc3:
513 /* Ret. */
514 cache->prev_rip_addr = cur_sp;
515 cache->prev_sp = cur_sp + 8;
516 return 1;
517
518 case 0xeb:
519 {
520 /* jmp rel8 */
521 gdb_byte rel8;
522 CORE_ADDR npc;
523
524 if (target_read_memory (pc + 1, &rel8, 1) != 0)
525 return -1;
526 npc = pc + 2 + (signed char) rel8;
527
528 /* If the jump is within the function, then this is not a marker,
529 otherwise this is a tail-call. */
530 return !pc_in_range (npc, cache);
531 }
532
533 case 0xe9:
534 {
535 /* jmp rel32 */
536 gdb_byte rel32[4];
537 CORE_ADDR npc;
538
539 if (target_read_memory (pc + 1, rel32, 4) != 0)
540 return -1;
541 npc = pc + 5 + extract_signed_integer (rel32, 4, byte_order);
542
543 /* If the jump is within the function, then this is not a marker,
544 otherwise this is a tail-call. */
545 return !pc_in_range (npc, cache);
546 }
547
548 case 0xc2:
549 {
550 /* ret n */
551 gdb_byte imm16[2];
552
553 if (target_read_memory (pc + 1, imm16, 2) != 0)
554 return -1;
555 cache->prev_rip_addr = cur_sp;
556 cache->prev_sp = cur_sp + 8
557 + extract_unsigned_integer (imm16, 2, byte_order);
558 return 1;
559 }
560
561 case 0xf3:
562 {
563 /* rep; ret */
564 gdb_byte op1;
565
566 if (target_read_memory (pc + 1, &op1, 1) != 0)
567 return -1;
568 if (op1 != 0xc3)
569 return 0;
570
571 cache->prev_rip_addr = cur_sp;
572 cache->prev_sp = cur_sp + 8;
573 return 1;
574 }
575
576 case 0x40:
577 case 0x41:
578 case 0x42:
579 case 0x43:
580 case 0x44:
581 case 0x45:
582 case 0x46:
583 case 0x47:
584 case 0x48:
585 case 0x49:
586 case 0x4a:
587 case 0x4b:
588 case 0x4c:
589 case 0x4d:
590 case 0x4e:
591 case 0x4f:
592 /* Got a REX prefix, read next byte. */
593 rex = op;
594 if (target_read_memory (pc + 1, &op, 1) != 0)
595 return -1;
596
597 if (op == 0xff)
598 {
599 /* rex jmp reg */
600 gdb_byte op1;
601
602 if (target_read_memory (pc + 2, &op1, 1) != 0)
603 return -1;
604 return (op1 & 0xf8) == 0xe0;
605 }
606 else
607 return 0;
608
609 default:
610 /* Not REX, so unknown. */
611 return 0;
612 }
613 }
614
615 /* Decode and execute unwind insns at UNWIND_INFO. */
616
617 static void
618 amd64_windows_frame_decode_insns (struct frame_info *this_frame,
619 struct amd64_windows_frame_cache *cache,
620 CORE_ADDR unwind_info)
621 {
622 CORE_ADDR save_addr = 0;
623 CORE_ADDR cur_sp = cache->sp;
624 struct gdbarch *gdbarch = get_frame_arch (this_frame);
625 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
626 int first = 1;
627
628 /* There are at least three ways an unwind info entry can be shared:
629 1. Two different runtime_function entries (in .pdata) can point to the
630 same unwind info entry. There is no such indication while unwinding,
631 so we don't really care about that case. We suppose this scheme is
632 used to save memory when the unwind entries are exactly the same.
633 2. Chained unwind_info entries, with no unwind codes (no prologue).
634 There is a major difference with the previous case: the pc range for
635 the function is different (in case 1, the pc range comes from the
636 runtime_function entry; in case 2, the pc range for the chained entry
637 comes from the first unwind entry). Case 1 cannot be used instead as
638 the pc is not in the prologue. This case is officially documented.
639 (There might be unwind code in the first unwind entry to handle
640 additional unwinding). GCC (at least until gcc 5.0) doesn't chain
641 entries.
642 3. Undocumented unwind info redirection. The exact purpose is unclear,
643 so we treat it as a memory optimization of case 2.
644 */
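
 /* Roughly, per the Microsoft documentation, the UNWIND_INFO block read
    below is laid out as:

        +0  Version (3 bits) / Flags (5 bits)
        +1  SizeOfProlog
        +2  CountOfCodes
        +3  FrameRegister (low 4 bits) / scaled FrameOffset (high 4 bits)
        +4  CountOfCodes 2-byte UNWIND_CODE slots, padded to an even count

    each UNWIND_CODE giving the prologue offset in its first byte and the
    operation (UWOP_xxx) plus its info field in its second byte.  If
    UNW_FLAG_CHAININFO is set, a chained RUNTIME_FUNCTION follows the
    (padded) code array.  */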
645
646 if (unwind_info & 1)
647 {
648 /* Unofficially documented unwind info redirection, when UNWIND_INFO
649 address is odd (http://www.codemachine.com/article_x64deepdive.html).
650 */
651 struct external_pex64_runtime_function d;
652
653 if (target_read_memory (cache->image_base + (unwind_info & ~1),
654 (gdb_byte *) &d, sizeof (d)) != 0)
655 return;
656
657 cache->start_rva
658 = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
659 cache->end_rva
660 = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
661 unwind_info
662 = extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
663 }
664
665 while (1)
666 {
667 struct external_pex64_unwind_info ex_ui;
668 /* There are at most 256 16-bit unwind insns. */
669 gdb_byte insns[2 * 256];
670 gdb_byte *p;
671 gdb_byte *end_insns;
672 unsigned char codes_count;
673 unsigned char frame_reg;
674 CORE_ADDR start;
675
676 /* Read and decode header. */
677 if (target_read_memory (cache->image_base + unwind_info,
678 (gdb_byte *) &ex_ui, sizeof (ex_ui)) != 0)
679 return;
680
681 if (frame_debug)
682 fprintf_unfiltered
683 (gdb_stdlog,
684 "amd64_windows_frame_decodes_insn: "
685 "%s: ver: %02x, plgsz: %02x, cnt: %02x, frame: %02x\n",
686 paddress (gdbarch, unwind_info),
687 ex_ui.Version_Flags, ex_ui.SizeOfPrologue,
688 ex_ui.CountOfCodes, ex_ui.FrameRegisterOffset);
689
690 /* Check version. */
691 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) != 1
692 && PEX64_UWI_VERSION (ex_ui.Version_Flags) != 2)
693 return;
694
695 start = cache->image_base + cache->start_rva;
696 if (first
697 && !(cache->pc >= start && cache->pc < start + ex_ui.SizeOfPrologue))
698 {
699 /* We want to detect if the PC points to an epilogue. This needs
700 to be checked only once, and an epilogue can be anywhere but in
701 the prologue. If so, the epilogue detection+decoding function is
702 sufficient. Otherwise, the unwinder will consider that the PC
703 is in the body of the function and will need to decode unwind
704 info. */
705 if (amd64_windows_frame_decode_epilogue (this_frame, cache) == 1)
706 return;
707
708 /* Not in an epilog. Clear possible side effects. */
709 memset (cache->prev_reg_addr, 0, sizeof (cache->prev_reg_addr));
710 }
711
712 codes_count = ex_ui.CountOfCodes;
713 frame_reg = PEX64_UWI_FRAMEREG (ex_ui.FrameRegisterOffset);
714
715 if (frame_reg != 0)
716 {
717 /* According to msdn:
718 If an FP reg is used, then any unwind code taking an offset must
719 only be used after the FP reg is established in the prolog. */
720 gdb_byte buf[8];
721 int frreg = amd64_windows_w2gdb_regnum[frame_reg];
722
723 get_frame_register (this_frame, frreg, buf);
724 save_addr = extract_unsigned_integer (buf, 8, byte_order);
725
726 if (frame_debug)
727 fprintf_unfiltered (gdb_stdlog, " frame_reg=%s, val=%s\n",
728 gdbarch_register_name (gdbarch, frreg),
729 paddress (gdbarch, save_addr));
730 }
731
732 /* Read opcodes. */
733 if (codes_count != 0
734 && target_read_memory (cache->image_base + unwind_info
735 + sizeof (ex_ui),
736 insns, codes_count * 2) != 0)
737 return;
738
739 end_insns = &insns[codes_count * 2];
740 p = insns;
741
742 /* Skip any leading opcode 6 entries (version 2 only). This opcode is not documented. */
743 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) == 2)
744 {
745 for (; p < end_insns; p += 2)
746 if (PEX64_UNWCODE_CODE (p[1]) != 6)
747 break;
748 }
749
750 for (; p < end_insns; p += 2)
751 {
752 int reg;
753
754 /* Virtually execute the operation if the pc is after the
755 corresponding instruction (this matters in case of a break
756 within the prologue). Note that for chained info (!first), the
757 prologue has been fully executed. */
758 if (cache->pc >= start + p[0] || cache->pc < start)
759 {
760 if (frame_debug)
761 fprintf_unfiltered
762 (gdb_stdlog, " op #%u: off=0x%02x, insn=0x%02x\n",
763 (unsigned) (p - insns), p[0], p[1]);
764
765 /* If no frame register is defined, the current value of
766 rsp is used instead. */
767 if (frame_reg == 0)
768 save_addr = cur_sp;
769
770 reg = -1;
771
772 switch (PEX64_UNWCODE_CODE (p[1]))
773 {
774 case UWOP_PUSH_NONVOL:
775 /* Push pre-decrements RSP. */
776 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
777 cache->prev_reg_addr[reg] = cur_sp;
778 cur_sp += 8;
779 break;
780 case UWOP_ALLOC_LARGE:
781 if (PEX64_UNWCODE_INFO (p[1]) == 0)
782 cur_sp +=
783 8 * extract_unsigned_integer (p + 2, 2, byte_order);
784 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
785 cur_sp += extract_unsigned_integer (p + 2, 4, byte_order);
786 else
787 return;
788 break;
789 case UWOP_ALLOC_SMALL:
790 cur_sp += 8 + 8 * PEX64_UNWCODE_INFO (p[1]);
791 break;
792 case UWOP_SET_FPREG:
793 cur_sp = save_addr
794 - PEX64_UWI_FRAMEOFF (ex_ui.FrameRegisterOffset) * 16;
795 break;
796 case UWOP_SAVE_NONVOL:
797 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
798 cache->prev_reg_addr[reg] = save_addr
799 + 8 * extract_unsigned_integer (p + 2, 2, byte_order);
800 break;
801 case UWOP_SAVE_NONVOL_FAR:
802 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
803 cache->prev_reg_addr[reg] = save_addr
804 + 8 * extract_unsigned_integer (p + 2, 4, byte_order);
805 break;
806 case UWOP_SAVE_XMM128:
807 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
808 save_addr
809 - 16 * extract_unsigned_integer (p + 2, 2, byte_order);
810 break;
811 case UWOP_SAVE_XMM128_FAR:
812 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
813 save_addr
814 - 16 * extract_unsigned_integer (p + 2, 4, byte_order);
815 break;
816 case UWOP_PUSH_MACHFRAME:
817 if (PEX64_UNWCODE_INFO (p[1]) == 0)
818 {
819 cache->prev_rip_addr = cur_sp + 0;
820 cache->prev_rsp_addr = cur_sp + 24;
821 cur_sp += 40;
822 }
823 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
824 {
825 cache->prev_rip_addr = cur_sp + 8;
826 cache->prev_rsp_addr = cur_sp + 32;
827 cur_sp += 48;
828 }
829 else
830 return;
831 break;
832 default:
833 return;
834 }
835
836 /* Display address where the register was saved. */
837 if (frame_debug && reg >= 0)
838 fprintf_unfiltered
839 (gdb_stdlog, " [reg %s at %s]\n",
840 gdbarch_register_name (gdbarch, reg),
841 paddress (gdbarch, cache->prev_reg_addr[reg]));
842 }
843
844 /* Adjust with the length of the opcode. */
845 switch (PEX64_UNWCODE_CODE (p[1]))
846 {
847 case UWOP_PUSH_NONVOL:
848 case UWOP_ALLOC_SMALL:
849 case UWOP_SET_FPREG:
850 case UWOP_PUSH_MACHFRAME:
851 break;
852 case UWOP_ALLOC_LARGE:
853 if (PEX64_UNWCODE_INFO (p[1]) == 0)
854 p += 2;
855 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
856 p += 4;
857 else
858 return;
859 break;
860 case UWOP_SAVE_NONVOL:
861 case UWOP_SAVE_XMM128:
862 p += 2;
863 break;
864 case UWOP_SAVE_NONVOL_FAR:
865 case UWOP_SAVE_XMM128_FAR:
866 p += 4;
867 break;
868 default:
869 return;
870 }
871 }
872 if (PEX64_UWI_FLAGS (ex_ui.Version_Flags) != UNW_FLAG_CHAININFO)
873 {
874 /* End of unwind info. */
875 break;
876 }
877 else
878 {
879 /* Read the chained unwind info. */
880 struct external_pex64_runtime_function d;
881 CORE_ADDR chain_vma;
882
883 /* No longer the first entry. */
884 first = 0;
885
886 /* Stay aligned on word boundary. */
887 chain_vma = cache->image_base + unwind_info
888 + sizeof (ex_ui) + ((codes_count + 1) & ~1) * 2;
889
890 if (target_read_memory (chain_vma, (gdb_byte *) &d, sizeof (d)) != 0)
891 return;
892
893 /* Decode begin/end. This may differ from the .pdata entry, as
894 an unwind info may be shared by several functions (in particular
895 if many functions have the same prologue and handler). */
896 cache->start_rva =
897 extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
898 cache->end_rva =
899 extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
900 unwind_info =
901 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
902
903 if (frame_debug)
904 fprintf_unfiltered
905 (gdb_stdlog,
906 "amd64_windows_frame_decodes_insn (next in chain):"
907 " unwind_data=%s, start_rva=%s, end_rva=%s\n",
908 paddress (gdbarch, unwind_info),
909 paddress (gdbarch, cache->start_rva),
910 paddress (gdbarch, cache->end_rva));
911 }
912
913 /* Allow the user to break this loop. */
914 QUIT;
915 }
916 /* PC is saved by the call. */
917 if (cache->prev_rip_addr == 0)
918 cache->prev_rip_addr = cur_sp;
919 cache->prev_sp = cur_sp + 8;
920
921 if (frame_debug)
922 fprintf_unfiltered (gdb_stdlog, " prev_sp: %s, prev_pc @%s\n",
923 paddress (gdbarch, cache->prev_sp),
924 paddress (gdbarch, cache->prev_rip_addr));
925 }
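
/* Worked example, assuming a function whose prologue is

       push   %rbp
       sub    $0x20, %rsp

   and whose unwind codes are, in reverse prologue order, UWOP_ALLOC_SMALL
   (info 3, i.e. 8 + 8 * 3 = 0x20 bytes) followed by UWOP_PUSH_NONVOL
   (%rbp): starting from this frame's SP, the decoder above first adds
   0x20 to undo the allocation, then records the %rbp save slot at the
   resulting address and adds another 8.  The saved return address then
   sits at that final value, and the previous frame's SP is 8 bytes
   above it.  */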
926
927 /* Find SEH unwind info for PC, returning 0 on success.
928
929 UNWIND_INFO is set to the RVA of the unwind info, IMAGE_BASE
930 to the base address of the corresponding image, and START_RVA
931 and END_RVA to the RVA bounds of the function containing PC. */
932
933 static int
934 amd64_windows_find_unwind_info (struct gdbarch *gdbarch, CORE_ADDR pc,
935 CORE_ADDR *unwind_info,
936 CORE_ADDR *image_base,
937 CORE_ADDR *start_rva,
938 CORE_ADDR *end_rva)
939 {
940 struct obj_section *sec;
941 pe_data_type *pe;
942 IMAGE_DATA_DIRECTORY *dir;
943 struct objfile *objfile;
944 unsigned long lo, hi;
945 CORE_ADDR base;
946 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
947
948 /* Get the corresponding exception directory. */
949 sec = find_pc_section (pc);
950 if (sec == NULL)
951 return -1;
952 objfile = sec->objfile;
953 pe = pe_data (sec->objfile->obfd);
954 dir = &pe->pe_opthdr.DataDirectory[PE_EXCEPTION_TABLE];
955
956 base = pe->pe_opthdr.ImageBase + objfile->text_section_offset ();
957 *image_base = base;
958
959 /* Find the entry.
960
961 Note: This does not handle dynamically added entries (for JIT
962 engines). For this, we would need to ask the kernel directly,
963 which means getting some info from the native layer. For the
964 rest of the code, however, it's probably faster to search
965 the entry ourselves. */
966 lo = 0;
967 hi = dir->Size / sizeof (struct external_pex64_runtime_function);
968 *unwind_info = 0;
969 while (lo <= hi)
970 {
971 unsigned long mid = lo + (hi - lo) / 2;
972 struct external_pex64_runtime_function d;
973 CORE_ADDR sa, ea;
974
975 if (target_read_memory (base + dir->VirtualAddress + mid * sizeof (d),
976 (gdb_byte *) &d, sizeof (d)) != 0)
977 return -1;
978
979 sa = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
980 ea = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
981 if (pc < base + sa)
982 hi = mid - 1;
983 else if (pc >= base + ea)
984 lo = mid + 1;
985 else if (pc >= base + sa && pc < base + ea)
986 {
987 /* Got it. */
988 *start_rva = sa;
989 *end_rva = ea;
990 *unwind_info =
991 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
992 break;
993 }
994 else
995 break;
996 }
997
998 if (frame_debug)
999 fprintf_unfiltered
1000 (gdb_stdlog,
1001 "amd64_windows_find_unwind_data: image_base=%s, unwind_data=%s\n",
1002 paddress (gdbarch, base), paddress (gdbarch, *unwind_info));
1003
1004 return 0;
1005 }
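
/* Each entry searched above is a 12-byte RUNTIME_FUNCTION record in the
   .pdata exception directory, made of three 32-bit RVAs:

       +0  rva_BeginAddress   start of the function
       +4  rva_EndAddress     one past the end of the function
       +8  rva_UnwindData     UNWIND_INFO block (or, if odd, a redirection
                              handled in amd64_windows_frame_decode_insns)

   The entries are sorted by address, which is what makes the binary
   search above valid.  */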
1006
1007 /* Fill THIS_CACHE using the native amd64-windows unwinding data
1008 for THIS_FRAME. */
1009
1010 static struct amd64_windows_frame_cache *
1011 amd64_windows_frame_cache (struct frame_info *this_frame, void **this_cache)
1012 {
1013 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1014 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1015 struct amd64_windows_frame_cache *cache;
1016 gdb_byte buf[8];
1017 CORE_ADDR pc;
1018 CORE_ADDR unwind_info = 0;
1019
1020 if (*this_cache)
1021 return (struct amd64_windows_frame_cache *) *this_cache;
1022
1023 cache = FRAME_OBSTACK_ZALLOC (struct amd64_windows_frame_cache);
1024 *this_cache = cache;
1025
1026 /* Get current PC and SP. */
1027 pc = get_frame_pc (this_frame);
1028 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1029 cache->sp = extract_unsigned_integer (buf, 8, byte_order);
1030 cache->pc = pc;
1031
1032 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1033 &cache->image_base,
1034 &cache->start_rva,
1035 &cache->end_rva))
1036 return cache;
1037
1038 if (unwind_info == 0)
1039 {
1040 /* Assume a leaf function. */
1041 cache->prev_sp = cache->sp + 8;
1042 cache->prev_rip_addr = cache->sp;
1043 }
1044 else
1045 {
1046 /* Decode unwind insns to compute saved addresses. */
1047 amd64_windows_frame_decode_insns (this_frame, cache, unwind_info);
1048 }
1049 return cache;
1050 }
1051
1052 /* Implement the "prev_register" method of struct frame_unwind
1053 using the standard Windows x64 SEH info. */
1054
1055 static struct value *
1056 amd64_windows_frame_prev_register (struct frame_info *this_frame,
1057 void **this_cache, int regnum)
1058 {
1059 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1060 struct amd64_windows_frame_cache *cache =
1061 amd64_windows_frame_cache (this_frame, this_cache);
1062 CORE_ADDR prev;
1063
1064 if (frame_debug)
1065 fprintf_unfiltered (gdb_stdlog,
1066 "amd64_windows_frame_prev_register %s for sp=%s\n",
1067 gdbarch_register_name (gdbarch, regnum),
1068 paddress (gdbarch, cache->prev_sp));
1069
1070 if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
1071 prev = cache->prev_xmm_addr[regnum - AMD64_XMM0_REGNUM];
1072 else if (regnum == AMD64_RSP_REGNUM)
1073 {
1074 prev = cache->prev_rsp_addr;
1075 if (prev == 0)
1076 return frame_unwind_got_constant (this_frame, regnum, cache->prev_sp);
1077 }
1078 else if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_R15_REGNUM)
1079 prev = cache->prev_reg_addr[regnum - AMD64_RAX_REGNUM];
1080 else if (regnum == AMD64_RIP_REGNUM)
1081 prev = cache->prev_rip_addr;
1082 else
1083 prev = 0;
1084
1085 if (prev && frame_debug)
1086 fprintf_unfiltered (gdb_stdlog, " -> at %s\n", paddress (gdbarch, prev));
1087
1088 if (prev)
1089 {
1090 /* Register was saved. */
1091 return frame_unwind_got_memory (this_frame, regnum, prev);
1092 }
1093 else
1094 {
1095 /* Register is either volatile or not modified. */
1096 return frame_unwind_got_register (this_frame, regnum, regnum);
1097 }
1098 }
1099
1100 /* Implement the "this_id" method of struct frame_unwind using
1101 the standard Windows x64 SEH info. */
1102
1103 static void
1104 amd64_windows_frame_this_id (struct frame_info *this_frame, void **this_cache,
1105 struct frame_id *this_id)
1106 {
1107 struct amd64_windows_frame_cache *cache =
1108 amd64_windows_frame_cache (this_frame, this_cache);
1109
1110 *this_id = frame_id_build (cache->prev_sp,
1111 cache->image_base + cache->start_rva);
1112 }
1113
1114 /* Windows x64 SEH unwinder. */
1115
1116 static const struct frame_unwind amd64_windows_frame_unwind =
1117 {
1118 NORMAL_FRAME,
1119 default_frame_unwind_stop_reason,
1120 &amd64_windows_frame_this_id,
1121 &amd64_windows_frame_prev_register,
1122 NULL,
1123 default_frame_sniffer
1124 };
1125
1126 /* Implement the "skip_prologue" gdbarch method. */
1127
1128 static CORE_ADDR
1129 amd64_windows_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1130 {
1131 CORE_ADDR func_addr;
1132 CORE_ADDR unwind_info = 0;
1133 CORE_ADDR image_base, start_rva, end_rva;
1134 struct external_pex64_unwind_info ex_ui;
1135
1136 /* Use prologue size from unwind info. */
1137 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1138 &image_base, &start_rva, &end_rva) == 0)
1139 {
1140 if (unwind_info == 0)
1141 {
1142 /* Leaf function. */
1143 return pc;
1144 }
1145 else if (target_read_memory (image_base + unwind_info,
1146 (gdb_byte *) &ex_ui, sizeof (ex_ui)) == 0
1147 && PEX64_UWI_VERSION (ex_ui.Version_Flags) == 1)
1148 return std::max (pc, image_base + start_rva + ex_ui.SizeOfPrologue);
1149 }
1150
1151 /* See if we can determine the end of the prologue via the symbol
1152 table. If so, then return either the PC, or the PC after
1153 the prologue, whichever is greater. */
1154 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1155 {
1156 CORE_ADDR post_prologue_pc
1157 = skip_prologue_using_sal (gdbarch, func_addr);
1158
1159 if (post_prologue_pc != 0)
1160 return std::max (pc, post_prologue_pc);
1161 }
1162
1163 return pc;
1164 }
1165
1166 /* Check for a Win64 DLL jmp trampoline and find the jump destination. */
1167
1168 static CORE_ADDR
1169 amd64_windows_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
1170 {
1171 CORE_ADDR destination = 0;
1172 struct gdbarch *gdbarch = get_frame_arch (frame);
1173 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1174
1175 /* Check for jmp *<offset>(%rip) (jump near, absolute indirect (/4)). */
1176 if (pc && read_memory_unsigned_integer (pc, 2, byte_order) == 0x25ff)
1177 {
1178 /* Get opcode offset and see if we can find a reference in our data. */
1179 ULONGEST offset
1180 = read_memory_unsigned_integer (pc + 2, 4, byte_order);
1181
1182 /* Get the address of the function pointer (RIP-relative to the end of the jmp). */
1183 CORE_ADDR indirect_addr = pc + offset + 6;
1184
1185 struct minimal_symbol *indsym
1186 = (indirect_addr
1187 ? lookup_minimal_symbol_by_pc (indirect_addr).minsym
1188 : NULL);
1189 const char *symname = indsym ? indsym->linkage_name () : NULL;
1190
1191 if (symname)
1192 {
1193 if (startswith (symname, "__imp_")
1194 || startswith (symname, "_imp_"))
1195 destination
1196 = read_memory_unsigned_integer (indirect_addr, 8, byte_order);
1197 }
1198 }
1199
1200 return destination;
1201 }
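
/* The pattern matched above is the import thunk used for calls into a
   DLL; for example, for a routine Foo imported from some DLL:

       ff 25 xx xx xx xx    jmp    *__imp_Foo(%rip)

   where __imp_Foo is the 8-byte IAT slot located at PC + 6 + disp32 and
   holding the routine's real entry point, which is what gets returned
   as the destination.  */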
1202
1203 /* Implement the "auto_wide_charset" gdbarch method. */
1204
1205 static const char *
1206 amd64_windows_auto_wide_charset (void)
1207 {
1208 return "UTF-16";
1209 }
1210
1211 static void
1212 amd64_windows_init_abi_common (gdbarch_info info, struct gdbarch *gdbarch)
1213 {
1214 /* The dwarf2 unwinder (appended very early by i386_gdbarch_init) is
1215 preferred over the SEH one. The reasons are:
1216 - binaries without SEH but with dwarf2 debug info are correctly handled
1217 (although they aren't ABI compliant, gcc before 4.7 didn't emit SEH
1218 info).
1219 - dwarf3 DW_OP_call_frame_cfa is correctly handled (it can only be
1220 handled if the dwarf2 unwinder is used).
1221
1222 The call to amd64_init_abi appends default unwinders that aren't
1223 compatible with the SEH one.
1224 */
1225 frame_unwind_append_unwinder (gdbarch, &amd64_windows_frame_unwind);
1226
1227 amd64_init_abi (info, gdbarch,
1228 amd64_target_description (X86_XSTATE_SSE_MASK, false));
1229
1230 windows_init_abi (info, gdbarch);
1231
1232 /* Function calls. */
1233 set_gdbarch_push_dummy_call (gdbarch, amd64_windows_push_dummy_call);
1234 set_gdbarch_return_value (gdbarch, amd64_windows_return_value);
1235 set_gdbarch_skip_main_prologue (gdbarch, amd64_skip_main_prologue);
1236 set_gdbarch_skip_trampoline_code (gdbarch,
1237 amd64_windows_skip_trampoline_code);
1238
1239 set_gdbarch_skip_prologue (gdbarch, amd64_windows_skip_prologue);
1240
1241 set_gdbarch_auto_wide_charset (gdbarch, amd64_windows_auto_wide_charset);
1242 }
1243
1244 static void
1245 amd64_windows_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1246 {
1247 amd64_windows_init_abi_common (info, gdbarch);
1248
1249 /* On Windows, "long" is only 32 bits. */
1250 set_gdbarch_long_bit (gdbarch, 32);
1251 }
1252
1253 static void
1254 amd64_cygwin_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1255 {
1256 amd64_windows_init_abi_common (info, gdbarch);
1257 }
1258
1259 static gdb_osabi
1260 amd64_windows_osabi_sniffer (bfd *abfd)
1261 {
1262 const char *target_name = bfd_get_target (abfd);
1263
1264 if (!streq (target_name, "pei-x86-64"))
1265 return GDB_OSABI_UNKNOWN;
1266
1267 if (is_linked_with_cygwin_dll (abfd))
1268 return GDB_OSABI_CYGWIN;
1269
1270 return GDB_OSABI_WINDOWS;
1271 }
1272
1273 void _initialize_amd64_windows_tdep ();
1274 void
1275 _initialize_amd64_windows_tdep ()
1276 {
1277 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_WINDOWS,
1278 amd64_windows_init_abi);
1279 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_CYGWIN,
1280 amd64_cygwin_init_abi);
1281
1282 gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_coff_flavour,
1283 amd64_windows_osabi_sniffer);
1284 }