1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "features/arm-with-m.c"
59
60 static int arm_debug;
61
62 /* Macros for setting and testing a bit in a minimal symbol that marks
63 it as a Thumb function. The MSYMBOL_TARGET_FLAG_1 field of the minimal
64 symbol is used for this purpose.
65
66 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
67 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
68
69 #define MSYMBOL_SET_SPECIAL(msym) \
70 MSYMBOL_TARGET_FLAG_1 (msym) = 1
71
72 #define MSYMBOL_IS_SPECIAL(msym) \
73 MSYMBOL_TARGET_FLAG_1 (msym)
74
75 /* Per-objfile data used for mapping symbols. */
76 static const struct objfile_data *arm_objfile_data_key;
77
78 struct arm_mapping_symbol
79 {
80 bfd_vma value;
81 char type;
82 };
83 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
84 DEF_VEC_O(arm_mapping_symbol_s);
85
86 struct arm_per_objfile
87 {
88 VEC(arm_mapping_symbol_s) **section_maps;
89 };
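/* Each element of the SECTION_MAPS array above is a vector of the mapping
   symbols belonging to one BFD section of the objfile, indexed by the
   section's index and kept sorted by address (as required by the
   VEC_lower_bound search in arm_find_mapping_symbol below).  */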
90
91 /* The list of available "set arm ..." and "show arm ..." commands. */
92 static struct cmd_list_element *setarmcmdlist = NULL;
93 static struct cmd_list_element *showarmcmdlist = NULL;
94
95 /* The type of floating-point to use. Keep this in sync with enum
96 arm_float_model, and the help string in _initialize_arm_tdep. */
97 static const char *fp_model_strings[] =
98 {
99 "auto",
100 "softfpa",
101 "fpa",
102 "softvfp",
103 "vfp",
104 NULL
105 };
106
107 /* A variable that can be configured by the user. */
108 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
109 static const char *current_fp_model = "auto";
110
111 /* The ABI to use. Keep this in sync with arm_abi_kind. */
112 static const char *arm_abi_strings[] =
113 {
114 "auto",
115 "APCS",
116 "AAPCS",
117 NULL
118 };
119
120 /* A variable that can be configured by the user. */
121 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
122 static const char *arm_abi_string = "auto";
123
124 /* The execution mode to assume. */
125 static const char *arm_mode_strings[] =
126 {
127 "auto",
128 "arm",
129 "thumb",
130 NULL
131 };
132
133 static const char *arm_fallback_mode_string = "auto";
134 static const char *arm_force_mode_string = "auto";
135
136 /* Number of different reg name sets (options). */
137 static int num_disassembly_options;
138
139 /* The standard register names, and all the valid aliases for them. Note
140 that `fp', `sp' and `pc' are not added in this alias list, because they
141 have been added as builtin user registers in
142 std-regs.c:_initialize_frame_reg. */
143 static const struct
144 {
145 const char *name;
146 int regnum;
147 } arm_register_aliases[] = {
148 /* Basic register numbers. */
149 { "r0", 0 },
150 { "r1", 1 },
151 { "r2", 2 },
152 { "r3", 3 },
153 { "r4", 4 },
154 { "r5", 5 },
155 { "r6", 6 },
156 { "r7", 7 },
157 { "r8", 8 },
158 { "r9", 9 },
159 { "r10", 10 },
160 { "r11", 11 },
161 { "r12", 12 },
162 { "r13", 13 },
163 { "r14", 14 },
164 { "r15", 15 },
165 /* Synonyms (argument and variable registers). */
166 { "a1", 0 },
167 { "a2", 1 },
168 { "a3", 2 },
169 { "a4", 3 },
170 { "v1", 4 },
171 { "v2", 5 },
172 { "v3", 6 },
173 { "v4", 7 },
174 { "v5", 8 },
175 { "v6", 9 },
176 { "v7", 10 },
177 { "v8", 11 },
178 /* Other platform-specific names for r9. */
179 { "sb", 9 },
180 { "tr", 9 },
181 /* Special names. */
182 { "ip", 12 },
183 { "lr", 14 },
184 /* Names used by GCC (not listed in the ARM EABI). */
185 { "sl", 10 },
186 /* A special name from the older ATPCS. */
187 { "wr", 7 },
188 };
189
190 static const char *const arm_register_names[] =
191 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
192 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
193 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
194 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
195 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
196 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
197 "fps", "cpsr" }; /* 24 25 */
198
199 /* Valid register name styles. */
200 static const char **valid_disassembly_styles;
201
202 /* Disassembly style to use. Default to "std" register names. */
203 static const char *disassembly_style;
204
205 /* This is used to keep the bfd arch_info in sync with the disassembly
206 style. */
207 static void set_disassembly_style_sfunc (char *, int,
208 struct cmd_list_element *);
209 static void set_disassembly_style (void);
210
211 static void convert_from_extended (const struct floatformat *, const void *,
212 void *, int);
213 static void convert_to_extended (const struct floatformat *, void *,
214 const void *, int);
215
216 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
217 struct regcache *regcache,
218 int regnum, gdb_byte *buf);
219 static void arm_neon_quad_write (struct gdbarch *gdbarch,
220 struct regcache *regcache,
221 int regnum, const gdb_byte *buf);
222
223 struct arm_prologue_cache
224 {
225 /* The stack pointer at the time this frame was created; i.e. the
226 caller's stack pointer when this function was called. It is used
227 to identify this frame. */
228 CORE_ADDR prev_sp;
229
230 /* The frame base for this frame is just prev_sp - frame size.
231 FRAMESIZE is the distance from the frame pointer to the
232 initial stack pointer. */
233
234 int framesize;
235
236 /* The register used to hold the frame pointer for this frame. */
237 int framereg;
238
239 /* Saved register offsets. */
240 struct trad_frame_saved_reg *saved_regs;
241 };
242
243 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
244 CORE_ADDR prologue_start,
245 CORE_ADDR prologue_end,
246 struct arm_prologue_cache *cache);
247
248 /* Architecture version for displaced stepping. This affects the behaviour of
249 certain instructions, and really should not be hard-wired. */
250
251 #define DISPLACED_STEPPING_ARCH_VERSION 5
252
253 /* Addresses for calling Thumb functions have bit 0 set.
254 Here are some macros to test, set, or clear bit 0 of addresses. */
255 #define IS_THUMB_ADDR(addr) ((addr) & 1)
256 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
257 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
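/* (Bit 0 is never set in a genuine instruction address, since ARM
   instructions are 4-byte aligned and Thumb instructions 2-byte aligned;
   the architecture's BX/BLX instructions use that bit to select the
   instruction set, and GDB follows the same convention for symbol and
   breakpoint addresses.)  */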
258
259 /* Set to true if the 32-bit mode is in use. */
260
261 int arm_apcs_32 = 1;
262
263 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
264
265 int
266 arm_psr_thumb_bit (struct gdbarch *gdbarch)
267 {
268 if (gdbarch_tdep (gdbarch)->is_m)
269 return XPSR_T;
270 else
271 return CPSR_T;
272 }
273
274 /* Determine if FRAME is executing in Thumb mode. */
275
276 int
277 arm_frame_is_thumb (struct frame_info *frame)
278 {
279 CORE_ADDR cpsr;
280 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
281
282 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
283 directly (from a signal frame or dummy frame) or by interpreting
284 the saved LR (from a prologue or DWARF frame). So consult it and
285 trust the unwinders. */
286 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
287
288 return (cpsr & t_bit) != 0;
289 }
290
291 /* Callback for VEC_lower_bound. */
292
293 static inline int
294 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
295 const struct arm_mapping_symbol *rhs)
296 {
297 return lhs->value < rhs->value;
298 }
299
300 /* Search for the mapping symbol covering MEMADDR. If one is found,
301 return its type. Otherwise, return 0. If START is non-NULL,
302 set *START to the location of the mapping symbol. */
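/* Mapping symbols are defined by the ARM ELF specification: "$a" marks
   the start of a region of ARM code, "$t" a region of Thumb code, and
   "$d" a region of data.  The TYPE returned here is the character after
   the '$', so callers typically compare it against 't'.  */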
303
304 static char
305 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
306 {
307 struct obj_section *sec;
308
309 /* If there are mapping symbols, consult them. */
310 sec = find_pc_section (memaddr);
311 if (sec != NULL)
312 {
313 struct arm_per_objfile *data;
314 VEC(arm_mapping_symbol_s) *map;
315 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
316 0 };
317 unsigned int idx;
318
319 data = objfile_data (sec->objfile, arm_objfile_data_key);
320 if (data != NULL)
321 {
322 map = data->section_maps[sec->the_bfd_section->index];
323 if (!VEC_empty (arm_mapping_symbol_s, map))
324 {
325 struct arm_mapping_symbol *map_sym;
326
327 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
328 arm_compare_mapping_symbols);
329
330 /* VEC_lower_bound finds the earliest ordered insertion
331 point. If the following symbol starts at this exact
332 address, we use that; otherwise, the preceding
333 mapping symbol covers this address. */
334 if (idx < VEC_length (arm_mapping_symbol_s, map))
335 {
336 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
337 if (map_sym->value == map_key.value)
338 {
339 if (start)
340 *start = map_sym->value + obj_section_addr (sec);
341 return map_sym->type;
342 }
343 }
344
345 if (idx > 0)
346 {
347 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
348 if (start)
349 *start = map_sym->value + obj_section_addr (sec);
350 return map_sym->type;
351 }
352 }
353 }
354 }
355
356 return 0;
357 }
358
359 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
360 CORE_ADDR pc, int insert_bkpt);
361
362 /* Determine if the program counter specified in MEMADDR is in a Thumb
363 function. This function should be called for addresses unrelated to
364 any executing frame; otherwise, prefer arm_frame_is_thumb. */
365
366 int
367 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
368 {
369 struct obj_section *sec;
370 struct minimal_symbol *sym;
371 char type;
372 struct displaced_step_closure *dsc
373 = get_displaced_step_closure_by_addr (memaddr);
374
375 /* If we are checking the mode of a displaced instruction in the copy area,
376 the mode should be determined by the instruction at the original address. */
377 if (dsc)
378 {
379 if (debug_displaced)
380 fprintf_unfiltered (gdb_stdlog,
381 "displaced: check mode of %.8lx instead of %.8lx\n",
382 (unsigned long) dsc->insn_addr,
383 (unsigned long) memaddr);
384 memaddr = dsc->insn_addr;
385 }
386
387 /* If bit 0 of the address is set, assume this is a Thumb address. */
388 if (IS_THUMB_ADDR (memaddr))
389 return 1;
390
391 /* If the user wants to override the symbol table, let them. */
392 if (strcmp (arm_force_mode_string, "arm") == 0)
393 return 0;
394 if (strcmp (arm_force_mode_string, "thumb") == 0)
395 return 1;
396
397 /* ARM v6-M and v7-M are always in Thumb mode. */
398 if (gdbarch_tdep (gdbarch)->is_m)
399 return 1;
400
401 /* If there are mapping symbols, consult them. */
402 type = arm_find_mapping_symbol (memaddr, NULL);
403 if (type)
404 return type == 't';
405
406 /* Thumb functions have a "special" bit set in minimal symbols. */
407 sym = lookup_minimal_symbol_by_pc (memaddr);
408 if (sym)
409 return (MSYMBOL_IS_SPECIAL (sym));
410
411 /* If the user wants to override the fallback mode, let them. */
412 if (strcmp (arm_fallback_mode_string, "arm") == 0)
413 return 0;
414 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
415 return 1;
416
417 /* If we couldn't find any symbol, but we're talking to a running
418 target, then trust the current value of $cpsr. This lets
419 "display/i $pc" always show the correct mode (though if there is
420 a symbol table we will not reach here, so it still may not be
421 displayed in the mode in which it will be executed).
422
423 As a further heuristic, if we detect that we are single-stepping, we
424 check which state executing the current instruction will leave us
425 in. */
426 if (target_has_registers)
427 {
428 struct frame_info *current_frame = get_current_frame ();
429 CORE_ADDR current_pc = get_frame_pc (current_frame);
430 int is_thumb = arm_frame_is_thumb (current_frame);
431 CORE_ADDR next_pc;
432 if (memaddr == current_pc)
433 return is_thumb;
434 else
435 {
436 struct gdbarch *gdbarch = get_frame_arch (current_frame);
437 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
438 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
439 return IS_THUMB_ADDR (next_pc);
440 else
441 return is_thumb;
442 }
443 }
444
445 /* Otherwise we're out of luck; we assume ARM. */
446 return 0;
447 }
448
449 /* Remove useless bits from addresses in a running program. */
450 static CORE_ADDR
451 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
452 {
453 if (arm_apcs_32)
454 return UNMAKE_THUMB_ADDR (val);
455 else
456 return (val & 0x03fffffc);
457 }
458
459 /* When reading symbols, we need to zap the low bit of the address,
460 which may be set to 1 for Thumb functions. */
461 static CORE_ADDR
462 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
463 {
464 return val & ~1;
465 }
466
467 /* Return 1 if PC is the start of a compiler helper function which
468 can be safely ignored during prologue skipping. IS_THUMB is true
469 if the function is known to be a Thumb function due to the way it
470 is being called. */
471 static int
472 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
473 {
474 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
475 struct minimal_symbol *msym;
476
477 msym = lookup_minimal_symbol_by_pc (pc);
478 if (msym != NULL
479 && SYMBOL_VALUE_ADDRESS (msym) == pc
480 && SYMBOL_LINKAGE_NAME (msym) != NULL)
481 {
482 const char *name = SYMBOL_LINKAGE_NAME (msym);
483
484 /* The GNU linker's Thumb call stub to foo is named
485 __foo_from_thumb. */
486 if (strstr (name, "_from_thumb") != NULL)
487 name += 2;
488
489 /* On soft-float targets, __truncdfsf2 is called to convert promoted
490 arguments to their argument types in non-prototyped
491 functions. */
492 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
493 return 1;
494 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
495 return 1;
496
497 /* Internal functions related to thread-local storage. */
498 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
499 return 1;
500 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
501 return 1;
502 }
503 else
504 {
505 /* If we run against a stripped glibc, we may be unable to identify
506 special functions by name. Check for one important case,
507 __aeabi_read_tp, by comparing the *code* against the default
508 implementation (this is hand-written ARM assembler in glibc). */
509
510 if (!is_thumb
511 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
512 == 0xe3e00a0f /* mov r0, #0xffff0fff */
513 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
514 == 0xe240f01f) /* sub pc, r0, #31 */
515 return 1;
516 }
517
518 return 0;
519 }
520
521 /* Support routines for instruction parsing. */
522 #define submask(x) ((1L << ((x) + 1)) - 1)
523 #define bit(obj,st) (((obj) >> (st)) & 1)
524 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
525 #define sbits(obj,st,fn) \
526 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
527 #define BranchDest(addr,instr) \
528 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
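/* As a worked example of the helpers above: for a branch instruction at
   address 0x1000 whose low 24 bits encode the signed word offset -2
   (0xfffffe), sbits (instr, 0, 23) yields -2 and BranchDest yields
   0x1000 + 8 + (-2 << 2) == 0x1000, i.e. a branch to itself (the +8
   accounts for the ARM-mode PC read-ahead).  */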
529
530 /* Extract the immediate from a movw/movt instruction of encoding T. INSN1
531 is the first 16 bits of the instruction, and INSN2 is the second
532 16 bits. */
533 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
534 ((bits ((insn1), 0, 3) << 12) \
535 | (bits ((insn1), 10, 10) << 11) \
536 | (bits ((insn2), 12, 14) << 8) \
537 | bits ((insn2), 0, 7))
538
539 /* Extract the immediate from a movw/movt instruction of encoding A. INSN
540 is the 32-bit instruction. */
541 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
542 ((bits ((insn), 16, 19) << 12) \
543 | bits ((insn), 0, 11))
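/* In other words, encoding T assembles the 16-bit immediate as
   imm4:i:imm3:imm8 taken from INSN1[3:0], INSN1[10], INSN2[14:12] and
   INSN2[7:0], while encoding A assembles it as INSN[19:16]:INSN[11:0].  */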
544
545 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
546
547 static unsigned int
548 thumb_expand_immediate (unsigned int imm)
549 {
550 unsigned int count = imm >> 7;
551
552 if (count < 8)
553 switch (count / 2)
554 {
555 case 0:
556 return imm & 0xff;
557 case 1:
558 return (imm & 0xff) | ((imm & 0xff) << 16);
559 case 2:
560 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
561 case 3:
562 return (imm & 0xff) | ((imm & 0xff) << 8)
563 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
564 }
565
566 return (0x80 | (imm & 0x7f)) << (32 - count);
567 }
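/* For example, thumb_expand_immediate (0x155) has COUNT == 2, so the byte
   0x55 is replicated into bytes 0 and 2, giving 0x00550055; and
   thumb_expand_immediate (0x4ff) has COUNT == 9, so 0xff is rotated right
   by 9 bits (equivalently, shifted left by 23), giving 0x7f800000.  */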
568
569 /* Return 1 if the 16-bit Thumb instruction INST might change
570 control flow, 0 otherwise. */
571
572 static int
573 thumb_instruction_changes_pc (unsigned short inst)
574 {
575 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
576 return 1;
577
578 if ((inst & 0xf000) == 0xd000) /* conditional branch */
579 return 1;
580
581 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
582 return 1;
583
584 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
585 return 1;
586
587 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
588 return 1;
589
590 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
591 return 1;
592
593 return 0;
594 }
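/* For example, 0xbd10 (pop {r4, pc}) matches the first test above and so
   is reported as changing the PC, while 0xb510 (push {r4, lr}) matches
   none of the tests and is not.  */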
595
596 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
597 might change control flow, 0 otherwise. */
598
599 static int
600 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
601 {
602 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
603 {
604 /* Branches and miscellaneous control instructions. */
605
606 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
607 {
608 /* B, BL, BLX. */
609 return 1;
610 }
611 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
612 {
613 /* SUBS PC, LR, #imm8. */
614 return 1;
615 }
616 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
617 {
618 /* Conditional branch. */
619 return 1;
620 }
621
622 return 0;
623 }
624
625 if ((inst1 & 0xfe50) == 0xe810)
626 {
627 /* Load multiple or RFE. */
628
629 if (bit (inst1, 7) && !bit (inst1, 8))
630 {
631 /* LDMIA or POP */
632 if (bit (inst2, 15))
633 return 1;
634 }
635 else if (!bit (inst1, 7) && bit (inst1, 8))
636 {
637 /* LDMDB */
638 if (bit (inst2, 15))
639 return 1;
640 }
641 else if (bit (inst1, 7) && bit (inst1, 8))
642 {
643 /* RFEIA */
644 return 1;
645 }
646 else if (!bit (inst1, 7) && !bit (inst1, 8))
647 {
648 /* RFEDB */
649 return 1;
650 }
651
652 return 0;
653 }
654
655 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
656 {
657 /* MOV PC or MOVS PC. */
658 return 1;
659 }
660
661 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
662 {
663 /* LDR PC. */
664 if (bits (inst1, 0, 3) == 15)
665 return 1;
666 if (bit (inst1, 7))
667 return 1;
668 if (bit (inst2, 11))
669 return 1;
670 if ((inst2 & 0x0fc0) == 0x0000)
671 return 1;
672
673 return 0;
674 }
675
676 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
677 {
678 /* TBB. */
679 return 1;
680 }
681
682 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
683 {
684 /* TBH. */
685 return 1;
686 }
687
688 return 0;
689 }
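/* For example, the halfword pair 0xf85d, 0xfb04 (ldr.w pc, [sp], #4 -- a
   common Thumb-2 "pop {pc}") is caught by the "LDR PC" case above because
   bit 11 of the second halfword is set, so it is reported as changing the
   PC.  */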
690
691 /* Analyze a Thumb prologue, looking for a recognizable stack frame
692 and frame pointer. Scan until we encounter a store that could
693 clobber the stack frame unexpectedly, or an unknown instruction.
694 Return the last address which is definitely safe to skip for an
695 initial breakpoint. */
696
697 static CORE_ADDR
698 thumb_analyze_prologue (struct gdbarch *gdbarch,
699 CORE_ADDR start, CORE_ADDR limit,
700 struct arm_prologue_cache *cache)
701 {
702 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
703 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
704 int i;
705 pv_t regs[16];
706 struct pv_area *stack;
707 struct cleanup *back_to;
708 CORE_ADDR offset;
709 CORE_ADDR unrecognized_pc = 0;
710
711 for (i = 0; i < 16; i++)
712 regs[i] = pv_register (i, 0);
713 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
714 back_to = make_cleanup_free_pv_area (stack);
715
716 while (start < limit)
717 {
718 unsigned short insn;
719
720 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
721
722 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
723 {
724 int regno;
725 int mask;
726
727 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
728 break;
729
730 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
731 whether to save LR (R14). */
732 mask = (insn & 0xff) | ((insn & 0x100) << 6);
733
734 /* Calculate offsets of saved R0-R7 and LR. */
735 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
736 if (mask & (1 << regno))
737 {
738 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
739 -4);
740 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
741 }
742 }
743 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
744 sub sp, #simm */
745 {
746 offset = (insn & 0x7f) << 2; /* get scaled offset */
747 if (insn & 0x80) /* Check for SUB. */
748 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
749 -offset);
750 else
751 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
752 offset);
753 }
754 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
755 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
756 (insn & 0xff) << 2);
757 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
758 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
759 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
760 bits (insn, 6, 8));
761 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
762 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
763 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
764 bits (insn, 0, 7));
765 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
766 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
767 && pv_is_constant (regs[bits (insn, 3, 5)]))
768 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
769 regs[bits (insn, 6, 8)]);
770 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
771 && pv_is_constant (regs[bits (insn, 3, 6)]))
772 {
773 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
774 int rm = bits (insn, 3, 6);
775 regs[rd] = pv_add (regs[rd], regs[rm]);
776 }
777 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
778 {
779 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
780 int src_reg = (insn & 0x78) >> 3;
781 regs[dst_reg] = regs[src_reg];
782 }
783 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
784 {
785 /* Handle stores to the stack. Normally pushes are used,
786 but with GCC -mtpcs-frame, there may be other stores
787 in the prologue to create the frame. */
788 int regno = (insn >> 8) & 0x7;
789 pv_t addr;
790
791 offset = (insn & 0xff) << 2;
792 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
793
794 if (pv_area_store_would_trash (stack, addr))
795 break;
796
797 pv_area_store (stack, addr, 4, regs[regno]);
798 }
799 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
800 {
801 int rd = bits (insn, 0, 2);
802 int rn = bits (insn, 3, 5);
803 pv_t addr;
804
805 offset = bits (insn, 6, 10) << 2;
806 addr = pv_add_constant (regs[rn], offset);
807
808 if (pv_area_store_would_trash (stack, addr))
809 break;
810
811 pv_area_store (stack, addr, 4, regs[rd]);
812 }
813 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
814 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
815 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
816 /* Ignore stores of argument registers to the stack. */
817 ;
818 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
819 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
820 /* Ignore block loads from the stack, potentially copying
821 parameters from memory. */
822 ;
823 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
824 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
825 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
826 /* Similarly ignore single loads from the stack. */
827 ;
828 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
829 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
830 /* Skip register copies, i.e. saves to another register
831 instead of the stack. */
832 ;
833 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
834 /* Recognize constant loads; even with small stacks these are necessary
835 on Thumb. */
836 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
837 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
838 {
839 /* Constant pool loads, for the same reason. */
840 unsigned int constant;
841 CORE_ADDR loc;
842
843 loc = start + 4 + bits (insn, 0, 7) * 4;
844 constant = read_memory_unsigned_integer (loc, 4, byte_order);
845 regs[bits (insn, 8, 10)] = pv_constant (constant);
846 }
847 else if ((insn & 0xe000) == 0xe000)
848 {
849 unsigned short inst2;
850
851 inst2 = read_memory_unsigned_integer (start + 2, 2,
852 byte_order_for_code);
853
854 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
855 {
856 /* BL, BLX. Allow some special function calls when
857 skipping the prologue; GCC generates these before
858 storing arguments to the stack. */
859 CORE_ADDR nextpc;
860 int j1, j2, imm1, imm2;
861
862 imm1 = sbits (insn, 0, 10);
863 imm2 = bits (inst2, 0, 10);
864 j1 = bit (inst2, 13);
865 j2 = bit (inst2, 11);
866
867 offset = ((imm1 << 12) + (imm2 << 1));
868 offset ^= ((!j2) << 22) | ((!j1) << 23);
869
870 nextpc = start + 4 + offset;
871 /* For BLX make sure to clear the low bits. */
872 if (bit (inst2, 12) == 0)
873 nextpc = nextpc & 0xfffffffc;
874
875 if (!skip_prologue_function (gdbarch, nextpc,
876 bit (inst2, 12) != 0))
877 break;
878 }
879
880 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
881 { registers } */
882 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
883 {
884 pv_t addr = regs[bits (insn, 0, 3)];
885 int regno;
886
887 if (pv_area_store_would_trash (stack, addr))
888 break;
889
890 /* Calculate offsets of saved registers. */
891 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
892 if (inst2 & (1 << regno))
893 {
894 addr = pv_add_constant (addr, -4);
895 pv_area_store (stack, addr, 4, regs[regno]);
896 }
897
898 if (insn & 0x0020)
899 regs[bits (insn, 0, 3)] = addr;
900 }
901
902 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
903 [Rn, #+/-imm]{!} */
904 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
905 {
906 int regno1 = bits (inst2, 12, 15);
907 int regno2 = bits (inst2, 8, 11);
908 pv_t addr = regs[bits (insn, 0, 3)];
909
910 offset = inst2 & 0xff;
911 if (insn & 0x0080)
912 addr = pv_add_constant (addr, offset);
913 else
914 addr = pv_add_constant (addr, -offset);
915
916 if (pv_area_store_would_trash (stack, addr))
917 break;
918
919 pv_area_store (stack, addr, 4, regs[regno1]);
920 pv_area_store (stack, pv_add_constant (addr, 4),
921 4, regs[regno2]);
922
923 if (insn & 0x0020)
924 regs[bits (insn, 0, 3)] = addr;
925 }
926
927 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
928 && (inst2 & 0x0c00) == 0x0c00
929 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
930 {
931 int regno = bits (inst2, 12, 15);
932 pv_t addr = regs[bits (insn, 0, 3)];
933
934 offset = inst2 & 0xff;
935 if (inst2 & 0x0200)
936 addr = pv_add_constant (addr, offset);
937 else
938 addr = pv_add_constant (addr, -offset);
939
940 if (pv_area_store_would_trash (stack, addr))
941 break;
942
943 pv_area_store (stack, addr, 4, regs[regno]);
944
945 if (inst2 & 0x0100)
946 regs[bits (insn, 0, 3)] = addr;
947 }
948
949 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
950 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
951 {
952 int regno = bits (inst2, 12, 15);
953 pv_t addr;
954
955 offset = inst2 & 0xfff;
956 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
957
958 if (pv_area_store_would_trash (stack, addr))
959 break;
960
961 pv_area_store (stack, addr, 4, regs[regno]);
962 }
963
964 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
965 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
966 /* Ignore stores of argument registers to the stack. */
967 ;
968
969 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
970 && (inst2 & 0x0d00) == 0x0c00
971 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
972 /* Ignore stores of argument registers to the stack. */
973 ;
974
975 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
976 { registers } */
977 && (inst2 & 0x8000) == 0x0000
978 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
979 /* Ignore block loads from the stack, potentially copying
980 parameters from memory. */
981 ;
982
983 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
984 [Rn, #+/-imm] */
985 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
986 /* Similarly ignore dual loads from the stack. */
987 ;
988
989 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
990 && (inst2 & 0x0d00) == 0x0c00
991 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
992 /* Similarly ignore single loads from the stack. */
993 ;
994
995 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
996 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
997 /* Similarly ignore single loads from the stack. */
998 ;
999
1000 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
1001 && (inst2 & 0x8000) == 0x0000)
1002 {
1003 unsigned int imm = ((bits (insn, 10, 10) << 11)
1004 | (bits (inst2, 12, 14) << 8)
1005 | bits (inst2, 0, 7));
1006
1007 regs[bits (inst2, 8, 11)]
1008 = pv_add_constant (regs[bits (insn, 0, 3)],
1009 thumb_expand_immediate (imm));
1010 }
1011
1012 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1013 && (inst2 & 0x8000) == 0x0000)
1014 {
1015 unsigned int imm = ((bits (insn, 10, 10) << 11)
1016 | (bits (inst2, 12, 14) << 8)
1017 | bits (inst2, 0, 7));
1018
1019 regs[bits (inst2, 8, 11)]
1020 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1021 }
1022
1023 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1024 && (inst2 & 0x8000) == 0x0000)
1025 {
1026 unsigned int imm = ((bits (insn, 10, 10) << 11)
1027 | (bits (inst2, 12, 14) << 8)
1028 | bits (inst2, 0, 7));
1029
1030 regs[bits (inst2, 8, 11)]
1031 = pv_add_constant (regs[bits (insn, 0, 3)],
1032 - (CORE_ADDR) thumb_expand_immediate (imm));
1033 }
1034
1035 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1036 && (inst2 & 0x8000) == 0x0000)
1037 {
1038 unsigned int imm = ((bits (insn, 10, 10) << 11)
1039 | (bits (inst2, 12, 14) << 8)
1040 | bits (inst2, 0, 7));
1041
1042 regs[bits (inst2, 8, 11)]
1043 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1044 }
1045
1046 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1047 {
1048 unsigned int imm = ((bits (insn, 10, 10) << 11)
1049 | (bits (inst2, 12, 14) << 8)
1050 | bits (inst2, 0, 7));
1051
1052 regs[bits (inst2, 8, 11)]
1053 = pv_constant (thumb_expand_immediate (imm));
1054 }
1055
1056 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1057 {
1058 unsigned int imm
1059 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1060
1061 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1062 }
1063
1064 else if (insn == 0xea5f /* mov.w Rd,Rm */
1065 && (inst2 & 0xf0f0) == 0)
1066 {
1067 int dst_reg = (inst2 & 0x0f00) >> 8;
1068 int src_reg = inst2 & 0xf;
1069 regs[dst_reg] = regs[src_reg];
1070 }
1071
1072 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1073 {
1074 /* Constant pool loads. */
1075 unsigned int constant;
1076 CORE_ADDR loc;
1077
1078 offset = bits (insn, 0, 11);
1079 if (insn & 0x0080)
1080 loc = start + 4 + offset;
1081 else
1082 loc = start + 4 - offset;
1083
1084 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1085 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1086 }
1087
1088 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1089 {
1090 /* Constant pool loads. */
1091 unsigned int constant;
1092 CORE_ADDR loc;
1093
1094 offset = bits (insn, 0, 7) << 2;
1095 if (insn & 0x0080)
1096 loc = start + 4 + offset;
1097 else
1098 loc = start + 4 - offset;
1099
1100 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1101 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1102
1103 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1104 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1105 }
1106
1107 else if (thumb2_instruction_changes_pc (insn, inst2))
1108 {
1109 /* Don't scan past anything that might change control flow. */
1110 break;
1111 }
1112 else
1113 {
1114 /* The optimizer might shove anything into the prologue,
1115 so we just skip what we don't recognize. */
1116 unrecognized_pc = start;
1117 }
1118
1119 start += 2;
1120 }
1121 else if (thumb_instruction_changes_pc (insn))
1122 {
1123 /* Don't scan past anything that might change control flow. */
1124 break;
1125 }
1126 else
1127 {
1128 /* The optimizer might shove anything into the prologue,
1129 so we just skip what we don't recognize. */
1130 unrecognized_pc = start;
1131 }
1132
1133 start += 2;
1134 }
1135
1136 if (arm_debug)
1137 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1138 paddress (gdbarch, start));
1139
1140 if (unrecognized_pc == 0)
1141 unrecognized_pc = start;
1142
1143 if (cache == NULL)
1144 {
1145 do_cleanups (back_to);
1146 return unrecognized_pc;
1147 }
1148
1149 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1150 {
1151 /* Frame pointer is fp. Frame size is constant. */
1152 cache->framereg = ARM_FP_REGNUM;
1153 cache->framesize = -regs[ARM_FP_REGNUM].k;
1154 }
1155 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1156 {
1157 /* Frame pointer is r7. Frame size is constant. */
1158 cache->framereg = THUMB_FP_REGNUM;
1159 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1160 }
1161 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1162 {
1163 /* Try the stack pointer... this is a bit desperate. */
1164 cache->framereg = ARM_SP_REGNUM;
1165 cache->framesize = -regs[ARM_SP_REGNUM].k;
1166 }
1167 else
1168 {
1169 /* We're just out of luck. We don't know where the frame is. */
1170 cache->framereg = -1;
1171 cache->framesize = 0;
1172 }
1173
1174 for (i = 0; i < 16; i++)
1175 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1176 cache->saved_regs[i].addr = offset;
1177
1178 do_cleanups (back_to);
1179 return unrecognized_pc;
1180 }
1181
1182
1183 /* Try to analyze the instructions starting at PC, which load the
1184 address of symbol __stack_chk_guard. If they are recognized, return
1185 that address, set the destination register number in *DESTREG, and
1186 set the size in bytes of these instructions in *OFFSET. Return 0 if
1187 the instructions are not recognized. */
1188
1189 static CORE_ADDR
1190 arm_analyze_load_stack_chk_guard (CORE_ADDR pc, struct gdbarch *gdbarch,
1191 unsigned int *destreg, int *offset)
1192 {
1193 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1194 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1195 unsigned int low, high, address;
1196
1197 address = 0;
1198 if (is_thumb)
1199 {
1200 unsigned short insn1
1201 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1202
1203 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1204 {
1205 *destreg = bits (insn1, 8, 10);
1206 *offset = 2;
1207 address = bits (insn1, 0, 7);
1208 }
1209 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1210 {
1211 unsigned short insn2
1212 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1213
1214 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1215
1216 insn1
1217 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1218 insn2
1219 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1220
1221 /* movt Rd, #const */
1222 if ((insn1 & 0xfbc0) == 0xf2c0)
1223 {
1224 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1225 *destreg = bits (insn2, 8, 11);
1226 *offset = 8;
1227 address = (high << 16 | low);
1228 }
1229 }
1230 }
1231 else
1232 {
1233 unsigned int insn
1234 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1235
1236 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1237 {
1238 address = bits (insn, 0, 11);
1239 *destreg = bits (insn, 12, 15);
1240 *offset = 4;
1241 }
1242 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1243 {
1244 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1245
1246 insn
1247 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1248
1249 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1250 {
1251 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1252 *destreg = bits (insn, 12, 15);
1253 *offset = 8;
1254 address = (high << 16 | low);
1255 }
1256 }
1257 }
1258
1259 return address;
1260 }
1261
1262 /* Try to skip the sequence of instructions used by the stack protector.
1263 If PC points to the first instruction of this sequence, return the
1264 address of the first instruction after the sequence; otherwise, return
1265 the original PC.
1266
1267 On ARM, this sequence consists of three main steps:
1268 Step 1: load the address of symbol __stack_chk_guard,
1269 Step 2: load the guard value from that address,
1270 Step 3: store it somewhere else.
1271
1272 The instructions in steps 2 and 3 are usually the same across ARM
1273 architectures: step 2 is the single instruction 'ldr Rx, [Rn, #0]', and
1274 step 3 is the single instruction 'str Rx, [r7, #immd]'. However, the
1275 instructions in step 1 vary between ARM architectures. On ARMv7 they are:
1276
1277 movw Rn, #:lower16:__stack_chk_guard
1278 movt Rn, #:upper16:__stack_chk_guard
1279
1280 On ARMv5t, it is:
1281
1282 ldr Rn, .Label
1283 ....
1284 .Label:
1285 .word __stack_chk_guard
1286
1287 Since ldr/str are very common instructions, we cannot use them alone as
1288 the 'fingerprint' of the stack protector sequence. Instead we take the
1289 sequence {movw/movt, ldr}/ldr/str plus the symbol __stack_chk_guard, if
1290 not stripped, as the 'fingerprint' of a stack protector code sequence. */
1291
1292 static CORE_ADDR
1293 arm_skip_stack_protector (CORE_ADDR pc, struct gdbarch *gdbarch)
1294 {
1295 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1296 unsigned int address, basereg;
1297 struct minimal_symbol *stack_chk_guard;
1298 int offset;
1299 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1300 CORE_ADDR addr;
1301
1302 /* Try to parse the instructions in Step 1. */
1303 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1304 &basereg, &offset);
1305 if (!addr)
1306 return pc;
1307
1308 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1309 /* If the name of the symbol doesn't start with '__stack_chk_guard', this
1310 instruction sequence is not for the stack protector. If the symbol has
1311 been stripped, we conservatively assume that it is. */
1312 if (stack_chk_guard
1313 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1314 strlen ("__stack_chk_guard")) != 0)
1315 return pc;
1316
1317 if (is_thumb)
1318 {
1319 unsigned int destreg;
1320 unsigned short insn
1321 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1322
1323 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1324 if ((insn & 0xf800) != 0x6800)
1325 return pc;
1326 if (bits (insn, 3, 5) != basereg)
1327 return pc;
1328 destreg = bits (insn, 0, 2);
1329
1330 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1331 byte_order_for_code);
1332 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1333 if ((insn & 0xf800) != 0x6000)
1334 return pc;
1335 if (destreg != bits (insn, 0, 2))
1336 return pc;
1337 }
1338 else
1339 {
1340 unsigned int destreg;
1341 unsigned int insn
1342 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1343
1344 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1345 if ((insn & 0x0e500000) != 0x04100000)
1346 return pc;
1347 if (bits (insn, 16, 19) != basereg)
1348 return pc;
1349 destreg = bits (insn, 12, 15);
1350 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1351 insn = read_memory_unsigned_integer (pc + offset + 4,
1352 4, byte_order_for_code);
1353 if ((insn & 0x0e500000) != 0x04000000)
1354 return pc;
1355 if (bits (insn, 12, 15) != destreg)
1356 return pc;
1357 }
1358 /* The total size of the two ldr/str instructions is 4 bytes on Thumb-2
1359 and 8 bytes on ARM. */
1360 if (is_thumb)
1361 return pc + offset + 4;
1362 else
1363 return pc + offset + 8;
1364 }
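/* Putting the pieces together, a typical ARMv7 sequence skipped by
   arm_skip_stack_protector looks roughly like this (the register and the
   store offset are illustrative only):

       movw r3, #:lower16:__stack_chk_guard
       movt r3, #:upper16:__stack_chk_guard
       ldr  r3, [r3, #0]
       str  r3, [r7, #8]  */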
1365
1366 /* Advance the PC across any function entry prologue instructions to
1367 reach some "real" code.
1368
1369 The APCS (ARM Procedure Call Standard) defines the following
1370 prologue:
1371
1372 mov ip, sp
1373 [stmfd sp!, {a1,a2,a3,a4}]
1374 stmfd sp!, {...,fp,ip,lr,pc}
1375 [stfe f7, [sp, #-12]!]
1376 [stfe f6, [sp, #-12]!]
1377 [stfe f5, [sp, #-12]!]
1378 [stfe f4, [sp, #-12]!]
1379 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1380
1381 static CORE_ADDR
1382 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1383 {
1384 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1385 unsigned long inst;
1386 CORE_ADDR skip_pc;
1387 CORE_ADDR func_addr, limit_pc;
1388 struct symtab_and_line sal;
1389
1390 /* See if we can determine the end of the prologue via the symbol table.
1391 If so, then return either PC, or the PC after the prologue, whichever
1392 is greater. */
1393 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1394 {
1395 CORE_ADDR post_prologue_pc
1396 = skip_prologue_using_sal (gdbarch, func_addr);
1397 struct symtab *s = find_pc_symtab (func_addr);
1398
1399 if (post_prologue_pc)
1400 post_prologue_pc
1401 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1402
1403
1404 /* GCC always emits a line note before the prologue and another
1405 one after, even if the two are at the same address or on the
1406 same line. Take advantage of this so that we do not need to
1407 know every instruction that might appear in the prologue. We
1408 will have producer information for most binaries; if it is
1409 missing (e.g. for -gstabs), assume the GNU tools. */
1410 if (post_prologue_pc
1411 && (s == NULL
1412 || s->producer == NULL
1413 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1414 return post_prologue_pc;
1415
1416 if (post_prologue_pc != 0)
1417 {
1418 CORE_ADDR analyzed_limit;
1419
1420 /* For non-GCC compilers, make sure the entire line is an
1421 acceptable prologue; GDB will round this function's
1422 return value up to the end of the following line so we
1423 can not skip just part of a line (and we do not want to).
1424
1425 RealView does not treat the prologue specially, but does
1426 associate prologue code with the opening brace; so this
1427 lets us skip the first line if we think it is the opening
1428 brace. */
1429 if (arm_pc_is_thumb (gdbarch, func_addr))
1430 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1431 post_prologue_pc, NULL);
1432 else
1433 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1434 post_prologue_pc, NULL);
1435
1436 if (analyzed_limit != post_prologue_pc)
1437 return func_addr;
1438
1439 return post_prologue_pc;
1440 }
1441 }
1442
1443 /* Can't determine prologue from the symbol table, need to examine
1444 instructions. */
1445
1446 /* Find an upper limit on the function prologue using the debug
1447 information. If the debug information could not be used to provide
1448 that bound, then use an arbitrary large number as the upper bound. */
1449 /* Like arm_scan_prologue, stop no later than pc + 64. */
1450 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1451 if (limit_pc == 0)
1452 limit_pc = pc + 64; /* Magic. */
1453
1454
1455 /* Check if this is Thumb code. */
1456 if (arm_pc_is_thumb (gdbarch, pc))
1457 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1458
1459 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1460 {
1461 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1462
1463 /* "mov ip, sp" is no longer a required part of the prologue. */
1464 if (inst == 0xe1a0c00d) /* mov ip, sp */
1465 continue;
1466
1467 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1468 continue;
1469
1470 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1471 continue;
1472
1473 /* Some prologues begin with "str lr, [sp, #-4]!". */
1474 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1475 continue;
1476
1477 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1478 continue;
1479
1480 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1481 continue;
1482
1483 /* Any insns after this point may float into the code, if it makes
1484 for better instruction scheduling, so we skip them only if we
1485 find them, but still consider the function to be frame-ful. */
1486
1487 /* We may have either one sfmfd instruction here, or several stfe
1488 insns, depending on the version of floating point code we
1489 support. */
1490 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1491 continue;
1492
1493 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1494 continue;
1495
1496 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1497 continue;
1498
1499 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1500 continue;
1501
1502 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1503 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1504 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1505 continue;
1506
1507 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1508 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1509 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1510 continue;
1511
1512 /* Un-recognized instruction; stop scanning. */
1513 break;
1514 }
1515
1516 return skip_pc; /* End of prologue. */
1517 }
1518
1519 /* *INDENT-OFF* */
1520 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1521 This function decodes a Thumb function prologue to determine:
1522 1) the size of the stack frame
1523 2) which registers are saved on it
1524 3) the offsets of saved regs
1525 4) the offset from the stack pointer to the frame pointer
1526
1527 A typical Thumb function prologue would create this stack frame
1528 (offsets relative to FP)
1529 old SP -> 24 stack parameters
1530 20 LR
1531 16 R7
1532 R7 -> 0 local variables (16 bytes)
1533 SP -> -12 additional stack space (12 bytes)
1534 The frame size would thus be 36 bytes, and the frame offset would be
1535 12 bytes. The frame register is R7.
1536
1537 The comments for thumb_analyze_prologue() describe the algorithm we use
1538 to detect the end of the prologue. */
1539 /* *INDENT-ON* */
1540
1541 static void
1542 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1543 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1544 {
1545 CORE_ADDR prologue_start;
1546 CORE_ADDR prologue_end;
1547 CORE_ADDR current_pc;
1548
1549 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1550 &prologue_end))
1551 {
1552 /* See comment in arm_scan_prologue for an explanation of
1553 this heuristic. */
1554 if (prologue_end > prologue_start + 64)
1555 {
1556 prologue_end = prologue_start + 64;
1557 }
1558 }
1559 else
1560 /* We're in the boondocks: we have no idea where the start of the
1561 function is. */
1562 return;
1563
1564 prologue_end = min (prologue_end, prev_pc);
1565
1566 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1567 }
1568
1569 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1570
1571 static int
1572 arm_instruction_changes_pc (uint32_t this_instr)
1573 {
1574 if (bits (this_instr, 28, 31) == INST_NV)
1575 /* Unconditional instructions. */
1576 switch (bits (this_instr, 24, 27))
1577 {
1578 case 0xa:
1579 case 0xb:
1580 /* Branch with Link and change to Thumb. */
1581 return 1;
1582 case 0xc:
1583 case 0xd:
1584 case 0xe:
1585 /* Coprocessor register transfer. */
1586 if (bits (this_instr, 12, 15) == 15)
1587 error (_("Invalid update to pc in instruction"));
1588 return 0;
1589 default:
1590 return 0;
1591 }
1592 else
1593 switch (bits (this_instr, 25, 27))
1594 {
1595 case 0x0:
1596 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1597 {
1598 /* Multiplies and extra load/stores. */
1599 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1600 /* Neither multiplies nor extra load/stores are allowed
1601 to modify PC. */
1602 return 0;
1603
1604 /* Otherwise, miscellaneous instructions. */
1605
1606 /* BX <reg>, BXJ <reg>, BLX <reg> */
1607 if (bits (this_instr, 4, 27) == 0x12fff1
1608 || bits (this_instr, 4, 27) == 0x12fff2
1609 || bits (this_instr, 4, 27) == 0x12fff3)
1610 return 1;
1611
1612 /* Other miscellaneous instructions are unpredictable if they
1613 modify PC. */
1614 return 0;
1615 }
1616 /* Data processing instruction. Fall through. */
1617
1618 case 0x1:
1619 if (bits (this_instr, 12, 15) == 15)
1620 return 1;
1621 else
1622 return 0;
1623
1624 case 0x2:
1625 case 0x3:
1626 /* Media instructions and architecturally undefined instructions. */
1627 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1628 return 0;
1629
1630 /* Stores. */
1631 if (bit (this_instr, 20) == 0)
1632 return 0;
1633
1634 /* Loads. */
1635 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1636 return 1;
1637 else
1638 return 0;
1639
1640 case 0x4:
1641 /* Load/store multiple. */
1642 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1643 return 1;
1644 else
1645 return 0;
1646
1647 case 0x5:
1648 /* Branch and branch with link. */
1649 return 1;
1650
1651 case 0x6:
1652 case 0x7:
1653 /* Coprocessor transfers or SWIs can not affect PC. */
1654 return 0;
1655
1656 default:
1657 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1658 }
1659 }
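/* For example, 0xe12fff1e (bx lr) is caught by the "BX <reg>" test above
   and reported as changing the PC, whereas 0xe52db004 (str fp, [sp, #-4]!)
   falls into the store case and is not.  */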
1660
1661 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1662 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1663 fill it in. Return the first address not recognized as a prologue
1664 instruction.
1665
1666 We recognize all the instructions typically found in ARM prologues,
1667 plus harmless instructions which can be skipped (either for analysis
1668 purposes, or a more restrictive set that can be skipped when finding
1669 the end of the prologue). */
1670
1671 static CORE_ADDR
1672 arm_analyze_prologue (struct gdbarch *gdbarch,
1673 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1674 struct arm_prologue_cache *cache)
1675 {
1676 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1677 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1678 int regno;
1679 CORE_ADDR offset, current_pc;
1680 pv_t regs[ARM_FPS_REGNUM];
1681 struct pv_area *stack;
1682 struct cleanup *back_to;
1683 int framereg, framesize;
1684 CORE_ADDR unrecognized_pc = 0;
1685
1686 /* Search the prologue looking for instructions that set up the
1687 frame pointer, adjust the stack pointer, and save registers.
1688
1689 Be careful, however, and if it doesn't look like a prologue,
1690 don't try to scan it. If, for instance, a frameless function
1691 begins with stmfd sp!, then we will tell ourselves there is
1692 a frame, which will confuse stack traceback, as well as "finish"
1693 and other operations that rely on a knowledge of the stack
1694 traceback. */
1695
1696 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1697 regs[regno] = pv_register (regno, 0);
1698 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1699 back_to = make_cleanup_free_pv_area (stack);
1700
1701 for (current_pc = prologue_start;
1702 current_pc < prologue_end;
1703 current_pc += 4)
1704 {
1705 unsigned int insn
1706 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1707
1708 if (insn == 0xe1a0c00d) /* mov ip, sp */
1709 {
1710 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1711 continue;
1712 }
1713 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1714 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1715 {
1716 unsigned imm = insn & 0xff; /* immediate value */
1717 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1718 int rd = bits (insn, 12, 15);
1719 imm = (imm >> rot) | (imm << (32 - rot));
1720 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1721 continue;
1722 }
1723 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1724 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1725 {
1726 unsigned imm = insn & 0xff; /* immediate value */
1727 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1728 int rd = bits (insn, 12, 15);
1729 imm = (imm >> rot) | (imm << (32 - rot));
1730 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1731 continue;
1732 }
1733 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1734 [sp, #-4]! */
1735 {
1736 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1737 break;
1738 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1739 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1740 regs[bits (insn, 12, 15)]);
1741 continue;
1742 }
1743 else if ((insn & 0xffff0000) == 0xe92d0000)
1744 /* stmfd sp!, {..., fp, ip, lr, pc}
1745 or
1746 stmfd sp!, {a1, a2, a3, a4} */
1747 {
1748 int mask = insn & 0xffff;
1749
1750 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1751 break;
1752
1753 /* Calculate offsets of saved registers. */
1754 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1755 if (mask & (1 << regno))
1756 {
1757 regs[ARM_SP_REGNUM]
1758 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1759 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1760 }
1761 }
1762 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1763 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1764 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1765 {
1766 /* No need to add this to saved_regs -- it's just an arg reg. */
1767 continue;
1768 }
1769 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1770 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1771 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1772 {
1773 /* No need to add this to saved_regs -- it's just an arg reg. */
1774 continue;
1775 }
1776 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1777 { registers } */
1778 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1779 {
1780 /* No need to add this to saved_regs -- it's just arg regs. */
1781 continue;
1782 }
1783 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1784 {
1785 unsigned imm = insn & 0xff; /* immediate value */
1786 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1787 imm = (imm >> rot) | (imm << (32 - rot));
1788 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1789 }
1790 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1791 {
1792 unsigned imm = insn & 0xff; /* immediate value */
1793 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1794 imm = (imm >> rot) | (imm << (32 - rot));
1795 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1796 }
1797 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1798 [sp, -#c]! */
1799 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1800 {
1801 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1802 break;
1803
1804 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1805 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1806 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1807 }
1808 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1809 [sp!] */
1810 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1811 {
1812 int n_saved_fp_regs;
1813 unsigned int fp_start_reg, fp_bound_reg;
1814
1815 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1816 break;
1817
1818 if ((insn & 0x800) == 0x800) /* N0 is set */
1819 {
1820 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1821 n_saved_fp_regs = 3;
1822 else
1823 n_saved_fp_regs = 1;
1824 }
1825 else
1826 {
1827 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1828 n_saved_fp_regs = 2;
1829 else
1830 n_saved_fp_regs = 4;
1831 }
1832
1833 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1834 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1835 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1836 {
1837 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1838 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1839 regs[fp_start_reg]);
1840 }
1841 }
1842 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1843 {
1844 /* Allow some special function calls when skipping the
1845 prologue; GCC generates these before storing arguments to
1846 the stack. */
1847 CORE_ADDR dest = BranchDest (current_pc, insn);
1848
1849 if (skip_prologue_function (gdbarch, dest, 0))
1850 continue;
1851 else
1852 break;
1853 }
1854 else if ((insn & 0xf0000000) != 0xe0000000)
1855 break; /* Condition not true, exit early. */
1856 else if (arm_instruction_changes_pc (insn))
1857 /* Don't scan past anything that might change control flow. */
1858 break;
1859 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1860 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1861 /* Ignore block loads from the stack, potentially copying
1862 parameters from memory. */
1863 continue;
1864 else if ((insn & 0xfc500000) == 0xe4100000
1865 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1866 /* Similarly ignore single loads from the stack. */
1867 continue;
1868 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1869 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1870 register instead of the stack. */
1871 continue;
1872 else
1873 {
1874 /* The optimizer might shove anything into the prologue,
1875 so we just skip what we don't recognize. */
1876 unrecognized_pc = current_pc;
1877 continue;
1878 }
1879 }
1880
1881 if (unrecognized_pc == 0)
1882 unrecognized_pc = current_pc;
1883
1884 /* The frame size is just the distance from the frame register
1885 to the original stack pointer. */
1886 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1887 {
1888 /* Frame pointer is fp. */
1889 framereg = ARM_FP_REGNUM;
1890 framesize = -regs[ARM_FP_REGNUM].k;
1891 }
1892 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1893 {
1894 /* Try the stack pointer... this is a bit desperate. */
1895 framereg = ARM_SP_REGNUM;
1896 framesize = -regs[ARM_SP_REGNUM].k;
1897 }
1898 else
1899 {
1900 /* We're just out of luck. We don't know where the frame is. */
1901 framereg = -1;
1902 framesize = 0;
1903 }
1904
1905 if (cache)
1906 {
1907 cache->framereg = framereg;
1908 cache->framesize = framesize;
1909
1910 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1911 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1912 cache->saved_regs[regno].addr = offset;
1913 }
1914
1915 if (arm_debug)
1916 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1917 paddress (gdbarch, unrecognized_pc));
1918
1919 do_cleanups (back_to);
1920 return unrecognized_pc;
1921 }
1922
1923 static void
1924 arm_scan_prologue (struct frame_info *this_frame,
1925 struct arm_prologue_cache *cache)
1926 {
1927 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1928 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1929 int regno;
1930 CORE_ADDR prologue_start, prologue_end, current_pc;
1931 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1932 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1933 pv_t regs[ARM_FPS_REGNUM];
1934 struct pv_area *stack;
1935 struct cleanup *back_to;
1936 CORE_ADDR offset;
1937
1938 /* Assume there is no frame until proven otherwise. */
1939 cache->framereg = ARM_SP_REGNUM;
1940 cache->framesize = 0;
1941
1942 /* Check for Thumb prologue. */
1943 if (arm_frame_is_thumb (this_frame))
1944 {
1945 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1946 return;
1947 }
1948
1949 /* Find the function prologue. If we can't find the function in
1950 the symbol table, peek in the stack frame to find the PC. */
1951 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1952 &prologue_end))
1953 {
1954 /* One way to find the end of the prologue (which works well
1955 for unoptimized code) is to do the following:
1956
1957 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1958
1959 if (sal.line == 0)
1960 prologue_end = prev_pc;
1961 else if (sal.end < prologue_end)
1962 prologue_end = sal.end;
1963
1964 This mechanism is very accurate so long as the optimizer
1965 doesn't move any instructions from the function body into the
1966 prologue. If this happens, sal.end will be the last
1967 instruction in the first hunk of prologue code just before
1968 the first instruction that the scheduler has moved from
1969 the body to the prologue.
1970
1971 In order to make sure that we scan all of the prologue
1972 instructions, we use a slightly less accurate mechanism which
1973 may scan more than necessary. To help compensate for this
1974 lack of accuracy, the prologue scanning loop below contains
1975 several clauses which'll cause the loop to terminate early if
1976 an implausible prologue instruction is encountered.
1977
1978 The expression
1979
1980 prologue_start + 64
1981
1982 is a suitable endpoint since it accounts for the largest
1983 possible prologue plus up to five instructions inserted by
1984 the scheduler. */
1985
1986 if (prologue_end > prologue_start + 64)
1987 {
1988 prologue_end = prologue_start + 64; /* See above. */
1989 }
1990 }
1991 else
1992 {
1993 /* We have no symbol information. Our only option is to assume this
1994 function has a standard stack frame and the normal frame register.
1995 Then, we can find the value of our frame pointer on entrance to
1996 the callee (or at the present moment if this is the innermost frame).
1997 The value stored there should be the address of the stmfd + 8. */
1998 CORE_ADDR frame_loc;
1999 LONGEST return_value;
2000
2001 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
2002 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
2003 return;
2004 else
2005 {
2006 prologue_start = gdbarch_addr_bits_remove
2007 (gdbarch, return_value) - 8;
2008 prologue_end = prologue_start + 64; /* See above. */
2009 }
2010 }
2011
2012 if (prev_pc < prologue_end)
2013 prologue_end = prev_pc;
2014
2015 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2016 }
2017
2018 static struct arm_prologue_cache *
2019 arm_make_prologue_cache (struct frame_info *this_frame)
2020 {
2021 int reg;
2022 struct arm_prologue_cache *cache;
2023 CORE_ADDR unwound_fp;
2024
2025 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2026 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2027
2028 arm_scan_prologue (this_frame, cache);
2029
2030 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2031 if (unwound_fp == 0)
2032 return cache;
2033
2034 cache->prev_sp = unwound_fp + cache->framesize;
2035
2036 /* Calculate actual addresses of saved registers using offsets
2037 determined by arm_scan_prologue. */
2038 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2039 if (trad_frame_addr_p (cache->saved_regs, reg))
2040 cache->saved_regs[reg].addr += cache->prev_sp;
2041
2042 return cache;
2043 }
2044
2045 /* Our frame ID for a normal frame is the current function's starting PC
2046 and the caller's SP when we were called. */
2047
2048 static void
2049 arm_prologue_this_id (struct frame_info *this_frame,
2050 void **this_cache,
2051 struct frame_id *this_id)
2052 {
2053 struct arm_prologue_cache *cache;
2054 struct frame_id id;
2055 CORE_ADDR pc, func;
2056
2057 if (*this_cache == NULL)
2058 *this_cache = arm_make_prologue_cache (this_frame);
2059 cache = *this_cache;
2060
2061 /* This is meant to halt the backtrace at "_start". */
2062 pc = get_frame_pc (this_frame);
2063 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2064 return;
2065
2066 /* If we've hit a wall, stop. */
2067 if (cache->prev_sp == 0)
2068 return;
2069
2070 /* Use function start address as part of the frame ID. If we cannot
2071 identify the start address (due to missing symbol information),
2072 fall back to just using the current PC. */
2073 func = get_frame_func (this_frame);
2074 if (!func)
2075 func = pc;
2076
2077 id = frame_id_build (cache->prev_sp, func);
2078 *this_id = id;
2079 }
2080
2081 static struct value *
2082 arm_prologue_prev_register (struct frame_info *this_frame,
2083 void **this_cache,
2084 int prev_regnum)
2085 {
2086 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2087 struct arm_prologue_cache *cache;
2088
2089 if (*this_cache == NULL)
2090 *this_cache = arm_make_prologue_cache (this_frame);
2091 cache = *this_cache;
2092
2093 /* If we are asked to unwind the PC, then we need to return the LR
2094 instead. The prologue may save PC, but it will point into this
2095 frame's prologue, not the next frame's resume location. Also
2096 strip the saved T bit. A valid LR may have the low bit set, but
2097 a valid PC never does. */
2098 if (prev_regnum == ARM_PC_REGNUM)
2099 {
2100 CORE_ADDR lr;
2101
2102 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2103 return frame_unwind_got_constant (this_frame, prev_regnum,
2104 arm_addr_bits_remove (gdbarch, lr));
2105 }
2106
2107 /* SP is generally not saved to the stack, but this frame is
2108 identified by the next frame's stack pointer at the time of the call.
2109 The value was already reconstructed into PREV_SP. */
2110 if (prev_regnum == ARM_SP_REGNUM)
2111 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2112
2113 /* The CPSR may have been changed by the call instruction and by the
2114 called function. The only bit we can reconstruct is the T bit,
2115 by checking the low bit of LR as of the call. This is a reliable
2116 indicator of Thumb-ness except for some ARM v4T pre-interworking
2117 Thumb code, which could get away with a clear low bit as long as
2118 the called function did not use bx. Guess that all other
2119 bits are unchanged; the condition flags are presumably lost,
2120 but the processor status is likely valid. */
2121 if (prev_regnum == ARM_PS_REGNUM)
2122 {
2123 CORE_ADDR lr, cpsr;
2124 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2125
2126 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2127 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2128 if (IS_THUMB_ADDR (lr))
2129 cpsr |= t_bit;
2130 else
2131 cpsr &= ~t_bit;
2132 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2133 }
2134
2135 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2136 prev_regnum);
2137 }
2138
2139 struct frame_unwind arm_prologue_unwind = {
2140 NORMAL_FRAME,
2141 default_frame_unwind_stop_reason,
2142 arm_prologue_this_id,
2143 arm_prologue_prev_register,
2144 NULL,
2145 default_frame_sniffer
2146 };
2147
2148 /* Maintain a list of ARM exception table entries per objfile, similar to the
2149 list of mapping symbols. We only cache entries for standard ARM-defined
2150 personality routines; the cache will contain only the frame unwinding
2151 instructions associated with the entry (not the descriptors). */
2152
2153 static const struct objfile_data *arm_exidx_data_key;
2154
2155 struct arm_exidx_entry
2156 {
2157 bfd_vma addr;
2158 gdb_byte *entry;
2159 };
2160 typedef struct arm_exidx_entry arm_exidx_entry_s;
2161 DEF_VEC_O(arm_exidx_entry_s);
2162
2163 struct arm_exidx_data
2164 {
2165 VEC(arm_exidx_entry_s) **section_maps;
2166 };
2167
2168 static void
2169 arm_exidx_data_free (struct objfile *objfile, void *arg)
2170 {
2171 struct arm_exidx_data *data = arg;
2172 unsigned int i;
2173
2174 for (i = 0; i < objfile->obfd->section_count; i++)
2175 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2176 }
2177
2178 static inline int
2179 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2180 const struct arm_exidx_entry *rhs)
2181 {
2182 return lhs->addr < rhs->addr;
2183 }
2184
2185 static struct obj_section *
2186 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2187 {
2188 struct obj_section *osect;
2189
2190 ALL_OBJFILE_OSECTIONS (objfile, osect)
2191 if (bfd_get_section_flags (objfile->obfd,
2192 osect->the_bfd_section) & SEC_ALLOC)
2193 {
2194 bfd_vma start, size;
2195 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2196 size = bfd_get_section_size (osect->the_bfd_section);
2197
2198 if (start <= vma && vma < start + size)
2199 return osect;
2200 }
2201
2202 return NULL;
2203 }
2204
2205 /* Parse contents of exception table and exception index sections
2206 of OBJFILE, and fill in the exception table entry cache.
2207
2208 For each entry that refers to a standard ARM-defined personality
2209 routine, extract the frame unwinding instructions (from either
2210 the index or the table section). The unwinding instructions
2211 are normalized by:
2212 - extracting them from the rest of the table data
2213 - converting to host endianness
2214 - appending the implicit 0xb0 ("Finish") code
2215
2216 The extracted and normalized instructions are stored for later
2217 retrieval by the arm_find_exidx_entry routine. */
2218
2219 static void
2220 arm_exidx_new_objfile (struct objfile *objfile)
2221 {
2222 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2223 struct arm_exidx_data *data;
2224 asection *exidx, *extab;
2225 bfd_vma exidx_vma = 0, extab_vma = 0;
2226 bfd_size_type exidx_size = 0, extab_size = 0;
2227 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2228 LONGEST i;
2229
2230 /* If we've already touched this file, do nothing. */
2231 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2232 return;
2233
2234 /* Read contents of exception table and index. */
2235 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2236 if (exidx)
2237 {
2238 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2239 exidx_size = bfd_get_section_size (exidx);
2240 exidx_data = xmalloc (exidx_size);
2241 make_cleanup (xfree, exidx_data);
2242
2243 if (!bfd_get_section_contents (objfile->obfd, exidx,
2244 exidx_data, 0, exidx_size))
2245 {
2246 do_cleanups (cleanups);
2247 return;
2248 }
2249 }
2250
2251 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2252 if (extab)
2253 {
2254 extab_vma = bfd_section_vma (objfile->obfd, extab);
2255 extab_size = bfd_get_section_size (extab);
2256 extab_data = xmalloc (extab_size);
2257 make_cleanup (xfree, extab_data);
2258
2259 if (!bfd_get_section_contents (objfile->obfd, extab,
2260 extab_data, 0, extab_size))
2261 {
2262 do_cleanups (cleanups);
2263 return;
2264 }
2265 }
2266
2267 /* Allocate exception table data structure. */
2268 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2269 set_objfile_data (objfile, arm_exidx_data_key, data);
2270 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2271 objfile->obfd->section_count,
2272 VEC(arm_exidx_entry_s) *);
2273
2274 /* Fill in exception table. */
2275 for (i = 0; i < exidx_size / 8; i++)
2276 {
2277 struct arm_exidx_entry new_exidx_entry;
2278 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2279 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2280 bfd_vma addr = 0, word = 0;
2281 int n_bytes = 0, n_words = 0;
2282 struct obj_section *sec;
2283 gdb_byte *entry = NULL;
2284
2285 /* Extract address of start of function. */
2286 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
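/* The expression above sign-extends a 31-bit place-relative (prel31)
offset: e.g. an encoded value of 0x7ffffff8 becomes
0x3ffffff8 - 0x40000000 = -8, i.e. eight bytes before the entry. */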
2287 idx += exidx_vma + i * 8;
2288
2289 /* Find section containing function and compute section offset. */
2290 sec = arm_obj_section_from_vma (objfile, idx);
2291 if (sec == NULL)
2292 continue;
2293 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2294
2295 /* Determine address of exception table entry. */
2296 if (val == 1)
2297 {
2298 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2299 }
2300 else if ((val & 0xff000000) == 0x80000000)
2301 {
2302 /* Exception table entry embedded in .ARM.exidx
2303 -- must be short form. */
2304 word = val;
2305 n_bytes = 3;
2306 }
2307 else if (!(val & 0x80000000))
2308 {
2309 /* Exception table entry in .ARM.extab. */
2310 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2311 addr += exidx_vma + i * 8 + 4;
2312
2313 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2314 {
2315 word = bfd_h_get_32 (objfile->obfd,
2316 extab_data + addr - extab_vma);
2317 addr += 4;
2318
2319 if ((word & 0xff000000) == 0x80000000)
2320 {
2321 /* Short form. */
2322 n_bytes = 3;
2323 }
2324 else if ((word & 0xff000000) == 0x81000000
2325 || (word & 0xff000000) == 0x82000000)
2326 {
2327 /* Long form. */
2328 n_bytes = 2;
2329 n_words = ((word >> 16) & 0xff);
2330 }
2331 else if (!(word & 0x80000000))
2332 {
2333 bfd_vma pers;
2334 struct obj_section *pers_sec;
2335 int gnu_personality = 0;
2336
2337 /* Custom personality routine. */
2338 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2339 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2340
2341 /* Check whether we've got one of the variants of the
2342 GNU personality routines. */
2343 pers_sec = arm_obj_section_from_vma (objfile, pers);
2344 if (pers_sec)
2345 {
2346 static const char *personality[] =
2347 {
2348 "__gcc_personality_v0",
2349 "__gxx_personality_v0",
2350 "__gcj_personality_v0",
2351 "__gnu_objc_personality_v0",
2352 NULL
2353 };
2354
2355 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2356 int k;
2357
2358 for (k = 0; personality[k]; k++)
2359 if (lookup_minimal_symbol_by_pc_name
2360 (pc, personality[k], objfile))
2361 {
2362 gnu_personality = 1;
2363 break;
2364 }
2365 }
2366
2367 /* If so, the next word contains a word count in the high
2368 byte, followed by the same unwind instructions as the
2369 pre-defined forms. */
2370 if (gnu_personality
2371 && addr + 4 <= extab_vma + extab_size)
2372 {
2373 word = bfd_h_get_32 (objfile->obfd,
2374 extab_data + addr - extab_vma);
2375 addr += 4;
2376 n_bytes = 3;
2377 n_words = ((word >> 24) & 0xff);
2378 }
2379 }
2380 }
2381 }
2382
2383 /* Sanity check address. */
2384 if (n_words)
2385 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2386 n_words = n_bytes = 0;
2387
2388 /* The unwind instructions reside in WORD (only the N_BYTES least
2389 significant bytes are valid), followed by N_WORDS words in the
2390 extab section starting at ADDR. */
2391 if (n_bytes || n_words)
2392 {
2393 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2394 n_bytes + n_words * 4 + 1);
2395
2396 while (n_bytes--)
2397 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2398
2399 while (n_words--)
2400 {
2401 word = bfd_h_get_32 (objfile->obfd,
2402 extab_data + addr - extab_vma);
2403 addr += 4;
2404
2405 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2406 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2407 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2408 *p++ = (gdb_byte) (word & 0xff);
2409 }
2410
2411 /* Implied "Finish" to terminate the list. */
2412 *p++ = 0xb0;
2413 }
2414
2415 /* Push the entry onto the vector. Entries are guaranteed to always
2416 appear in order of increasing addresses. */
2417 new_exidx_entry.addr = idx;
2418 new_exidx_entry.entry = entry;
2419 VEC_safe_push (arm_exidx_entry_s,
2420 data->section_maps[sec->the_bfd_section->index],
2421 &new_exidx_entry);
2422 }
2423
2424 do_cleanups (cleanups);
2425 }
2426
2427 /* Search for the exception table entry covering MEMADDR. If one is found,
2428 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2429 set *START to the start of the region covered by this entry. */
2430
2431 static gdb_byte *
2432 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2433 {
2434 struct obj_section *sec;
2435
2436 sec = find_pc_section (memaddr);
2437 if (sec != NULL)
2438 {
2439 struct arm_exidx_data *data;
2440 VEC(arm_exidx_entry_s) *map;
2441 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2442 unsigned int idx;
2443
2444 data = objfile_data (sec->objfile, arm_exidx_data_key);
2445 if (data != NULL)
2446 {
2447 map = data->section_maps[sec->the_bfd_section->index];
2448 if (!VEC_empty (arm_exidx_entry_s, map))
2449 {
2450 struct arm_exidx_entry *map_sym;
2451
2452 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2453 arm_compare_exidx_entries);
2454
2455 /* VEC_lower_bound finds the earliest ordered insertion
2456 point. If the following symbol starts at this exact
2457 address, we use that; otherwise, the preceding
2458 exception table entry covers this address. */
2459 if (idx < VEC_length (arm_exidx_entry_s, map))
2460 {
2461 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2462 if (map_sym->addr == map_key.addr)
2463 {
2464 if (start)
2465 *start = map_sym->addr + obj_section_addr (sec);
2466 return map_sym->entry;
2467 }
2468 }
2469
2470 if (idx > 0)
2471 {
2472 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2473 if (start)
2474 *start = map_sym->addr + obj_section_addr (sec);
2475 return map_sym->entry;
2476 }
2477 }
2478 }
2479 }
2480
2481 return NULL;
2482 }
2483
2484 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2485 instruction list from the ARM exception table entry ENTRY, allocate and
2486 return a prologue cache structure describing how to unwind this frame.
2487
2488 Return NULL if the unwinding instruction list contains a "spare",
2489 "reserved" or "refuse to unwind" instruction as defined in section
2490 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2491 for the ARM Architecture" document. */
2492
2493 static struct arm_prologue_cache *
2494 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2495 {
2496 CORE_ADDR vsp = 0;
2497 int vsp_valid = 0;
2498
2499 struct arm_prologue_cache *cache;
2500 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2501 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2502
2503 for (;;)
2504 {
2505 gdb_byte insn;
2506
2507 /* Whenever we reload SP, we have to retrieve its actual
2508 value in the current frame. */
2509 if (!vsp_valid)
2510 {
2511 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2512 {
2513 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2514 vsp = get_frame_register_unsigned (this_frame, reg);
2515 }
2516 else
2517 {
2518 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2519 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2520 }
2521
2522 vsp_valid = 1;
2523 }
2524
2525 /* Decode next unwind instruction. */
2526 insn = *entry++;
2527
2528 if ((insn & 0xc0) == 0)
2529 {
2530 int offset = insn & 0x3f;
2531 vsp += (offset << 2) + 4;
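/* For example, insn 0x00 advances vsp by 4 bytes and insn 0x3f by
(0x3f << 2) + 4 = 256 bytes, so this opcode covers stack
adjustments of 4 to 256 bytes. */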
2532 }
2533 else if ((insn & 0xc0) == 0x40)
2534 {
2535 int offset = insn & 0x3f;
2536 vsp -= (offset << 2) + 4;
2537 }
2538 else if ((insn & 0xf0) == 0x80)
2539 {
2540 int mask = ((insn & 0xf) << 8) | *entry++;
2541 int i;
2542
2543 /* The special case of an all-zero mask identifies
2544 "Refuse to unwind". We return NULL to fall back
2545 to the prologue analyzer. */
2546 if (mask == 0)
2547 return NULL;
2548
2549 /* Pop registers r4..r15 under mask. */
2550 for (i = 0; i < 12; i++)
2551 if (mask & (1 << i))
2552 {
2553 cache->saved_regs[4 + i].addr = vsp;
2554 vsp += 4;
2555 }
2556
2557 /* Special-case popping SP -- we need to reload vsp. */
2558 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2559 vsp_valid = 0;
2560 }
2561 else if ((insn & 0xf0) == 0x90)
2562 {
2563 int reg = insn & 0xf;
2564
2565 /* Reserved cases. */
2566 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2567 return NULL;
2568
2569 /* Set SP from another register and mark VSP for reload. */
2570 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2571 vsp_valid = 0;
2572 }
2573 else if ((insn & 0xf0) == 0xa0)
2574 {
2575 int count = insn & 0x7;
2576 int pop_lr = (insn & 0x8) != 0;
2577 int i;
2578
2579 /* Pop r4..r[4+count]. */
2580 for (i = 0; i <= count; i++)
2581 {
2582 cache->saved_regs[4 + i].addr = vsp;
2583 vsp += 4;
2584 }
2585
2586 /* If indicated by flag, pop LR as well. */
2587 if (pop_lr)
2588 {
2589 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2590 vsp += 4;
2591 }
2592 }
2593 else if (insn == 0xb0)
2594 {
2595 /* We could only have updated PC by popping into it; if so, it
2596 will show up as an address. Otherwise, copy LR into PC. */
2597 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2598 cache->saved_regs[ARM_PC_REGNUM]
2599 = cache->saved_regs[ARM_LR_REGNUM];
2600
2601 /* We're done. */
2602 break;
2603 }
2604 else if (insn == 0xb1)
2605 {
2606 int mask = *entry++;
2607 int i;
2608
2609 /* An all-zero mask or a mask >= 16 is "spare". */
2610 if (mask == 0 || mask >= 16)
2611 return NULL;
2612
2613 /* Pop r0..r3 under mask. */
2614 for (i = 0; i < 4; i++)
2615 if (mask & (1 << i))
2616 {
2617 cache->saved_regs[i].addr = vsp;
2618 vsp += 4;
2619 }
2620 }
2621 else if (insn == 0xb2)
2622 {
2623 ULONGEST offset = 0;
2624 unsigned shift = 0;
2625
2626 do
2627 {
2628 offset |= (*entry & 0x7f) << shift;
2629 shift += 7;
2630 }
2631 while (*entry++ & 0x80);
2632
2633 vsp += 0x204 + (offset << 2);
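/* For example, a single trailing operand byte of 0x00 gives offset 0,
advancing vsp by 0x204 (516) bytes; each increment of the uleb128
value adds another 4 bytes. */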
2634 }
2635 else if (insn == 0xb3)
2636 {
2637 int start = *entry >> 4;
2638 int count = (*entry++) & 0xf;
2639 int i;
2640
2641 /* Only registers D0..D15 are valid here. */
2642 if (start + count >= 16)
2643 return NULL;
2644
2645 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2646 for (i = 0; i <= count; i++)
2647 {
2648 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2649 vsp += 8;
2650 }
2651
2652 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2653 vsp += 4;
2654 }
2655 else if ((insn & 0xf8) == 0xb8)
2656 {
2657 int count = insn & 0x7;
2658 int i;
2659
2660 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2661 for (i = 0; i <= count; i++)
2662 {
2663 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2664 vsp += 8;
2665 }
2666
2667 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2668 vsp += 4;
2669 }
2670 else if (insn == 0xc6)
2671 {
2672 int start = *entry >> 4;
2673 int count = (*entry++) & 0xf;
2674 int i;
2675
2676 /* Only registers WR0..WR15 are valid. */
2677 if (start + count >= 16)
2678 return NULL;
2679
2680 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2681 for (i = 0; i <= count; i++)
2682 {
2683 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2684 vsp += 8;
2685 }
2686 }
2687 else if (insn == 0xc7)
2688 {
2689 int mask = *entry++;
2690 int i;
2691
2692 /* An all-zero mask or a mask >= 16 is "spare". */
2693 if (mask == 0 || mask >= 16)
2694 return NULL;
2695
2696 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2697 for (i = 0; i < 4; i++)
2698 if (mask & (1 << i))
2699 {
2700 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2701 vsp += 4;
2702 }
2703 }
2704 else if ((insn & 0xf8) == 0xc0)
2705 {
2706 int count = insn & 0x7;
2707 int i;
2708
2709 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2710 for (i = 0; i <= count; i++)
2711 {
2712 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2713 vsp += 8;
2714 }
2715 }
2716 else if (insn == 0xc8)
2717 {
2718 int start = *entry >> 4;
2719 int count = (*entry++) & 0xf;
2720 int i;
2721
2722 /* Only registers D0..D31 are valid. */
2723 if (start + count >= 16)
2724 return NULL;
2725
2726 /* Pop VFP double-precision registers
2727 D[16+start]..D[16+start+count]. */
2728 for (i = 0; i <= count; i++)
2729 {
2730 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2731 vsp += 8;
2732 }
2733 }
2734 else if (insn == 0xc9)
2735 {
2736 int start = *entry >> 4;
2737 int count = (*entry++) & 0xf;
2738 int i;
2739
2740 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2741 for (i = 0; i <= count; i++)
2742 {
2743 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2744 vsp += 8;
2745 }
2746 }
2747 else if ((insn & 0xf8) == 0xd0)
2748 {
2749 int count = insn & 0x7;
2750 int i;
2751
2752 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2753 for (i = 0; i <= count; i++)
2754 {
2755 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2756 vsp += 8;
2757 }
2758 }
2759 else
2760 {
2761 /* Everything else is "spare". */
2762 return NULL;
2763 }
2764 }
2765
2766 /* If we restore SP from a register, assume this was the frame register.
2767 Otherwise just fall back to SP as frame register. */
2768 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2769 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2770 else
2771 cache->framereg = ARM_SP_REGNUM;
2772
2773 /* Determine offset to previous frame. */
2774 cache->framesize
2775 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2776
2777 /* We already got the previous SP. */
2778 cache->prev_sp = vsp;
2779
2780 return cache;
2781 }
2782
2783 /* Unwinding via ARM exception table entries. Note that the sniffer
2784 already computes a filled-in prologue cache, which is then used
2785 with the same arm_prologue_this_id and arm_prologue_prev_register
2786 routines also used for prologue-parsing based unwinding. */
2787
2788 static int
2789 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2790 struct frame_info *this_frame,
2791 void **this_prologue_cache)
2792 {
2793 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2794 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2795 CORE_ADDR addr_in_block, exidx_region, func_start;
2796 struct arm_prologue_cache *cache;
2797 gdb_byte *entry;
2798
2799 /* See if we have an ARM exception table entry covering this address. */
2800 addr_in_block = get_frame_address_in_block (this_frame);
2801 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2802 if (!entry)
2803 return 0;
2804
2805 /* The ARM exception table does not describe unwind information
2806 for arbitrary PC values, but is guaranteed to be correct only
2807 at call sites. We have to decide here whether we want to use
2808 ARM exception table information for this frame, or fall back
2809 to using prologue parsing. (Note that if we have DWARF CFI,
2810 this sniffer isn't even called -- CFI is always preferred.)
2811
2812 Before we make this decision, however, we check whether we
2813 actually have *symbol* information for the current frame.
2814 If not, prologue parsing would not work anyway, so we might
2815 as well use the exception table and hope for the best. */
2816 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2817 {
2818 int exc_valid = 0;
2819
2820 /* If the next frame is "normal", we are at a call site in this
2821 frame, so exception information is guaranteed to be valid. */
2822 if (get_next_frame (this_frame)
2823 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2824 exc_valid = 1;
2825
2826 /* We also assume exception information is valid if we're currently
2827 blocked in a system call. The system library is supposed to
2828 ensure this, so that e.g. pthread cancellation works. */
2829 if (arm_frame_is_thumb (this_frame))
2830 {
2831 LONGEST insn;
2832
2833 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2834 byte_order_for_code, &insn)
2835 && (insn & 0xff00) == 0xdf00 /* svc */)
2836 exc_valid = 1;
2837 }
2838 else
2839 {
2840 LONGEST insn;
2841
2842 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2843 byte_order_for_code, &insn)
2844 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2845 exc_valid = 1;
2846 }
2847
2848 /* Bail out if we don't know that exception information is valid. */
2849 if (!exc_valid)
2850 return 0;
2851
2852 /* The ARM exception index does not mark the *end* of the region
2853 covered by the entry, and some functions will not have any entry.
2854 To correctly recognize the end of the covered region, the linker
2855 should have inserted dummy records with a CANTUNWIND marker.
2856
2857 Unfortunately, current versions of GNU ld do not reliably do
2858 this, and thus we may have found an incorrect entry above.
2859 As a (temporary) sanity check, we only use the entry if it
2860 lies *within* the bounds of the function. Note that this check
2861 might reject perfectly valid entries that just happen to cover
2862 multiple functions; therefore this check ought to be removed
2863 once the linker is fixed. */
2864 if (func_start > exidx_region)
2865 return 0;
2866 }
2867
2868 /* Decode the list of unwinding instructions into a prologue cache.
2869 Note that this may fail due to e.g. a "refuse to unwind" code. */
2870 cache = arm_exidx_fill_cache (this_frame, entry);
2871 if (!cache)
2872 return 0;
2873
2874 *this_prologue_cache = cache;
2875 return 1;
2876 }
2877
2878 struct frame_unwind arm_exidx_unwind = {
2879 NORMAL_FRAME,
2880 default_frame_unwind_stop_reason,
2881 arm_prologue_this_id,
2882 arm_prologue_prev_register,
2883 NULL,
2884 arm_exidx_unwind_sniffer
2885 };
2886
2887 static struct arm_prologue_cache *
2888 arm_make_stub_cache (struct frame_info *this_frame)
2889 {
2890 struct arm_prologue_cache *cache;
2891
2892 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2893 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2894
2895 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2896
2897 return cache;
2898 }
2899
2900 /* Our frame ID for a stub frame is the current SP and PC. */
2901
2902 static void
2903 arm_stub_this_id (struct frame_info *this_frame,
2904 void **this_cache,
2905 struct frame_id *this_id)
2906 {
2907 struct arm_prologue_cache *cache;
2908
2909 if (*this_cache == NULL)
2910 *this_cache = arm_make_stub_cache (this_frame);
2911 cache = *this_cache;
2912
2913 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2914 }
2915
2916 static int
2917 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2918 struct frame_info *this_frame,
2919 void **this_prologue_cache)
2920 {
2921 CORE_ADDR addr_in_block;
2922 char dummy[4];
2923
2924 addr_in_block = get_frame_address_in_block (this_frame);
2925 if (in_plt_section (addr_in_block, NULL)
2926 /* We also use the stub unwinder if the target memory is unreadable
2927 to avoid having the prologue unwinder trying to read it. */
2928 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2929 return 1;
2930
2931 return 0;
2932 }
2933
2934 struct frame_unwind arm_stub_unwind = {
2935 NORMAL_FRAME,
2936 default_frame_unwind_stop_reason,
2937 arm_stub_this_id,
2938 arm_prologue_prev_register,
2939 NULL,
2940 arm_stub_unwind_sniffer
2941 };
2942
2943 static CORE_ADDR
2944 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2945 {
2946 struct arm_prologue_cache *cache;
2947
2948 if (*this_cache == NULL)
2949 *this_cache = arm_make_prologue_cache (this_frame);
2950 cache = *this_cache;
2951
2952 return cache->prev_sp - cache->framesize;
2953 }
2954
2955 struct frame_base arm_normal_base = {
2956 &arm_prologue_unwind,
2957 arm_normal_frame_base,
2958 arm_normal_frame_base,
2959 arm_normal_frame_base
2960 };
2961
2962 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2963 dummy frame. The frame ID's base needs to match the TOS value
2964 saved by save_dummy_frame_tos() and returned from
2965 arm_push_dummy_call, and the PC needs to match the dummy frame's
2966 breakpoint. */
2967
2968 static struct frame_id
2969 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2970 {
2971 return frame_id_build (get_frame_register_unsigned (this_frame,
2972 ARM_SP_REGNUM),
2973 get_frame_pc (this_frame));
2974 }
2975
2976 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2977 be used to construct the previous frame's ID, after looking up the
2978 containing function). */
2979
2980 static CORE_ADDR
2981 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2982 {
2983 CORE_ADDR pc;
2984 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2985 return arm_addr_bits_remove (gdbarch, pc);
2986 }
2987
2988 static CORE_ADDR
2989 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2990 {
2991 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2992 }
2993
2994 static struct value *
2995 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2996 int regnum)
2997 {
2998 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2999 CORE_ADDR lr, cpsr;
3000 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3001
3002 switch (regnum)
3003 {
3004 case ARM_PC_REGNUM:
3005 /* The PC is normally copied from the return column, which
3006 describes saves of LR. However, that version may have an
3007 extra bit set to indicate Thumb state. The bit is not
3008 part of the PC. */
3009 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3010 return frame_unwind_got_constant (this_frame, regnum,
3011 arm_addr_bits_remove (gdbarch, lr));
3012
3013 case ARM_PS_REGNUM:
3014 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3015 cpsr = get_frame_register_unsigned (this_frame, regnum);
3016 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3017 if (IS_THUMB_ADDR (lr))
3018 cpsr |= t_bit;
3019 else
3020 cpsr &= ~t_bit;
3021 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3022
3023 default:
3024 internal_error (__FILE__, __LINE__,
3025 _("Unexpected register %d"), regnum);
3026 }
3027 }
3028
3029 static void
3030 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3031 struct dwarf2_frame_state_reg *reg,
3032 struct frame_info *this_frame)
3033 {
3034 switch (regnum)
3035 {
3036 case ARM_PC_REGNUM:
3037 case ARM_PS_REGNUM:
3038 reg->how = DWARF2_FRAME_REG_FN;
3039 reg->loc.fn = arm_dwarf2_prev_register;
3040 break;
3041 case ARM_SP_REGNUM:
3042 reg->how = DWARF2_FRAME_REG_CFA;
3043 break;
3044 }
3045 }
3046
3047 /* Return true if we are in the function's epilogue, i.e. after the
3048 instruction that destroyed the function's stack frame. */
3049
3050 static int
3051 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3052 {
3053 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3054 unsigned int insn, insn2;
3055 int found_return = 0, found_stack_adjust = 0;
3056 CORE_ADDR func_start, func_end;
3057 CORE_ADDR scan_pc;
3058 gdb_byte buf[4];
3059
3060 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3061 return 0;
3062
3063 /* The epilogue is a sequence of instructions along the following lines:
3064
3065 - add stack frame size to SP or FP
3066 - [if frame pointer used] restore SP from FP
3067 - restore registers from SP [may include PC]
3068 - a return-type instruction [if PC wasn't already restored]
3069
3070 In a first pass, we scan forward from the current PC and verify the
3071 instructions we find as compatible with this sequence, ending in a
3072 return instruction.
3073
3074 However, this is not sufficient to distinguish indirect function calls
3075 within a function from indirect tail calls in the epilogue in some cases.
3076 Therefore, if we didn't already find any SP-changing instruction during
3077 forward scan, we add a backward scanning heuristic to ensure we actually
3078 are in the epilogue. */
3079
3080 scan_pc = pc;
3081 while (scan_pc < func_end && !found_return)
3082 {
3083 if (target_read_memory (scan_pc, buf, 2))
3084 break;
3085
3086 scan_pc += 2;
3087 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3088
3089 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3090 found_return = 1;
3091 else if (insn == 0x46f7) /* mov pc, lr */
3092 found_return = 1;
3093 else if (insn == 0x46bd) /* mov sp, r7 */
3094 found_stack_adjust = 1;
3095 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3096 found_stack_adjust = 1;
3097 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3098 {
3099 found_stack_adjust = 1;
3100 if (insn & 0x0100) /* <registers> include PC. */
3101 found_return = 1;
3102 }
3103 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
3104 {
3105 if (target_read_memory (scan_pc, buf, 2))
3106 break;
3107
3108 scan_pc += 2;
3109 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3110
3111 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3112 {
3113 found_stack_adjust = 1;
3114 if (insn2 & 0x8000) /* <registers> include PC. */
3115 found_return = 1;
3116 }
3117 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3118 && (insn2 & 0x0fff) == 0x0b04)
3119 {
3120 found_stack_adjust = 1;
3121 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3122 found_return = 1;
3123 }
3124 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3125 && (insn2 & 0x0e00) == 0x0a00)
3126 found_stack_adjust = 1;
3127 else
3128 break;
3129 }
3130 else
3131 break;
3132 }
3133
3134 if (!found_return)
3135 return 0;
3136
3137 /* Since any instruction in the epilogue sequence, with the possible
3138 exception of return itself, updates the stack pointer, we need to
3139 scan backwards for at most one instruction. Try either a 16-bit or
3140 a 32-bit instruction. This is just a heuristic, so we do not worry
3141 too much about false positives. */
3142
3143 if (!found_stack_adjust)
3144 {
3145 if (pc - 4 < func_start)
3146 return 0;
3147 if (target_read_memory (pc - 4, buf, 4))
3148 return 0;
3149
3150 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3151 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3152
3153 if (insn2 == 0x46bd) /* mov sp, r7 */
3154 found_stack_adjust = 1;
3155 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3156 found_stack_adjust = 1;
3157 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3158 found_stack_adjust = 1;
3159 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3160 found_stack_adjust = 1;
3161 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3162 && (insn2 & 0x0fff) == 0x0b04)
3163 found_stack_adjust = 1;
3164 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3165 && (insn2 & 0x0e00) == 0x0a00)
3166 found_stack_adjust = 1;
3167 }
3168
3169 return found_stack_adjust;
3170 }
3171
3172 /* Return true if we are in the function's epilogue, i.e. after the
3173 instruction that destroyed the function's stack frame. */
3174
3175 static int
3176 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3177 {
3178 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3179 unsigned int insn;
3180 int found_return, found_stack_adjust;
3181 CORE_ADDR func_start, func_end;
3182
3183 if (arm_pc_is_thumb (gdbarch, pc))
3184 return thumb_in_function_epilogue_p (gdbarch, pc);
3185
3186 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3187 return 0;
3188
3189 /* We are in the epilogue if the previous instruction was a stack
3190 adjustment and the next instruction is a possible return (bx, mov
3191 pc, or pop). We could have to scan backwards to find the stack
3192 adjustment, or forwards to find the return, but this is a decent
3193 approximation. First scan forwards. */
3194
3195 found_return = 0;
3196 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3197 if (bits (insn, 28, 31) != INST_NV)
3198 {
3199 if ((insn & 0x0ffffff0) == 0x012fff10)
3200 /* BX. */
3201 found_return = 1;
3202 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3203 /* MOV PC. */
3204 found_return = 1;
3205 else if ((insn & 0x0fff0000) == 0x08bd0000
3206 && (insn & 0x0000c000) != 0)
3207 /* POP (LDMIA), including PC or LR. */
3208 found_return = 1;
3209 }
3210
3211 if (!found_return)
3212 return 0;
3213
3214 /* Scan backwards. This is just a heuristic, so do not worry about
3215 false positives from mode changes. */
3216
3217 if (pc < func_start + 4)
3218 return 0;
3219
3220 found_stack_adjust = 0;
3221 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3222 if (bits (insn, 28, 31) != INST_NV)
3223 {
3224 if ((insn & 0x0df0f000) == 0x0080d000)
3225 /* ADD SP (register or immediate). */
3226 found_stack_adjust = 1;
3227 else if ((insn & 0x0df0f000) == 0x0040d000)
3228 /* SUB SP (register or immediate). */
3229 found_stack_adjust = 1;
3230 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3231 /* MOV SP. */
3232 found_stack_adjust = 1;
3233 else if ((insn & 0x0fff0000) == 0x08bd0000)
3234 /* POP (LDMIA). */
3235 found_stack_adjust = 1;
3236 }
3237
3238 if (found_stack_adjust)
3239 return 1;
3240
3241 return 0;
3242 }
3243
3244
3245 /* When arguments must be pushed onto the stack, they go on in reverse
3246 order. The code below implements a FILO (stack) to do this. */
3247
3248 struct stack_item
3249 {
3250 int len;
3251 struct stack_item *prev;
3252 void *data;
3253 };
3254
3255 static struct stack_item *
3256 push_stack_item (struct stack_item *prev, const void *contents, int len)
3257 {
3258 struct stack_item *si;
3259 si = xmalloc (sizeof (struct stack_item));
3260 si->data = xmalloc (len);
3261 si->len = len;
3262 si->prev = prev;
3263 memcpy (si->data, contents, len);
3264 return si;
3265 }
3266
3267 static struct stack_item *
3268 pop_stack_item (struct stack_item *si)
3269 {
3270 struct stack_item *dead = si;
3271 si = si->prev;
3272 xfree (dead->data);
3273 xfree (dead);
3274 return si;
3275 }
3276
3277
3278 /* Return the alignment (in bytes) of the given type. */
3279
3280 static int
3281 arm_type_align (struct type *t)
3282 {
3283 int n;
3284 int align;
3285 int falign;
3286
3287 t = check_typedef (t);
3288 switch (TYPE_CODE (t))
3289 {
3290 default:
3291 /* Should never happen. */
3292 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3293 return 4;
3294
3295 case TYPE_CODE_PTR:
3296 case TYPE_CODE_ENUM:
3297 case TYPE_CODE_INT:
3298 case TYPE_CODE_FLT:
3299 case TYPE_CODE_SET:
3300 case TYPE_CODE_RANGE:
3301 case TYPE_CODE_BITSTRING:
3302 case TYPE_CODE_REF:
3303 case TYPE_CODE_CHAR:
3304 case TYPE_CODE_BOOL:
3305 return TYPE_LENGTH (t);
3306
3307 case TYPE_CODE_ARRAY:
3308 case TYPE_CODE_COMPLEX:
3309 /* TODO: What about vector types? */
3310 return arm_type_align (TYPE_TARGET_TYPE (t));
3311
3312 case TYPE_CODE_STRUCT:
3313 case TYPE_CODE_UNION:
3314 align = 1;
3315 for (n = 0; n < TYPE_NFIELDS (t); n++)
3316 {
3317 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3318 if (falign > align)
3319 align = falign;
3320 }
3321 return align;
3322 }
3323 }
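/* Under these rules, for example, a struct containing a char and a
double aligns to 8 (the double member dominates), while an array of
ints aligns to 4. */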
3324
3325 /* Possible base types for a candidate for passing and returning in
3326 VFP registers. */
3327
3328 enum arm_vfp_cprc_base_type
3329 {
3330 VFP_CPRC_UNKNOWN,
3331 VFP_CPRC_SINGLE,
3332 VFP_CPRC_DOUBLE,
3333 VFP_CPRC_VEC64,
3334 VFP_CPRC_VEC128
3335 };
3336
3337 /* The length of one element of base type B. */
3338
3339 static unsigned
3340 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3341 {
3342 switch (b)
3343 {
3344 case VFP_CPRC_SINGLE:
3345 return 4;
3346 case VFP_CPRC_DOUBLE:
3347 return 8;
3348 case VFP_CPRC_VEC64:
3349 return 8;
3350 case VFP_CPRC_VEC128:
3351 return 16;
3352 default:
3353 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3354 (int) b);
3355 }
3356 }
3357
3358 /* The character ('s', 'd' or 'q') for the type of VFP register used
3359 for passing base type B. */
3360
3361 static int
3362 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3363 {
3364 switch (b)
3365 {
3366 case VFP_CPRC_SINGLE:
3367 return 's';
3368 case VFP_CPRC_DOUBLE:
3369 return 'd';
3370 case VFP_CPRC_VEC64:
3371 return 'd';
3372 case VFP_CPRC_VEC128:
3373 return 'q';
3374 default:
3375 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3376 (int) b);
3377 }
3378 }
3379
3380 /* Determine whether T may be part of a candidate for passing and
3381 returning in VFP registers, ignoring the limit on the total number
3382 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3383 classification of the first valid component found; if it is not
3384 VFP_CPRC_UNKNOWN, all components must have the same classification
3385 as *BASE_TYPE. If it is found that T contains a type not permitted
3386 for passing and returning in VFP registers, a type differently
3387 classified from *BASE_TYPE, or two types differently classified
3388 from each other, return -1, otherwise return the total number of
3389 base-type elements found (possibly 0 in an empty structure or
3390 array). Vectors and complex types are not currently supported,
3391 matching the generic AAPCS support. */
3392
3393 static int
3394 arm_vfp_cprc_sub_candidate (struct type *t,
3395 enum arm_vfp_cprc_base_type *base_type)
3396 {
3397 t = check_typedef (t);
3398 switch (TYPE_CODE (t))
3399 {
3400 case TYPE_CODE_FLT:
3401 switch (TYPE_LENGTH (t))
3402 {
3403 case 4:
3404 if (*base_type == VFP_CPRC_UNKNOWN)
3405 *base_type = VFP_CPRC_SINGLE;
3406 else if (*base_type != VFP_CPRC_SINGLE)
3407 return -1;
3408 return 1;
3409
3410 case 8:
3411 if (*base_type == VFP_CPRC_UNKNOWN)
3412 *base_type = VFP_CPRC_DOUBLE;
3413 else if (*base_type != VFP_CPRC_DOUBLE)
3414 return -1;
3415 return 1;
3416
3417 default:
3418 return -1;
3419 }
3420 break;
3421
3422 case TYPE_CODE_ARRAY:
3423 {
3424 int count;
3425 unsigned unitlen;
3426 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3427 if (count == -1)
3428 return -1;
3429 if (TYPE_LENGTH (t) == 0)
3430 {
3431 gdb_assert (count == 0);
3432 return 0;
3433 }
3434 else if (count == 0)
3435 return -1;
3436 unitlen = arm_vfp_cprc_unit_length (*base_type);
3437 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3438 return TYPE_LENGTH (t) / unitlen;
3439 }
3440 break;
3441
3442 case TYPE_CODE_STRUCT:
3443 {
3444 int count = 0;
3445 unsigned unitlen;
3446 int i;
3447 for (i = 0; i < TYPE_NFIELDS (t); i++)
3448 {
3449 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3450 base_type);
3451 if (sub_count == -1)
3452 return -1;
3453 count += sub_count;
3454 }
3455 if (TYPE_LENGTH (t) == 0)
3456 {
3457 gdb_assert (count == 0);
3458 return 0;
3459 }
3460 else if (count == 0)
3461 return -1;
3462 unitlen = arm_vfp_cprc_unit_length (*base_type);
3463 if (TYPE_LENGTH (t) != unitlen * count)
3464 return -1;
3465 return count;
3466 }
3467
3468 case TYPE_CODE_UNION:
3469 {
3470 int count = 0;
3471 unsigned unitlen;
3472 int i;
3473 for (i = 0; i < TYPE_NFIELDS (t); i++)
3474 {
3475 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3476 base_type);
3477 if (sub_count == -1)
3478 return -1;
3479 count = (count > sub_count ? count : sub_count);
3480 }
3481 if (TYPE_LENGTH (t) == 0)
3482 {
3483 gdb_assert (count == 0);
3484 return 0;
3485 }
3486 else if (count == 0)
3487 return -1;
3488 unitlen = arm_vfp_cprc_unit_length (*base_type);
3489 if (TYPE_LENGTH (t) != unitlen * count)
3490 return -1;
3491 return count;
3492 }
3493
3494 default:
3495 break;
3496 }
3497
3498 return -1;
3499 }
3500
3501 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3502 if passed to or returned from a non-variadic function with the VFP
3503 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3504 *BASE_TYPE to the base type for T and *COUNT to the number of
3505 elements of that base type before returning. */
3506
3507 static int
3508 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3509 int *count)
3510 {
3511 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3512 int c = arm_vfp_cprc_sub_candidate (t, &b);
3513 if (c <= 0 || c > 4)
3514 return 0;
3515 *base_type = b;
3516 *count = c;
3517 return 1;
3518 }
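/* Illustrative examples of the rules above: a struct of three floats
yields base type VFP_CPRC_SINGLE with a count of 3 and is therefore a
candidate, while a struct mixing a float with a double, or one
containing more than four doubles, is not. */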
3519
3520 /* Return 1 if the VFP ABI should be used for passing arguments to and
3521 returning values from a function of type FUNC_TYPE, 0
3522 otherwise. */
3523
3524 static int
3525 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3526 {
3527 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3528 /* Variadic functions always use the base ABI. Assume that functions
3529 without debug info are not variadic. */
3530 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3531 return 0;
3532 /* The VFP ABI is only supported as a variant of AAPCS. */
3533 if (tdep->arm_abi != ARM_ABI_AAPCS)
3534 return 0;
3535 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3536 }
3537
3538 /* We currently only support passing parameters in integer registers, which
3539 conforms with GCC's default model, and VFP argument passing following
3540 the VFP variant of AAPCS. Several other variants exist and
3541 we should probably support some of them based on the selected ABI. */
3542
3543 static CORE_ADDR
3544 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3545 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3546 struct value **args, CORE_ADDR sp, int struct_return,
3547 CORE_ADDR struct_addr)
3548 {
3549 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3550 int argnum;
3551 int argreg;
3552 int nstack;
3553 struct stack_item *si = NULL;
3554 int use_vfp_abi;
3555 struct type *ftype;
3556 unsigned vfp_regs_free = (1 << 16) - 1;
3557
3558 /* Determine the type of this function and whether the VFP ABI
3559 applies. */
3560 ftype = check_typedef (value_type (function));
3561 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3562 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3563 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3564
3565 /* Set the return address. For the ARM, the return breakpoint is
3566 always at BP_ADDR. */
3567 if (arm_pc_is_thumb (gdbarch, bp_addr))
3568 bp_addr |= 1;
3569 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3570
3571 /* Walk through the list of args and determine how large a temporary
3572 stack is required. Need to take care here as structs may be
3573 passed on the stack, and we have to push them. */
3574 nstack = 0;
3575
3576 argreg = ARM_A1_REGNUM;
3577 nstack = 0;
3578
3579 /* The struct_return pointer occupies the first parameter
3580 passing register. */
3581 if (struct_return)
3582 {
3583 if (arm_debug)
3584 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3585 gdbarch_register_name (gdbarch, argreg),
3586 paddress (gdbarch, struct_addr));
3587 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3588 argreg++;
3589 }
3590
3591 for (argnum = 0; argnum < nargs; argnum++)
3592 {
3593 int len;
3594 struct type *arg_type;
3595 struct type *target_type;
3596 enum type_code typecode;
3597 const bfd_byte *val;
3598 int align;
3599 enum arm_vfp_cprc_base_type vfp_base_type;
3600 int vfp_base_count;
3601 int may_use_core_reg = 1;
3602
3603 arg_type = check_typedef (value_type (args[argnum]));
3604 len = TYPE_LENGTH (arg_type);
3605 target_type = TYPE_TARGET_TYPE (arg_type);
3606 typecode = TYPE_CODE (arg_type);
3607 val = value_contents (args[argnum]);
3608
3609 align = arm_type_align (arg_type);
3610 /* Round alignment up to a whole number of words. */
3611 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3612 /* Different ABIs have different maximum alignments. */
3613 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3614 {
3615 /* The APCS ABI only requires word alignment. */
3616 align = INT_REGISTER_SIZE;
3617 }
3618 else
3619 {
3620 /* The AAPCS requires at most doubleword alignment. */
3621 if (align > INT_REGISTER_SIZE * 2)
3622 align = INT_REGISTER_SIZE * 2;
3623 }
3624
3625 if (use_vfp_abi
3626 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3627 &vfp_base_count))
3628 {
3629 int regno;
3630 int unit_length;
3631 int shift;
3632 unsigned mask;
3633
3634 /* Because this is a CPRC it cannot go in a core register or
3635 cause a core register to be skipped for alignment.
3636 Either it goes in VFP registers and the rest of this loop
3637 iteration is skipped for this argument, or it goes on the
3638 stack (and the stack alignment code is correct for this
3639 case). */
3640 may_use_core_reg = 0;
3641
3642 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3643 shift = unit_length / 4;
3644 mask = (1 << (shift * vfp_base_count)) - 1;
3645 for (regno = 0; regno < 16; regno += shift)
3646 if (((vfp_regs_free >> regno) & mask) == mask)
3647 break;
3648
3649 if (regno < 16)
3650 {
3651 int reg_char;
3652 int reg_scaled;
3653 int i;
3654
3655 vfp_regs_free &= ~(mask << regno);
3656 reg_scaled = regno / shift;
3657 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3658 for (i = 0; i < vfp_base_count; i++)
3659 {
3660 char name_buf[4];
3661 int regnum;
3662 if (reg_char == 'q')
3663 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3664 val + i * unit_length);
3665 else
3666 {
3667 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3668 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3669 strlen (name_buf));
3670 regcache_cooked_write (regcache, regnum,
3671 val + i * unit_length);
3672 }
3673 }
3674 continue;
3675 }
3676 else
3677 {
3678 /* This CPRC could not go in VFP registers, so all VFP
3679 registers are now marked as used. */
3680 vfp_regs_free = 0;
3681 }
3682 }
3683
3684 /* Push stack padding for doubleword alignment. */
3685 if (nstack & (align - 1))
3686 {
3687 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3688 nstack += INT_REGISTER_SIZE;
3689 }
3690
3691 /* Doubleword aligned quantities must go in even register pairs. */
3692 if (may_use_core_reg
3693 && argreg <= ARM_LAST_ARG_REGNUM
3694 && align > INT_REGISTER_SIZE
3695 && argreg & 1)
3696 argreg++;
3697
3698 /* If the argument is a pointer to a function, and it is a
3699 Thumb function, create a LOCAL copy of the value and set
3700 the THUMB bit in it. */
3701 if (TYPE_CODE_PTR == typecode
3702 && target_type != NULL
3703 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3704 {
3705 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3706 if (arm_pc_is_thumb (gdbarch, regval))
3707 {
3708 bfd_byte *copy = alloca (len);
3709 store_unsigned_integer (copy, len, byte_order,
3710 MAKE_THUMB_ADDR (regval));
3711 val = copy;
3712 }
3713 }
3714
3715 /* Copy the argument to general registers or the stack in
3716 register-sized pieces. Large arguments are split between
3717 registers and stack. */
3718 while (len > 0)
3719 {
3720 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3721
3722 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3723 {
3724 /* The argument is being passed in a general purpose
3725 register. */
3726 CORE_ADDR regval
3727 = extract_unsigned_integer (val, partial_len, byte_order);
3728 if (byte_order == BFD_ENDIAN_BIG)
3729 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3730 if (arm_debug)
3731 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3732 argnum,
3733 gdbarch_register_name
3734 (gdbarch, argreg),
3735 phex (regval, INT_REGISTER_SIZE));
3736 regcache_cooked_write_unsigned (regcache, argreg, regval);
3737 argreg++;
3738 }
3739 else
3740 {
3741 /* Push the arguments onto the stack. */
3742 if (arm_debug)
3743 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3744 argnum, nstack);
3745 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3746 nstack += INT_REGISTER_SIZE;
3747 }
3748
3749 len -= partial_len;
3750 val += partial_len;
3751 }
3752 }
3753 /* If we have an odd number of words to push, then decrement the stack
3754 by one word now, so the first stack argument will be dword aligned. */
3755 if (nstack & 4)
3756 sp -= 4;
3757
3758 while (si)
3759 {
3760 sp -= si->len;
3761 write_memory (sp, si->data, si->len);
3762 si = pop_stack_item (si);
3763 }
3764
3765 /* Finally, update the SP register. */
3766 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3767
3768 return sp;
3769 }
3770
3771
3772 /* Always align the frame to an 8-byte boundary. This is required on
3773 some platforms and harmless on the rest. */
3774
3775 static CORE_ADDR
3776 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3777 {
3778 /* Align the stack to eight bytes. */
3779 return sp & ~ (CORE_ADDR) 7;
3780 }
3781
3782 static void
3783 print_fpu_flags (int flags)
3784 {
3785 if (flags & (1 << 0))
3786 fputs ("IVO ", stdout);
3787 if (flags & (1 << 1))
3788 fputs ("DVZ ", stdout);
3789 if (flags & (1 << 2))
3790 fputs ("OFL ", stdout);
3791 if (flags & (1 << 3))
3792 fputs ("UFL ", stdout);
3793 if (flags & (1 << 4))
3794 fputs ("INX ", stdout);
3795 putchar ('\n');
3796 }
3797
3798 /* Print interesting information about the floating point processor
3799 (if present) or emulator. */
3800 static void
3801 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3802 struct frame_info *frame, const char *args)
3803 {
3804 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3805 int type;
3806
3807 type = (status >> 24) & 127;
3808 if (status & (1 << 31))
3809 printf (_("Hardware FPU type %d\n"), type);
3810 else
3811 printf (_("Software FPU type %d\n"), type);
3812 /* i18n: [floating point unit] mask */
3813 fputs (_("mask: "), stdout);
3814 print_fpu_flags (status >> 16);
3815 /* i18n: [floating point unit] flags */
3816 fputs (_("flags: "), stdout);
3817 print_fpu_flags (status);
3818 }
3819
3820 /* Construct the ARM extended floating point type. */
3821 static struct type *
3822 arm_ext_type (struct gdbarch *gdbarch)
3823 {
3824 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3825
3826 if (!tdep->arm_ext_type)
3827 tdep->arm_ext_type
3828 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3829 floatformats_arm_ext);
3830
3831 return tdep->arm_ext_type;
3832 }
3833
3834 static struct type *
3835 arm_neon_double_type (struct gdbarch *gdbarch)
3836 {
3837 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3838
3839 if (tdep->neon_double_type == NULL)
3840 {
3841 struct type *t, *elem;
3842
3843 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3844 TYPE_CODE_UNION);
3845 elem = builtin_type (gdbarch)->builtin_uint8;
3846 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3847 elem = builtin_type (gdbarch)->builtin_uint16;
3848 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3849 elem = builtin_type (gdbarch)->builtin_uint32;
3850 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3851 elem = builtin_type (gdbarch)->builtin_uint64;
3852 append_composite_type_field (t, "u64", elem);
3853 elem = builtin_type (gdbarch)->builtin_float;
3854 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3855 elem = builtin_type (gdbarch)->builtin_double;
3856 append_composite_type_field (t, "f64", elem);
3857
3858 TYPE_VECTOR (t) = 1;
3859 TYPE_NAME (t) = "neon_d";
3860 tdep->neon_double_type = t;
3861 }
3862
3863 return tdep->neon_double_type;
3864 }
3865
3866 /* FIXME: The vector types are not correctly ordered on big-endian
3867 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3868 bits of d0 - regardless of what unit size is being held in d0. So
3869 the offset of the first uint8 in d0 is 7, but the offset of the
3870 first float is 4. This code works as-is for little-endian
3871 targets. */
3872
3873 static struct type *
3874 arm_neon_quad_type (struct gdbarch *gdbarch)
3875 {
3876 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3877
3878 if (tdep->neon_quad_type == NULL)
3879 {
3880 struct type *t, *elem;
3881
3882 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3883 TYPE_CODE_UNION);
3884 elem = builtin_type (gdbarch)->builtin_uint8;
3885 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3886 elem = builtin_type (gdbarch)->builtin_uint16;
3887 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3888 elem = builtin_type (gdbarch)->builtin_uint32;
3889 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3890 elem = builtin_type (gdbarch)->builtin_uint64;
3891 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3892 elem = builtin_type (gdbarch)->builtin_float;
3893 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3894 elem = builtin_type (gdbarch)->builtin_double;
3895 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3896
3897 TYPE_VECTOR (t) = 1;
3898 TYPE_NAME (t) = "neon_q";
3899 tdep->neon_quad_type = t;
3900 }
3901
3902 return tdep->neon_quad_type;
3903 }
3904
3905 /* Return the GDB type object for the "standard" data type of data in
3906 register N. */
3907
3908 static struct type *
3909 arm_register_type (struct gdbarch *gdbarch, int regnum)
3910 {
3911 int num_regs = gdbarch_num_regs (gdbarch);
3912
3913 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3914 && regnum >= num_regs && regnum < num_regs + 32)
3915 return builtin_type (gdbarch)->builtin_float;
3916
3917 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3918 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3919 return arm_neon_quad_type (gdbarch);
3920
3921 /* If the target description has register information, we are only
3922 in this function so that we can override the types of
3923 double-precision registers for NEON. */
3924 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3925 {
3926 struct type *t = tdesc_register_type (gdbarch, regnum);
3927
3928 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3929 && TYPE_CODE (t) == TYPE_CODE_FLT
3930 && gdbarch_tdep (gdbarch)->have_neon)
3931 return arm_neon_double_type (gdbarch);
3932 else
3933 return t;
3934 }
3935
3936 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3937 {
3938 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3939 return builtin_type (gdbarch)->builtin_void;
3940
3941 return arm_ext_type (gdbarch);
3942 }
3943 else if (regnum == ARM_SP_REGNUM)
3944 return builtin_type (gdbarch)->builtin_data_ptr;
3945 else if (regnum == ARM_PC_REGNUM)
3946 return builtin_type (gdbarch)->builtin_func_ptr;
3947 else if (regnum >= ARRAY_SIZE (arm_register_names))
3948 /* These registers are only supported on targets which supply
3949 an XML description. */
3950 return builtin_type (gdbarch)->builtin_int0;
3951 else
3952 return builtin_type (gdbarch)->builtin_uint32;
3953 }
3954
3955 /* Map a DWARF register REGNUM onto the appropriate GDB register
3956 number. */
3957
3958 static int
3959 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3960 {
3961 /* Core integer regs. */
3962 if (reg >= 0 && reg <= 15)
3963 return reg;
3964
3965 /* Legacy FPA encoding. These were once used in a way which
3966 overlapped with VFP register numbering, so their use is
3967 discouraged, but GDB doesn't support the ARM toolchain
3968 which used them for VFP. */
3969 if (reg >= 16 && reg <= 23)
3970 return ARM_F0_REGNUM + reg - 16;
3971
3972 /* New assignments for the FPA registers. */
3973 if (reg >= 96 && reg <= 103)
3974 return ARM_F0_REGNUM + reg - 96;
3975
3976 /* WMMX register assignments. */
3977 if (reg >= 104 && reg <= 111)
3978 return ARM_WCGR0_REGNUM + reg - 104;
3979
3980 if (reg >= 112 && reg <= 127)
3981 return ARM_WR0_REGNUM + reg - 112;
3982
3983 if (reg >= 192 && reg <= 199)
3984 return ARM_WC0_REGNUM + reg - 192;
3985
3986 /* VFP v2 registers. A double precision value is actually
3987 in d1 rather than s2, but the ABI only defines numbering
3988 for the single precision registers. This will "just work"
3989 in GDB for little endian targets (we'll read eight bytes,
3990 starting in s0 and then progressing to s1), but will be
3991 reversed on big endian targets with VFP. This won't
3992 be a problem for the new Neon quad registers; you're supposed
3993 to use DW_OP_piece for those. */
3994 if (reg >= 64 && reg <= 95)
3995 {
3996 char name_buf[4];
3997
3998 sprintf (name_buf, "s%d", reg - 64);
3999 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4000 strlen (name_buf));
4001 }
4002
4003 /* VFP v3 / Neon registers. This range is also used for VFP v2
4004 registers, except that it now describes d0 instead of s0. */
4005 if (reg >= 256 && reg <= 287)
4006 {
4007 char name_buf[4];
4008
4009 sprintf (name_buf, "d%d", reg - 256);
4010 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4011 strlen (name_buf));
4012 }
4013
4014 return -1;
4015 }
4016
4017 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4018 static int
4019 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4020 {
4021 int reg = regnum;
4022 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4023
4024 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4025 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4026
4027 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4028 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4029
4030 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4031 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4032
4033 if (reg < NUM_GREGS)
4034 return SIM_ARM_R0_REGNUM + reg;
4035 reg -= NUM_GREGS;
4036
4037 if (reg < NUM_FREGS)
4038 return SIM_ARM_FP0_REGNUM + reg;
4039 reg -= NUM_FREGS;
4040
4041 if (reg < NUM_SREGS)
4042 return SIM_ARM_FPS_REGNUM + reg;
4043 reg -= NUM_SREGS;
4044
4045 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4046 }
4047
4048 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4049 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4050 It is thought that this is the floating-point register format on
4051 little-endian systems. */
4052
4053 static void
4054 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4055 void *dbl, int endianess)
4056 {
4057 DOUBLEST d;
4058
4059 if (endianess == BFD_ENDIAN_BIG)
4060 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4061 else
4062 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4063 ptr, &d);
4064 floatformat_from_doublest (fmt, &d, dbl);
4065 }
4066
4067 static void
4068 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4069 int endianess)
4070 {
4071 DOUBLEST d;
4072
4073 floatformat_to_doublest (fmt, ptr, &d);
4074 if (endianess == BFD_ENDIAN_BIG)
4075 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4076 else
4077 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4078 &d, dbl);
4079 }
4080
4081 static int
4082 condition_true (unsigned long cond, unsigned long status_reg)
4083 {
4084 if (cond == INST_AL || cond == INST_NV)
4085 return 1;
4086
4087 switch (cond)
4088 {
4089 case INST_EQ:
4090 return ((status_reg & FLAG_Z) != 0);
4091 case INST_NE:
4092 return ((status_reg & FLAG_Z) == 0);
4093 case INST_CS:
4094 return ((status_reg & FLAG_C) != 0);
4095 case INST_CC:
4096 return ((status_reg & FLAG_C) == 0);
4097 case INST_MI:
4098 return ((status_reg & FLAG_N) != 0);
4099 case INST_PL:
4100 return ((status_reg & FLAG_N) == 0);
4101 case INST_VS:
4102 return ((status_reg & FLAG_V) != 0);
4103 case INST_VC:
4104 return ((status_reg & FLAG_V) == 0);
4105 case INST_HI:
4106 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4107 case INST_LS:
4108 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4109 case INST_GE:
4110 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4111 case INST_LT:
4112 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4113 case INST_GT:
4114 return (((status_reg & FLAG_Z) == 0)
4115 && (((status_reg & FLAG_N) == 0)
4116 == ((status_reg & FLAG_V) == 0)));
4117 case INST_LE:
4118 return (((status_reg & FLAG_Z) != 0)
4119 || (((status_reg & FLAG_N) == 0)
4120 != ((status_reg & FLAG_V) == 0)));
4121 }
4122 return 1;
4123 }
4124
4125 static unsigned long
4126 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4127 unsigned long pc_val, unsigned long status_reg)
4128 {
4129 unsigned long res, shift;
4130 int rm = bits (inst, 0, 3);
4131 unsigned long shifttype = bits (inst, 5, 6);
4132
4133 if (bit (inst, 4))
4134 {
4135 int rs = bits (inst, 8, 11);
4136 shift = (rs == 15 ? pc_val + 8
4137 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4138 }
4139 else
4140 shift = bits (inst, 7, 11);
4141
4142 res = (rm == ARM_PC_REGNUM
4143 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4144 : get_frame_register_unsigned (frame, rm));
4145
4146 switch (shifttype)
4147 {
4148 case 0: /* LSL */
4149 res = shift >= 32 ? 0 : res << shift;
4150 break;
4151
4152 case 1: /* LSR */
4153 res = shift >= 32 ? 0 : res >> shift;
4154 break;
4155
4156 case 2: /* ASR */
4157 if (shift >= 32)
4158 shift = 31;
4159 res = ((res & 0x80000000L)
4160 ? ~((~res) >> shift) : res >> shift);
4161 break;
4162
4163 case 3: /* ROR/RRX */
4164 shift &= 31;
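/* A shift amount of zero here encodes RRX: rotate right by one bit
   through the carry flag. */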
4165 if (shift == 0)
4166 res = (res >> 1) | (carry ? 0x80000000L : 0);
4167 else
4168 res = (res >> shift) | (res << (32 - shift));
4169 break;
4170 }
4171
4172 return res & 0xffffffff;
4173 }
4174
4175 /* Return number of 1-bits in VAL. */
4176
4177 static int
4178 bitcount (unsigned long val)
4179 {
4180 int nbits;
4181 for (nbits = 0; val != 0; nbits++)
4182 val &= val - 1; /* Delete rightmost 1-bit in val. */
4183 return nbits;
4184 }
4185
4186 /* Return the size in bytes of the complete Thumb instruction whose
4187 first halfword is INST1. */
4188
4189 static int
4190 thumb_insn_size (unsigned short inst1)
4191 {
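/* Bits 15:11 of the first halfword are 0b11101, 0b11110 or 0b11111 only
   for the first half of a 32-bit Thumb-2 instruction; any other value
   means a complete 16-bit instruction. */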
4192 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4193 return 4;
4194 else
4195 return 2;
4196 }
4197
4198 static int
4199 thumb_advance_itstate (unsigned int itstate)
4200 {
4201 /* Preserve IT[7:5], the first three bits of the condition. Shift
4202 the upcoming condition flags left by one bit. */
4203 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
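/* For example, "ITTE EQ" (0xbf06) enters with ITSTATE 0x06: condition EQ
   for the first instruction in bits 7:4, mask 0110 in bits 3:0.
   Successive calls yield 0x0c (second instruction, EQ), 0x18 (third
   instruction, NE) and then a value whose low four bits are clear,
   which the test below turns into 0. */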
4204
4205 /* If we have finished the IT block, clear the state. */
4206 if ((itstate & 0x0f) == 0)
4207 itstate = 0;
4208
4209 return itstate;
4210 }
4211
4212 /* Find the next PC after the current instruction executes. In some
4213 cases we can not statically determine the answer (see the IT state
4214 handling in this function); in that case, a breakpoint may be
4215 inserted in addition to the returned PC, which will be used to set
4216 another breakpoint by our caller. */
4217
4218 static CORE_ADDR
4219 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
4220 {
4221 struct gdbarch *gdbarch = get_frame_arch (frame);
4222 struct address_space *aspace = get_frame_address_space (frame);
4223 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4224 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4225 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4226 unsigned short inst1;
4227 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4228 unsigned long offset;
4229 ULONGEST status, itstate;
4230
4231 nextpc = MAKE_THUMB_ADDR (nextpc);
4232 pc_val = MAKE_THUMB_ADDR (pc_val);
4233
4234 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4235
4236 /* Thumb-2 conditional execution support. There are eight bits in
4237 the CPSR which describe conditional execution state. Once
4238 reconstructed (they're in a funny order), the low five bits
4239 describe the low bit of the condition for each instruction and
4240 how many instructions remain. The high three bits describe the
4241 base condition. One of the low four bits will be set if an IT
4242 block is active. These bits read as zero on earlier
4243 processors. */
4244 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
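/* The CPSR stores IT[1:0] in bits 26:25 and IT[7:2] in bits 15:10;
   reassemble them into a single byte here. */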
4245 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
4246
4247 /* If-Then handling. On GNU/Linux, where this routine is used, we
4248 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4249 can disable execution of the undefined instruction. So we might
4250 miss the breakpoint if we set it on a skipped conditional
4251 instruction. Because conditional instructions can change the
4252 flags, affecting the execution of further instructions, we may
4253 need to set two breakpoints. */
4254
4255 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4256 {
4257 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4258 {
4259 /* An IT instruction. Because this instruction does not
4260 modify the flags, we can accurately predict the next
4261 executed instruction. */
4262 itstate = inst1 & 0x00ff;
4263 pc += thumb_insn_size (inst1);
4264
4265 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4266 {
4267 inst1 = read_memory_unsigned_integer (pc, 2,
4268 byte_order_for_code);
4269 pc += thumb_insn_size (inst1);
4270 itstate = thumb_advance_itstate (itstate);
4271 }
4272
4273 return MAKE_THUMB_ADDR (pc);
4274 }
4275 else if (itstate != 0)
4276 {
4277 /* We are in a conditional block. Check the condition. */
4278 if (! condition_true (itstate >> 4, status))
4279 {
4280 /* Advance to the next executed instruction. */
4281 pc += thumb_insn_size (inst1);
4282 itstate = thumb_advance_itstate (itstate);
4283
4284 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4285 {
4286 inst1 = read_memory_unsigned_integer (pc, 2,
4287 byte_order_for_code);
4288 pc += thumb_insn_size (inst1);
4289 itstate = thumb_advance_itstate (itstate);
4290 }
4291
4292 return MAKE_THUMB_ADDR (pc);
4293 }
4294 else if ((itstate & 0x0f) == 0x08)
4295 {
4296 /* This is the last instruction of the conditional
4297 block, and it is executed. We can handle it normally
4298 because the following instruction is not conditional,
4299 and we must handle it normally because it is
4300 permitted to branch. Fall through. */
4301 }
4302 else
4303 {
4304 int cond_negated;
4305
4306 /* There are conditional instructions after this one.
4307 If this instruction modifies the flags, then we can
4308 not predict what the next executed instruction will
4309 be. Fortunately, this instruction is architecturally
4310 forbidden to branch; we know it will fall through.
4311 Start by skipping past it. */
4312 pc += thumb_insn_size (inst1);
4313 itstate = thumb_advance_itstate (itstate);
4314
4315 /* Set a breakpoint on the following instruction. */
4316 gdb_assert ((itstate & 0x0f) != 0);
4317 if (insert_bkpt)
4318 insert_single_step_breakpoint (gdbarch, aspace, pc);
4319 cond_negated = (itstate >> 4) & 1;
4320
4321 /* Skip all following instructions with the same
4322 condition. If there is a later instruction in the IT
4323 block with the opposite condition, set the other
4324 breakpoint there. If not, then set a breakpoint on
4325 the instruction after the IT block. */
4326 do
4327 {
4328 inst1 = read_memory_unsigned_integer (pc, 2,
4329 byte_order_for_code);
4330 pc += thumb_insn_size (inst1);
4331 itstate = thumb_advance_itstate (itstate);
4332 }
4333 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4334
4335 return MAKE_THUMB_ADDR (pc);
4336 }
4337 }
4338 }
4339 else if (itstate & 0x0f)
4340 {
4341 /* We are in a conditional block. Check the condition. */
4342 int cond = itstate >> 4;
4343
4344 if (! condition_true (cond, status))
4345 {
4346 /* Advance to the next instruction. All the 32-bit
4347 instructions share a common prefix. */
4348 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4349 return MAKE_THUMB_ADDR (pc + 4);
4350 else
4351 return MAKE_THUMB_ADDR (pc + 2);
4352 }
4353
4354 /* Otherwise, handle the instruction normally. */
4355 }
4356
4357 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4358 {
4359 CORE_ADDR sp;
4360
4361 /* Fetch the saved PC from the stack. It's stored above
4362 all of the other registers. */
4363 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
4364 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4365 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4366 }
4367 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4368 {
4369 unsigned long cond = bits (inst1, 8, 11);
4370 if (cond == 0x0f) /* 0x0f = SWI */
4371 {
4372 struct gdbarch_tdep *tdep;
4373 tdep = gdbarch_tdep (gdbarch);
4374
4375 if (tdep->syscall_next_pc != NULL)
4376 nextpc = tdep->syscall_next_pc (frame);
4377
4378 }
4379 else if (cond != 0x0f && condition_true (cond, status))
4380 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4381 }
4382 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4383 {
4384 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4385 }
4386 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
4387 {
4388 unsigned short inst2;
4389 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4390
4391 /* Default to the next instruction. */
4392 nextpc = pc + 4;
4393 nextpc = MAKE_THUMB_ADDR (nextpc);
4394
4395 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4396 {
4397 /* Branches and miscellaneous control instructions. */
4398
4399 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4400 {
4401 /* B, BL, BLX. */
4402 int j1, j2, imm1, imm2;
4403
4404 imm1 = sbits (inst1, 0, 10);
4405 imm2 = bits (inst2, 0, 10);
4406 j1 = bit (inst2, 13);
4407 j2 = bit (inst2, 11);
4408
4409 offset = ((imm1 << 12) + (imm2 << 1));
4410 offset ^= ((!j2) << 22) | ((!j1) << 23);
4411
4412 nextpc = pc_val + offset;
4413 /* For BLX make sure to clear the low bits. */
4414 if (bit (inst2, 12) == 0)
4415 nextpc = nextpc & 0xfffffffc;
4416 }
4417 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4418 {
4419 /* SUBS PC, LR, #imm8. */
4420 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4421 nextpc -= inst2 & 0x00ff;
4422 }
4423 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4424 {
4425 /* Conditional branch. */
4426 if (condition_true (bits (inst1, 6, 9), status))
4427 {
4428 int sign, j1, j2, imm1, imm2;
4429
4430 sign = sbits (inst1, 10, 10);
4431 imm1 = bits (inst1, 0, 5);
4432 imm2 = bits (inst2, 0, 10);
4433 j1 = bit (inst2, 13);
4434 j2 = bit (inst2, 11);
4435
4436 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4437 offset += (imm1 << 12) + (imm2 << 1);
4438
4439 nextpc = pc_val + offset;
4440 }
4441 }
4442 }
4443 else if ((inst1 & 0xfe50) == 0xe810)
4444 {
4445 /* Load multiple or RFE. */
4446 int rn, offset, load_pc = 1;
4447
4448 rn = bits (inst1, 0, 3);
4449 if (bit (inst1, 7) && !bit (inst1, 8))
4450 {
4451 /* LDMIA or POP */
4452 if (!bit (inst2, 15))
4453 load_pc = 0;
4454 offset = bitcount (inst2) * 4 - 4;
4455 }
4456 else if (!bit (inst1, 7) && bit (inst1, 8))
4457 {
4458 /* LDMDB */
4459 if (!bit (inst2, 15))
4460 load_pc = 0;
4461 offset = -4;
4462 }
4463 else if (bit (inst1, 7) && bit (inst1, 8))
4464 {
4465 /* RFEIA */
4466 offset = 0;
4467 }
4468 else if (!bit (inst1, 7) && !bit (inst1, 8))
4469 {
4470 /* RFEDB */
4471 offset = -8;
4472 }
4473 else
4474 load_pc = 0;
4475
4476 if (load_pc)
4477 {
4478 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4479 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4480 }
4481 }
4482 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4483 {
4484 /* MOV PC or MOVS PC. */
4485 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4486 nextpc = MAKE_THUMB_ADDR (nextpc);
4487 }
4488 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
4489 {
4490 /* LDR PC. */
4491 CORE_ADDR base;
4492 int rn, load_pc = 1;
4493
4494 rn = bits (inst1, 0, 3);
4495 base = get_frame_register_unsigned (frame, rn);
4496 if (rn == ARM_PC_REGNUM)
4497 {
4498 base = (base + 4) & ~(CORE_ADDR) 0x3;
4499 if (bit (inst1, 7))
4500 base += bits (inst2, 0, 11);
4501 else
4502 base -= bits (inst2, 0, 11);
4503 }
4504 else if (bit (inst1, 7))
4505 base += bits (inst2, 0, 11);
4506 else if (bit (inst2, 11))
4507 {
4508 if (bit (inst2, 10))
4509 {
4510 if (bit (inst2, 9))
4511 base += bits (inst2, 0, 7);
4512 else
4513 base -= bits (inst2, 0, 7);
4514 }
4515 }
4516 else if ((inst2 & 0x0fc0) == 0x0000)
4517 {
4518 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4519 base += get_frame_register_unsigned (frame, rm) << shift;
4520 }
4521 else
4522 /* Reserved. */
4523 load_pc = 0;
4524
4525 if (load_pc)
4526 nextpc = get_frame_memory_unsigned (frame, base, 4);
4527 }
4528 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
4529 {
4530 /* TBB. */
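/* Each table entry is one byte holding half the forward branch offset,
   hence the multiplication by two below. */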
4531 CORE_ADDR tbl_reg, table, offset, length;
4532
4533 tbl_reg = bits (inst1, 0, 3);
4534 if (tbl_reg == 0x0f)
4535 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4536 else
4537 table = get_frame_register_unsigned (frame, tbl_reg);
4538
4539 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4540 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4541 nextpc = pc_val + length;
4542 }
4543 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
4544 {
4545 /* TBH. */
4546 CORE_ADDR tbl_reg, table, offset, length;
4547
4548 tbl_reg = bits (inst1, 0, 3);
4549 if (tbl_reg == 0x0f)
4550 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4551 else
4552 table = get_frame_register_unsigned (frame, tbl_reg);
4553
4554 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4555 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4556 nextpc = pc_val + length;
4557 }
4558 }
4559 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4560 {
4561 if (bits (inst1, 3, 6) == 0x0f)
4562 nextpc = pc_val;
4563 else
4564 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4565 }
4566 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4567 {
4568 if (bits (inst1, 3, 6) == 0x0f)
4569 nextpc = pc_val;
4570 else
4571 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4572
4573 nextpc = MAKE_THUMB_ADDR (nextpc);
4574 }
4575 else if ((inst1 & 0xf500) == 0xb100)
4576 {
4577 /* CBNZ or CBZ. */
4578 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4579 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4580
4581 if (bit (inst1, 11) && reg != 0)
4582 nextpc = pc_val + imm;
4583 else if (!bit (inst1, 11) && reg == 0)
4584 nextpc = pc_val + imm;
4585 }
4586 return nextpc;
4587 }
4588
4589 /* Get the raw next address. PC is the current program counter, in
4590 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
4591 the alternative next instruction if there are two options.
4592
4593 The value returned has the execution state of the next instruction
4594 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4595 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4596 address. */
4597
4598 static CORE_ADDR
4599 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
4600 {
4601 struct gdbarch *gdbarch = get_frame_arch (frame);
4602 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4603 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4604 unsigned long pc_val;
4605 unsigned long this_instr;
4606 unsigned long status;
4607 CORE_ADDR nextpc;
4608
4609 if (arm_frame_is_thumb (frame))
4610 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
4611
4612 pc_val = (unsigned long) pc;
4613 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4614
4615 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4616 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4617
4618 if (bits (this_instr, 28, 31) == INST_NV)
4619 switch (bits (this_instr, 24, 27))
4620 {
4621 case 0xa:
4622 case 0xb:
4623 {
4624 /* Branch with Link and change to Thumb. */
4625 nextpc = BranchDest (pc, this_instr);
4626 nextpc |= bit (this_instr, 24) << 1;
4627 nextpc = MAKE_THUMB_ADDR (nextpc);
4628 break;
4629 }
4630 case 0xc:
4631 case 0xd:
4632 case 0xe:
4633 /* Coprocessor register transfer. */
4634 if (bits (this_instr, 12, 15) == 15)
4635 error (_("Invalid update to pc in instruction"));
4636 break;
4637 }
4638 else if (condition_true (bits (this_instr, 28, 31), status))
4639 {
4640 switch (bits (this_instr, 24, 27))
4641 {
4642 case 0x0:
4643 case 0x1: /* data processing */
4644 case 0x2:
4645 case 0x3:
4646 {
4647 unsigned long operand1, operand2, result = 0;
4648 unsigned long rn;
4649 int c;
4650
4651 if (bits (this_instr, 12, 15) != 15)
4652 break;
4653
4654 if (bits (this_instr, 22, 25) == 0
4655 && bits (this_instr, 4, 7) == 9) /* multiply */
4656 error (_("Invalid update to pc in instruction"));
4657
4658 /* BX <reg>, BLX <reg> */
4659 if (bits (this_instr, 4, 27) == 0x12fff1
4660 || bits (this_instr, 4, 27) == 0x12fff3)
4661 {
4662 rn = bits (this_instr, 0, 3);
4663 nextpc = ((rn == ARM_PC_REGNUM)
4664 ? (pc_val + 8)
4665 : get_frame_register_unsigned (frame, rn));
4666
4667 return nextpc;
4668 }
4669
4670 /* Data processing instruction with the PC as destination. */
4671 c = (status & FLAG_C) ? 1 : 0;
4672 rn = bits (this_instr, 16, 19);
4673 operand1 = ((rn == ARM_PC_REGNUM)
4674 ? (pc_val + 8)
4675 : get_frame_register_unsigned (frame, rn));
4676
4677 if (bit (this_instr, 25))
4678 {
4679 unsigned long immval = bits (this_instr, 0, 7);
4680 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4681 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4682 & 0xffffffff;
4683 }
4684 else /* operand 2 is a shifted register. */
4685 operand2 = shifted_reg_val (frame, this_instr, c,
4686 pc_val, status);
4687
4688 switch (bits (this_instr, 21, 24))
4689 {
4690 case 0x0: /*and */
4691 result = operand1 & operand2;
4692 break;
4693
4694 case 0x1: /*eor */
4695 result = operand1 ^ operand2;
4696 break;
4697
4698 case 0x2: /*sub */
4699 result = operand1 - operand2;
4700 break;
4701
4702 case 0x3: /*rsb */
4703 result = operand2 - operand1;
4704 break;
4705
4706 case 0x4: /*add */
4707 result = operand1 + operand2;
4708 break;
4709
4710 case 0x5: /*adc */
4711 result = operand1 + operand2 + c;
4712 break;
4713
4714 case 0x6: /*sbc */
4715 result = operand1 - operand2 + c;
4716 break;
4717
4718 case 0x7: /*rsc */
4719 result = operand2 - operand1 + c;
4720 break;
4721
4722 case 0x8:
4723 case 0x9:
4724 case 0xa:
4725 case 0xb: /* tst, teq, cmp, cmn */
4726 result = (unsigned long) nextpc;
4727 break;
4728
4729 case 0xc: /*orr */
4730 result = operand1 | operand2;
4731 break;
4732
4733 case 0xd: /*mov */
4734 /* Always step into a function. */
4735 result = operand2;
4736 break;
4737
4738 case 0xe: /*bic */
4739 result = operand1 & ~operand2;
4740 break;
4741
4742 case 0xf: /*mvn */
4743 result = ~operand2;
4744 break;
4745 }
4746
4747 /* In 26-bit APCS the bottom two bits of the result are
4748 ignored, and we always end up in ARM state. */
4749 if (!arm_apcs_32)
4750 nextpc = arm_addr_bits_remove (gdbarch, result);
4751 else
4752 nextpc = result;
4753
4754 break;
4755 }
4756
4757 case 0x4:
4758 case 0x5: /* data transfer */
4759 case 0x6:
4760 case 0x7:
4761 if (bit (this_instr, 20))
4762 {
4763 /* load */
4764 if (bits (this_instr, 12, 15) == 15)
4765 {
4766 /* rd == pc */
4767 unsigned long rn;
4768 unsigned long base;
4769
4770 if (bit (this_instr, 22))
4771 error (_("Invalid update to pc in instruction"));
4772
4773 /* Word load into the PC. */
4774 rn = bits (this_instr, 16, 19);
4775 base = ((rn == ARM_PC_REGNUM)
4776 ? (pc_val + 8)
4777 : get_frame_register_unsigned (frame, rn));
4778
4779 if (bit (this_instr, 24))
4780 {
4781 /* pre-indexed */
4782 int c = (status & FLAG_C) ? 1 : 0;
4783 unsigned long offset =
4784 (bit (this_instr, 25)
4785 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4786 : bits (this_instr, 0, 11));
4787
4788 if (bit (this_instr, 23))
4789 base += offset;
4790 else
4791 base -= offset;
4792 }
4793 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
4794 4, byte_order);
4795 }
4796 }
4797 break;
4798
4799 case 0x8:
4800 case 0x9: /* block transfer */
4801 if (bit (this_instr, 20))
4802 {
4803 /* LDM */
4804 if (bit (this_instr, 15))
4805 {
4806 /* loading pc */
4807 int offset = 0;
4808
4809 if (bit (this_instr, 23))
4810 {
4811 /* up */
4812 unsigned long reglist = bits (this_instr, 0, 14);
4813 offset = bitcount (reglist) * 4;
4814 if (bit (this_instr, 24)) /* pre */
4815 offset += 4;
4816 }
4817 else if (bit (this_instr, 24))
4818 offset = -4;
4819
4820 {
4821 unsigned long rn_val =
4822 get_frame_register_unsigned (frame,
4823 bits (this_instr, 16, 19));
4824 nextpc =
4825 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
4826 + offset),
4827 4, byte_order);
4828 }
4829 }
4830 }
4831 break;
4832
4833 case 0xb: /* branch & link */
4834 case 0xa: /* branch */
4835 {
4836 nextpc = BranchDest (pc, this_instr);
4837 break;
4838 }
4839
4840 case 0xc:
4841 case 0xd:
4842 case 0xe: /* coproc ops */
4843 break;
4844 case 0xf: /* SWI */
4845 {
4846 struct gdbarch_tdep *tdep;
4847 tdep = gdbarch_tdep (gdbarch);
4848
4849 if (tdep->syscall_next_pc != NULL)
4850 nextpc = tdep->syscall_next_pc (frame);
4851
4852 }
4853 break;
4854
4855 default:
4856 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4857 return (pc);
4858 }
4859 }
4860
4861 return nextpc;
4862 }
4863
4864 CORE_ADDR
4865 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4866 {
4867 struct gdbarch *gdbarch = get_frame_arch (frame);
4868 CORE_ADDR nextpc =
4869 gdbarch_addr_bits_remove (gdbarch,
4870 arm_get_next_pc_raw (frame, pc, TRUE));
4871 if (nextpc == pc)
4872 error (_("Infinite loop detected"));
4873 return nextpc;
4874 }
4875
4876 /* single_step() is called just before we want to resume the inferior,
4877 if we want to single-step it but there is no hardware or kernel
4878 single-step support. We find the target of the coming instruction
4879 and breakpoint it. */
4880
4881 int
4882 arm_software_single_step (struct frame_info *frame)
4883 {
4884 struct gdbarch *gdbarch = get_frame_arch (frame);
4885 struct address_space *aspace = get_frame_address_space (frame);
4886
4887 /* NOTE: This may insert the wrong breakpoint instruction when
4888 single-stepping over a mode-changing instruction, if the
4889 CPSR heuristics are used. */
4890
4891 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4892 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4893
4894 return 1;
4895 }
4896
4897 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4898 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4899 NULL if an error occurs. BUF is freed. */
4900
4901 static gdb_byte *
4902 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4903 int old_len, int new_len)
4904 {
4905 gdb_byte *new_buf, *middle;
4906 int bytes_to_read = new_len - old_len;
4907
4908 new_buf = xmalloc (new_len);
4909 memcpy (new_buf + bytes_to_read, buf, old_len);
4910 xfree (buf);
4911 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4912 {
4913 xfree (new_buf);
4914 return NULL;
4915 }
4916 return new_buf;
4917 }
4918
4919 /* An IT block is at most the 2-byte IT instruction followed by four
4920 4-byte instructions. The instruction we are breaking on can itself be
4921 the last of those four, so the furthest back we must search to find an
4922 IT block that affects it is thus 2 + 3 * 4 == 14 bytes. */
4923 #define MAX_IT_BLOCK_PREFIX 14
4924
4925 /* Use a quick scan if there are more than this many bytes of
4926 code. */
4927 #define IT_SCAN_THRESHOLD 32
4928
4929 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4930 A breakpoint in an IT block may not be hit, depending on the
4931 condition flags. */
4932 static CORE_ADDR
4933 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4934 {
4935 gdb_byte *buf;
4936 char map_type;
4937 CORE_ADDR boundary, func_start;
4938 int buf_len, buf2_len;
4939 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4940 int i, any, last_it, last_it_count;
4941
4942 /* If we are using BKPT breakpoints, none of this is necessary. */
4943 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4944 return bpaddr;
4945
4946 /* ARM mode does not have this problem. */
4947 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4948 return bpaddr;
4949
4950 /* We are setting a breakpoint in Thumb code that could potentially
4951 contain an IT block. The first step is to find how much Thumb
4952 code there is; we do not need to read outside of known Thumb
4953 sequences. */
4954 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4955 if (map_type == 0)
4956 /* Thumb-2 code must have mapping symbols to have a chance. */
4957 return bpaddr;
4958
4959 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4960
4961 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4962 && func_start > boundary)
4963 boundary = func_start;
4964
4965 /* Search for a candidate IT instruction. We have to do some fancy
4966 footwork to distinguish a real IT instruction from the second
4967 half of a 32-bit instruction, but there is no need for that if
4968 there's no candidate. */
4969 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4970 if (buf_len == 0)
4971 /* No room for an IT instruction. */
4972 return bpaddr;
4973
4974 buf = xmalloc (buf_len);
4975 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4976 return bpaddr;
4977 any = 0;
4978 for (i = 0; i < buf_len; i += 2)
4979 {
4980 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4981 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4982 {
4983 any = 1;
4984 break;
4985 }
4986 }
4987 if (any == 0)
4988 {
4989 xfree (buf);
4990 return bpaddr;
4991 }
4992
4993 /* OK, the code bytes before this instruction contain at least one
4994 halfword which resembles an IT instruction. We know that it's
4995 Thumb code, but there are still two possibilities. Either the
4996 halfword really is an IT instruction, or it is the second half of
4997 a 32-bit Thumb instruction. The only way we can tell is to
4998 scan forwards from a known instruction boundary. */
4999 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5000 {
5001 int definite;
5002
5003 /* There's a lot of code before this instruction. Start with an
5004 optimistic search; it's easy to recognize halfwords that can
5005 not be the start of a 32-bit instruction, and use that to
5006 lock on to the instruction boundaries. */
5007 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5008 if (buf == NULL)
5009 return bpaddr;
5010 buf_len = IT_SCAN_THRESHOLD;
5011
5012 definite = 0;
5013 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5014 {
5015 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5016 if (thumb_insn_size (inst1) == 2)
5017 {
5018 definite = 1;
5019 break;
5020 }
5021 }
5022
5023 /* At this point, if DEFINITE, BUF[I] is the first place we
5024 are sure that we know the instruction boundaries, and it is far
5025 enough from BPADDR that we could not miss an IT instruction
5026 affecting BPADDR. If ! DEFINITE, give up - start from a
5027 known boundary. */
5028 if (! definite)
5029 {
5030 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5031 bpaddr - boundary);
5032 if (buf == NULL)
5033 return bpaddr;
5034 buf_len = bpaddr - boundary;
5035 i = 0;
5036 }
5037 }
5038 else
5039 {
5040 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5041 if (buf == NULL)
5042 return bpaddr;
5043 buf_len = bpaddr - boundary;
5044 i = 0;
5045 }
5046
5047 /* Scan forwards. Find the last IT instruction before BPADDR. */
5048 last_it = -1;
5049 last_it_count = 0;
5050 while (i < buf_len)
5051 {
5052 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5053 last_it_count--;
5054 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5055 {
5056 last_it = i;
5057 if (inst1 & 0x0001)
5058 last_it_count = 4;
5059 else if (inst1 & 0x0002)
5060 last_it_count = 3;
5061 else if (inst1 & 0x0004)
5062 last_it_count = 2;
5063 else
5064 last_it_count = 1;
5065 }
5066 i += thumb_insn_size (inst1);
5067 }
5068
5069 xfree (buf);
5070
5071 if (last_it == -1)
5072 /* There wasn't really an IT instruction after all. */
5073 return bpaddr;
5074
5075 if (last_it_count < 1)
5076 /* It was too far away. */
5077 return bpaddr;
5078
5079 /* This really is a trouble spot. Move the breakpoint to the IT
5080 instruction. */
5081 return bpaddr - buf_len + last_it;
5082 }
5083
5084 /* ARM displaced stepping support.
5085
5086 Generally ARM displaced stepping works as follows:
5087
5088 1. When an instruction is to be single-stepped, it is first decoded by
5089 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5090 Depending on the type of instruction, it is then copied to a scratch
5091 location, possibly in a modified form. The copy_* set of functions
5092 performs such modification, as necessary. A breakpoint is placed after
5093 the modified instruction in the scratch space to return control to GDB.
5094 Note in particular that instructions which modify the PC will no longer
5095 do so after modification.
5096
5097 2. The instruction is single-stepped, by setting the PC to the scratch
5098 location address, and resuming. Control returns to GDB when the
5099 breakpoint is hit.
5100
5101 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5102 function used for the current instruction. This function's job is to
5103 put the CPU/memory state back to what it would have been if the
5104 instruction had been executed unmodified in its original location. */
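/* For example, a conditional branch such as "BLNE <label>" is copied to
   the scratch area as a plain NOP (see copy_b_bl_blx below); after the
   single step completes, cleanup_branch evaluates the NE condition
   against the saved flags and, only if the branch would have been taken,
   writes the return address to LR and the destination to the PC. */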
5105
5106 /* NOP instruction (mov r0, r0). */
5107 #define ARM_NOP 0xe1a00000
5108
5109 /* Helper for register reads for displaced stepping. In particular, this
5110 returns the PC as it would be seen by the instruction at its original
5111 location. */
5112
5113 ULONGEST
5114 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5115 int regno)
5116 {
5117 ULONGEST ret;
5118 CORE_ADDR from = dsc->insn_addr;
5119
5120 if (regno == ARM_PC_REGNUM)
5121 {
5122 /* Compute pipeline offset:
5123 - When executing an ARM instruction, PC reads as the address of the
5124 current instruction plus 8.
5125 - When executing a Thumb instruction, PC reads as the address of the
5126 current instruction plus 4. */
5127
5128 if (!dsc->is_thumb)
5129 from += 8;
5130 else
5131 from += 4;
5132
5133 if (debug_displaced)
5134 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5135 (unsigned long) from);
5136 return (ULONGEST) from;
5137 }
5138 else
5139 {
5140 regcache_cooked_read_unsigned (regs, regno, &ret);
5141 if (debug_displaced)
5142 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5143 regno, (unsigned long) ret);
5144 return ret;
5145 }
5146 }
5147
5148 static int
5149 displaced_in_arm_mode (struct regcache *regs)
5150 {
5151 ULONGEST ps;
5152 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5153
5154 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5155
5156 return (ps & t_bit) == 0;
5157 }
5158
5159 /* Write to the PC as from a branch instruction. */
5160
5161 static void
5162 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5163 ULONGEST val)
5164 {
5165 if (!dsc->is_thumb)
5166 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5167 architecture versions < 6. */
5168 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5169 val & ~(ULONGEST) 0x3);
5170 else
5171 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5172 val & ~(ULONGEST) 0x1);
5173 }
5174
5175 /* Write to the PC as from a branch-exchange instruction. */
5176
5177 static void
5178 bx_write_pc (struct regcache *regs, ULONGEST val)
5179 {
5180 ULONGEST ps;
5181 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5182
5183 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5184
5185 if ((val & 1) == 1)
5186 {
5187 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5188 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5189 }
5190 else if ((val & 2) == 0)
5191 {
5192 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5193 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5194 }
5195 else
5196 {
5197 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5198 mode, align dest to 4 bytes). */
5199 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5200 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5201 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5202 }
5203 }
5204
5205 /* Write to the PC as if from a load instruction. */
5206
5207 static void
5208 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5209 ULONGEST val)
5210 {
5211 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5212 bx_write_pc (regs, val);
5213 else
5214 branch_write_pc (regs, dsc, val);
5215 }
5216
5217 /* Write to the PC as if from an ALU instruction. */
5218
5219 static void
5220 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5221 ULONGEST val)
5222 {
5223 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5224 bx_write_pc (regs, val);
5225 else
5226 branch_write_pc (regs, dsc, val);
5227 }
5228
5229 /* Helper for writing to registers for displaced stepping. Writing to the PC
5230 has varying effects depending on the instruction which does the write:
5231 this is controlled by the WRITE_PC argument. */
5232
5233 void
5234 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5235 int regno, ULONGEST val, enum pc_write_style write_pc)
5236 {
5237 if (regno == ARM_PC_REGNUM)
5238 {
5239 if (debug_displaced)
5240 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5241 (unsigned long) val);
5242 switch (write_pc)
5243 {
5244 case BRANCH_WRITE_PC:
5245 branch_write_pc (regs, dsc, val);
5246 break;
5247
5248 case BX_WRITE_PC:
5249 bx_write_pc (regs, val);
5250 break;
5251
5252 case LOAD_WRITE_PC:
5253 load_write_pc (regs, dsc, val);
5254 break;
5255
5256 case ALU_WRITE_PC:
5257 alu_write_pc (regs, dsc, val);
5258 break;
5259
5260 case CANNOT_WRITE_PC:
5261 warning (_("Instruction wrote to PC in an unexpected way when "
5262 "single-stepping"));
5263 break;
5264
5265 default:
5266 internal_error (__FILE__, __LINE__,
5267 _("Invalid argument to displaced_write_reg"));
5268 }
5269
5270 dsc->wrote_to_pc = 1;
5271 }
5272 else
5273 {
5274 if (debug_displaced)
5275 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5276 regno, (unsigned long) val);
5277 regcache_cooked_write_unsigned (regs, regno, val);
5278 }
5279 }
5280
5281 /* This function is used to concisely determine if an instruction INSN
5282 references PC. Register fields of interest in INSN should have the
5283 corresponding fields of BITMASK set to 0b1111. The function
5284 returns 1 if any of these fields in INSN reference the PC
5285 (also 0b1111, r15), else it returns 0. */
5286
5287 static int
5288 insn_references_pc (uint32_t insn, uint32_t bitmask)
5289 {
5290 uint32_t lowbit = 1;
5291
5292 while (bitmask != 0)
5293 {
5294 uint32_t mask;
5295
5296 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5297 ;
5298
5299 if (!lowbit)
5300 break;
5301
5302 mask = lowbit * 0xf;
5303
5304 if ((insn & mask) == mask)
5305 return 1;
5306
5307 bitmask &= ~mask;
5308 }
5309
5310 return 0;
5311 }
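/* For example, the ARM instruction "add r0, pc, #4" encodes as
   0xe28f0004. With BITMASK 0x000ff000 (covering the Rd and Rn fields),
   the Rn field is 0b1111 (the PC), so this function returns 1. */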
5312
5313 /* The simplest copy function. Many instructions have the same effect no
5314 matter what address they are executed at: in those cases, use this. */
5315
5316 static int
5317 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5318 const char *iname, struct displaced_step_closure *dsc)
5319 {
5320 if (debug_displaced)
5321 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5322 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5323 iname);
5324
5325 dsc->modinsn[0] = insn;
5326
5327 return 0;
5328 }
5329
5330 /* Preload instructions with immediate offset. */
5331
5332 static void
5333 cleanup_preload (struct gdbarch *gdbarch,
5334 struct regcache *regs, struct displaced_step_closure *dsc)
5335 {
5336 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5337 if (!dsc->u.preload.immed)
5338 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5339 }
5340
5341 static int
5342 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5343 struct displaced_step_closure *dsc)
5344 {
5345 unsigned int rn = bits (insn, 16, 19);
5346 ULONGEST rn_val;
5347
5348 if (!insn_references_pc (insn, 0x000f0000ul))
5349 return copy_unmodified (gdbarch, insn, "preload", dsc);
5350
5351 if (debug_displaced)
5352 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5353 (unsigned long) insn);
5354
5355 /* Preload instructions:
5356
5357 {pli/pld} [rn, #+/-imm]
5358 ->
5359 {pli/pld} [r0, #+/-imm]. */
5360
5361 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5362 rn_val = displaced_read_reg (regs, dsc, rn);
5363 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5364
5365 dsc->u.preload.immed = 1;
5366
5367 dsc->modinsn[0] = insn & 0xfff0ffff;
5368
5369 dsc->cleanup = &cleanup_preload;
5370
5371 return 0;
5372 }
5373
5374 /* Preload instructions with register offset. */
5375
5376 static int
5377 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5378 struct regcache *regs,
5379 struct displaced_step_closure *dsc)
5380 {
5381 unsigned int rn = bits (insn, 16, 19);
5382 unsigned int rm = bits (insn, 0, 3);
5383 ULONGEST rn_val, rm_val;
5384
5385 if (!insn_references_pc (insn, 0x000f000ful))
5386 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
5387
5388 if (debug_displaced)
5389 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5390 (unsigned long) insn);
5391
5392 /* Preload register-offset instructions:
5393
5394 {pli/pld} [rn, rm {, shift}]
5395 ->
5396 {pli/pld} [r0, r1 {, shift}]. */
5397
5398 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5399 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5400 rn_val = displaced_read_reg (regs, dsc, rn);
5401 rm_val = displaced_read_reg (regs, dsc, rm);
5402 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5403 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5404
5405 dsc->u.preload.immed = 0;
5406
5407 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5408
5409 dsc->cleanup = &cleanup_preload;
5410
5411 return 0;
5412 }
5413
5414 /* Copy/cleanup coprocessor load and store instructions. */
5415
5416 static void
5417 cleanup_copro_load_store (struct gdbarch *gdbarch,
5418 struct regcache *regs,
5419 struct displaced_step_closure *dsc)
5420 {
5421 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5422
5423 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5424
5425 if (dsc->u.ldst.writeback)
5426 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5427 }
5428
5429 static int
5430 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5431 struct regcache *regs,
5432 struct displaced_step_closure *dsc)
5433 {
5434 unsigned int rn = bits (insn, 16, 19);
5435 ULONGEST rn_val;
5436
5437 if (!insn_references_pc (insn, 0x000f0000ul))
5438 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5439
5440 if (debug_displaced)
5441 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5442 "load/store insn %.8lx\n", (unsigned long) insn);
5443
5444 /* Coprocessor load/store instructions:
5445
5446 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5447 ->
5448 {stc/stc2} [r0, #+/-imm].
5449
5450 ldc/ldc2 are handled identically. */
5451
5452 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5453 rn_val = displaced_read_reg (regs, dsc, rn);
5454 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5455
5456 dsc->u.ldst.writeback = bit (insn, 25);
5457 dsc->u.ldst.rn = rn;
5458
5459 dsc->modinsn[0] = insn & 0xfff0ffff;
5460
5461 dsc->cleanup = &cleanup_copro_load_store;
5462
5463 return 0;
5464 }
5465
5466 /* Clean up branch instructions (actually perform the branch, by setting
5467 PC). */
5468
5469 static void
5470 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5471 struct displaced_step_closure *dsc)
5472 {
5473 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5474 int branch_taken = condition_true (dsc->u.branch.cond, status);
5475 enum pc_write_style write_pc = dsc->u.branch.exchange
5476 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5477
5478 if (!branch_taken)
5479 return;
5480
5481 if (dsc->u.branch.link)
5482 {
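/* For an ARM-state branch, displaced_read_reg returns the original
   instruction address plus 8, so PC - 4 is the address of the
   instruction following the branch. */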
5483 ULONGEST pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
5484 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
5485 }
5486
5487 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5488 }
5489
5490 /* Copy B/BL/BLX instructions with immediate destinations. */
5491
5492 static int
5493 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5494 struct regcache *regs, struct displaced_step_closure *dsc)
5495 {
5496 unsigned int cond = bits (insn, 28, 31);
5497 int exchange = (cond == 0xf);
5498 int link = exchange || bit (insn, 24);
5499 CORE_ADDR from = dsc->insn_addr;
5500 long offset;
5501
5502 if (debug_displaced)
5503 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5504 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5505 (unsigned long) insn);
5506
5507 /* Implement "BL<cond> <label>" as:
5508
5509 Preparation: cond <- instruction condition
5510 Insn: mov r0, r0 (nop)
5511 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5512
5513 B<cond> similar, but don't set r14 in cleanup. */
5514
5515 if (exchange)
5516 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5517 then arrange the switch into Thumb mode. */
5518 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5519 else
5520 offset = bits (insn, 0, 23) << 2;
5521
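/* Sign-extend the 26-bit byte offset (bit 25 is the sign bit). */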
5522 if (bit (offset, 25))
5523 offset = offset | ~0x3ffffff;
5524
5525 dsc->u.branch.cond = cond;
5526 dsc->u.branch.link = link;
5527 dsc->u.branch.exchange = exchange;
5528 dsc->u.branch.dest = from + 8 + offset;
5529
5530 dsc->modinsn[0] = ARM_NOP;
5531
5532 dsc->cleanup = &cleanup_branch;
5533
5534 return 0;
5535 }
5536
5537 /* Copy BX/BLX with register-specified destinations. */
5538
5539 static int
5540 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5541 struct regcache *regs, struct displaced_step_closure *dsc)
5542 {
5543 unsigned int cond = bits (insn, 28, 31);
5544 /* BX: x12xxx1x
5545 BLX: x12xxx3x. */
5546 int link = bit (insn, 5);
5547 unsigned int rm = bits (insn, 0, 3);
5548
5549 if (debug_displaced)
5550 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
5551 "%.8lx\n", (link) ? "blx" : "bx",
5552 (unsigned long) insn);
5553
5554 /* Implement {BX,BLX}<cond> <reg>" as:
5555
5556 Preparation: cond <- instruction condition
5557 Insn: mov r0, r0 (nop)
5558 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5559
5560 Don't set r14 in cleanup for BX. */
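   /* The destination is sampled from the register here, at copy time.
      Since "exchange" is set, the cleanup writes the PC with BX_WRITE_PC,
      which honours bit 0 of that value: e.g. "bx lr" with bit 0 of lr set
      resumes in Thumb state.  */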
5561
5562 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5563
5564 dsc->u.branch.cond = cond;
5565 dsc->u.branch.link = link;
5566 dsc->u.branch.exchange = 1;
5567
5568 dsc->modinsn[0] = ARM_NOP;
5569
5570 dsc->cleanup = &cleanup_branch;
5571
5572 return 0;
5573 }
5574
5575 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5576
5577 static void
5578 cleanup_alu_imm (struct gdbarch *gdbarch,
5579 struct regcache *regs, struct displaced_step_closure *dsc)
5580 {
5581 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5582 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5583 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5584 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5585 }
5586
5587 static int
5588 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5589 struct displaced_step_closure *dsc)
5590 {
5591 unsigned int rn = bits (insn, 16, 19);
5592 unsigned int rd = bits (insn, 12, 15);
5593 unsigned int op = bits (insn, 21, 24);
5594 int is_mov = (op == 0xd);
5595 ULONGEST rd_val, rn_val;
5596
5597 if (!insn_references_pc (insn, 0x000ff000ul))
5598 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5599
5600 if (debug_displaced)
5601 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5602 "%.8lx\n", is_mov ? "move" : "ALU",
5603 (unsigned long) insn);
5604
5605 /* Instruction is of form:
5606
5607 <op><cond> rd, [rn,] #imm
5608
5609 Rewrite as:
5610
5611 Preparation: tmp1, tmp2 <- r0, r1;
5612 r0, r1 <- rd, rn
5613 Insn: <op><cond> r0, r1, #imm
5614 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5615 */
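   /* For example, a PC-relative address calculation such as
      "add r3, pc, #4" runs out of line as "add r0, r1, #4", with r1
      preloaded with the original PC value; the cleanup then copies r0 back
      into r3 and restores r0 and r1.  */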
5616
5617 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5618 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5619 rn_val = displaced_read_reg (regs, dsc, rn);
5620 rd_val = displaced_read_reg (regs, dsc, rd);
5621 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5622 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5623 dsc->rd = rd;
5624
5625 if (is_mov)
5626 dsc->modinsn[0] = insn & 0xfff00fff;
5627 else
5628 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5629
5630 dsc->cleanup = &cleanup_alu_imm;
5631
5632 return 0;
5633 }
5634
5635 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5636
5637 static void
5638 cleanup_alu_reg (struct gdbarch *gdbarch,
5639 struct regcache *regs, struct displaced_step_closure *dsc)
5640 {
5641 ULONGEST rd_val;
5642 int i;
5643
5644 rd_val = displaced_read_reg (regs, dsc, 0);
5645
5646 for (i = 0; i < 3; i++)
5647 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5648
5649 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5650 }
5651
5652 static int
5653 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5654 struct displaced_step_closure *dsc)
5655 {
5656 unsigned int rn = bits (insn, 16, 19);
5657 unsigned int rm = bits (insn, 0, 3);
5658 unsigned int rd = bits (insn, 12, 15);
5659 unsigned int op = bits (insn, 21, 24);
5660 int is_mov = (op == 0xd);
5661 ULONGEST rd_val, rn_val, rm_val;
5662
5663 if (!insn_references_pc (insn, 0x000ff00ful))
5664 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5665
5666 if (debug_displaced)
5667 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
5668 is_mov ? "move" : "ALU", (unsigned long) insn);
5669
5670 /* Instruction is of form:
5671
5672 <op><cond> rd, [rn,] rm [, <shift>]
5673
5674 Rewrite as:
5675
5676 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5677 r0, r1, r2 <- rd, rn, rm
5678 Insn: <op><cond> r0, r1, r2 [, <shift>]
5679 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5680 */
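   /* E.g. "add r2, pc, r4" runs out of line as "add r0, r1, r2", with r1
      and r2 preloaded with the original PC and r4 values; the cleanup
      copies r0 back into r2 and restores r0-r2.  Any shift applied to the
      register operand is preserved by the rewrite.  */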
5681
5682 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5683 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5684 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5685 rd_val = displaced_read_reg (regs, dsc, rd);
5686 rn_val = displaced_read_reg (regs, dsc, rn);
5687 rm_val = displaced_read_reg (regs, dsc, rm);
5688 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5689 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5690 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5691 dsc->rd = rd;
5692
5693 if (is_mov)
5694 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5695 else
5696 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5697
5698 dsc->cleanup = &cleanup_alu_reg;
5699
5700 return 0;
5701 }
5702
5703 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5704
5705 static void
5706 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5707 struct regcache *regs,
5708 struct displaced_step_closure *dsc)
5709 {
5710 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5711 int i;
5712
5713 for (i = 0; i < 4; i++)
5714 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5715
5716 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5717 }
5718
5719 static int
5720 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5721 struct regcache *regs,
5722 struct displaced_step_closure *dsc)
5723 {
5724 unsigned int rn = bits (insn, 16, 19);
5725 unsigned int rm = bits (insn, 0, 3);
5726 unsigned int rd = bits (insn, 12, 15);
5727 unsigned int rs = bits (insn, 8, 11);
5728 unsigned int op = bits (insn, 21, 24);
5729 int is_mov = (op == 0xd), i;
5730 ULONGEST rd_val, rn_val, rm_val, rs_val;
5731
5732 if (!insn_references_pc (insn, 0x000fff0ful))
5733 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5734
5735 if (debug_displaced)
5736 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
5737 "%.8lx\n", is_mov ? "move" : "ALU",
5738 (unsigned long) insn);
5739
5740 /* Instruction is of form:
5741
5742 <op><cond> rd, [rn,] rm, <shift> rs
5743
5744 Rewrite as:
5745
5746 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5747 r0, r1, r2, r3 <- rd, rn, rm, rs
5748 Insn: <op><cond> r0, r1, r2, <shift> r3
5749 Cleanup: tmp5 <- r0
5750 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5751 rd <- tmp5
5752 */
5753
5754 for (i = 0; i < 4; i++)
5755 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
5756
5757 rd_val = displaced_read_reg (regs, dsc, rd);
5758 rn_val = displaced_read_reg (regs, dsc, rn);
5759 rm_val = displaced_read_reg (regs, dsc, rm);
5760 rs_val = displaced_read_reg (regs, dsc, rs);
5761 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5762 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5763 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5764 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
5765 dsc->rd = rd;
5766
5767 if (is_mov)
5768 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5769 else
5770 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5771
5772 dsc->cleanup = &cleanup_alu_shifted_reg;
5773
5774 return 0;
5775 }
5776
5777 /* Clean up load instructions. */
5778
5779 static void
5780 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
5781 struct displaced_step_closure *dsc)
5782 {
5783 ULONGEST rt_val, rt_val2 = 0, rn_val;
5784
5785 rt_val = displaced_read_reg (regs, dsc, 0);
5786 if (dsc->u.ldst.xfersize == 8)
5787 rt_val2 = displaced_read_reg (regs, dsc, 1);
5788 rn_val = displaced_read_reg (regs, dsc, 2);
5789
5790 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5791 if (dsc->u.ldst.xfersize > 4)
5792 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5793 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5794 if (!dsc->u.ldst.immed)
5795 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5796
5797 /* Handle register writeback. */
5798 if (dsc->u.ldst.writeback)
5799 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5800 /* Put result in right place. */
5801 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
5802 if (dsc->u.ldst.xfersize == 8)
5803 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
5804 }
5805
5806 /* Clean up store instructions. */
5807
5808 static void
5809 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5810 struct displaced_step_closure *dsc)
5811 {
5812 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
5813
5814 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5815 if (dsc->u.ldst.xfersize > 4)
5816 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5817 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5818 if (!dsc->u.ldst.immed)
5819 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5820 if (!dsc->u.ldst.restore_r4)
5821 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5822
5823 /* Writeback. */
5824 if (dsc->u.ldst.writeback)
5825 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5826 }
5827
5828 /* Copy "extra" load/store instructions. These are halfword/doubleword
5829 transfers, which have a different encoding to byte/word transfers. */
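/* For example, a literal-pool load such as "ldrd r2, r3, [pc, #16]" is
   rewritten below as "ldrd r0, r1, [r2, #16]", with r2 holding the original
   PC value; cleanup_load then moves the loaded words from r0/r1 back into
   r2/r3.  */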
5830
5831 static int
5832 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
5833 struct regcache *regs, struct displaced_step_closure *dsc)
5834 {
5835 unsigned int op1 = bits (insn, 20, 24);
5836 unsigned int op2 = bits (insn, 5, 6);
5837 unsigned int rt = bits (insn, 12, 15);
5838 unsigned int rn = bits (insn, 16, 19);
5839 unsigned int rm = bits (insn, 0, 3);
5840 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5841 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5842 int immed = (op1 & 0x4) != 0;
5843 int opcode;
5844 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5845
5846 if (!insn_references_pc (insn, 0x000ff00ful))
5847 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5848
5849 if (debug_displaced)
5850 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
5851 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
5852 (unsigned long) insn);
5853
5854 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5855
5856 if (opcode < 0)
5857 internal_error (__FILE__, __LINE__,
5858 _("copy_extra_ld_st: instruction decode error"));
5859
5860 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5861 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5862 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5863 if (!immed)
5864 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
5865
5866 rt_val = displaced_read_reg (regs, dsc, rt);
5867 if (bytesize[opcode] == 8)
5868 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
5869 rn_val = displaced_read_reg (regs, dsc, rn);
5870 if (!immed)
5871 rm_val = displaced_read_reg (regs, dsc, rm);
5872
5873 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5874 if (bytesize[opcode] == 8)
5875 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5876 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5877 if (!immed)
5878 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5879
5880 dsc->rd = rt;
5881 dsc->u.ldst.xfersize = bytesize[opcode];
5882 dsc->u.ldst.rn = rn;
5883 dsc->u.ldst.immed = immed;
5884 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5885 dsc->u.ldst.restore_r4 = 0;
5886
5887 if (immed)
5888 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5889 ->
5890 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5891 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5892 else
5893 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5894 ->
5895 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5896 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5897
5898 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5899
5900 return 0;
5901 }
5902
5903 /* Copy byte/word loads and stores. */
5904
5905 static int
5906 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
5907 struct regcache *regs,
5908 struct displaced_step_closure *dsc, int load, int byte,
5909 int usermode)
5910 {
5911 int immed = !bit (insn, 25);
5912 unsigned int rt = bits (insn, 12, 15);
5913 unsigned int rn = bits (insn, 16, 19);
5914 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
5915 ULONGEST rt_val, rn_val, rm_val = 0;
5916
5917 if (!insn_references_pc (insn, 0x000ff00ful))
5918 return copy_unmodified (gdbarch, insn, "load/store", dsc);
5919
5920 if (debug_displaced)
5921 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
5922 load ? (byte ? "ldrb" : "ldr")
5923 : (byte ? "strb" : "str"), usermode ? "t" : "",
5924 (unsigned long) insn);
5925
5926 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5927 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5928 if (!immed)
5929 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
5930 if (!load)
5931 dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
5932
5933 rt_val = displaced_read_reg (regs, dsc, rt);
5934 rn_val = displaced_read_reg (regs, dsc, rn);
5935 if (!immed)
5936 rm_val = displaced_read_reg (regs, dsc, rm);
5937
5938 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5939 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5940 if (!immed)
5941 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5942
5943 dsc->rd = rt;
5944 dsc->u.ldst.xfersize = byte ? 1 : 4;
5945 dsc->u.ldst.rn = rn;
5946 dsc->u.ldst.immed = immed;
5947 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5948
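  /* In the common case (a load, or a store of something other than the PC),
     the copied instruction simply has rt/rn/rm remapped onto r0/r2/r3: e.g.
     "ldr r1, [pc, #8]" runs out of line as "ldr r0, [r2, #8]" with r2
     holding the original PC value, and cleanup_load moves r0 back into r1.
     Storing the PC itself needs the longer sequence described next.  */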
5949 /* To write PC we can do:
5950
5951 Before this sequence of instructions:
5952 r0 is the PC value obtained from displaced_read_reg, so r0 = from + 8;
5953 r2 is the Rn value obtained from displaced_read_reg.
5954
5955 Insn1: push {pc} Write address of STR instruction + offset on stack
5956 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
5957 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
5958 = addr(Insn1) + offset - addr(Insn3) - 8
5959 = offset - 16
5960 Insn4: add r4, r4, #8 r4 = offset - 8
5961 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
5962 = from + offset
5963 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
5964
5965 Otherwise we don't know what value to write for PC, since the offset is
5966 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
5967 of this can be found in Section "Saving from r15" in
5968 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
5969
5970 if (load || rt != ARM_PC_REGNUM)
5971 {
5972 dsc->u.ldst.restore_r4 = 0;
5973
5974 if (immed)
5975 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
5976 ->
5977 {ldr,str}[b]<cond> r0, [r2, #imm]. */
5978 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5979 else
5980 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
5981 ->
5982 {ldr,str}[b]<cond> r0, [r2, r3]. */
5983 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5984 }
5985 else
5986 {
5987 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
5988 dsc->u.ldst.restore_r4 = 1;
5989 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
5990 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
5991 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
5992 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
5993 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
5994
5995 /* As above. */
5996 if (immed)
5997 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
5998 else
5999 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
6000
6001 dsc->numinsns = 6;
6002 }
6003
6004 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6005
6006 return 0;
6007 }
6008
6009 /* Cleanup LDM instructions with fully-populated register list. This is an
6010 unfortunate corner case: it's impossible to implement correctly by modifying
6011 the instruction. The issue is as follows: we have an instruction,
6012
6013 ldm rN, {r0-r15}
6014
6015 which we must rewrite to avoid loading PC. A possible solution would be to
6016 do the load in two halves, something like (with suitable cleanup
6017 afterwards):
6018
6019 mov r8, rN
6020 ldm[id][ab] r8!, {r0-r7}
6021 str r7, <temp>
6022 ldm[id][ab] r8, {r7-r14}
6023 <bkpt>
6024
6025 but at present there's no suitable place for <temp>, since the scratch space
6026 is overwritten before the cleanup routine is called. For now, we simply
6027 emulate the instruction. */
6028
6029 static void
6030 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6031 struct displaced_step_closure *dsc)
6032 {
6033 int inc = dsc->u.block.increment;
6034 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6035 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6036 uint32_t regmask = dsc->u.block.regmask;
6037 int regno = inc ? 0 : 15;
6038 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6039 int exception_return = dsc->u.block.load && dsc->u.block.user
6040 && (regmask & 0x8000) != 0;
6041 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6042 int do_transfer = condition_true (dsc->u.block.cond, status);
6043 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6044
6045 if (!do_transfer)
6046 return;
6047
6048 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6049 sensible we can do here. Complain loudly. */
6050 if (exception_return)
6051 error (_("Cannot single-step exception return"));
6052
6053 /* We don't handle any stores here for now. */
6054 gdb_assert (dsc->u.block.load != 0);
6055
6056 if (debug_displaced)
6057 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6058 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6059 dsc->u.block.increment ? "inc" : "dec",
6060 dsc->u.block.before ? "before" : "after");
6061
6062 while (regmask)
6063 {
6064 uint32_t memword;
6065
6066 if (inc)
6067 while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
6068 regno++;
6069 else
6070 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6071 regno--;
6072
6073 xfer_addr += bump_before;
6074
6075 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6076 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6077
6078 xfer_addr += bump_after;
6079
6080 regmask &= ~(1 << regno);
6081 }
6082
6083 if (dsc->u.block.writeback)
6084 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6085 CANNOT_WRITE_PC);
6086 }
6087
6088 /* Clean up an STM which included the PC in the register list. */
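/* The STM ran unmodified at the scratch address, so the word it stored for
   the PC is the scratch address plus some architecture-dependent offset
   (typically 8 or 12).  Recover that offset by subtracting the scratch
   address from the stored word, then patch the stored word to be the
   original instruction's address plus the same offset.  */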
6089
6090 static void
6091 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6092 struct displaced_step_closure *dsc)
6093 {
6094 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6095 int store_executed = condition_true (dsc->u.block.cond, status);
6096 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6097 CORE_ADDR stm_insn_addr;
6098 uint32_t pc_val;
6099 long offset;
6100 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6101
6102 /* If condition code fails, there's nothing else to do. */
6103 if (!store_executed)
6104 return;
6105
6106 if (dsc->u.block.increment)
6107 {
6108 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6109
6110 if (dsc->u.block.before)
6111 pc_stored_at += 4;
6112 }
6113 else
6114 {
6115 pc_stored_at = dsc->u.block.xfer_addr;
6116
6117 if (dsc->u.block.before)
6118 pc_stored_at -= 4;
6119 }
6120
6121 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6122 stm_insn_addr = dsc->scratch_base;
6123 offset = pc_val - stm_insn_addr;
6124
6125 if (debug_displaced)
6126 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6127 "STM instruction\n", offset);
6128
6129 /* Rewrite the stored PC to the proper value for the non-displaced original
6130 instruction. */
6131 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6132 dsc->insn_addr + offset);
6133 }
6134
6135 /* Clean up an LDM which includes the PC in the register list. We clumped all
6136 the registers in the transferred list into a contiguous range r0...rX (to
6137 avoid loading PC directly and losing control of the debugged program), so we
6138 must undo that here. */
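/* For example, if the original instruction was "ldm r0, {r4, r9, pc}",
   copy_block_xfer below rewrites the register list to {r0, r1, r2}, so
   after the out-of-line step r0, r1 and r2 hold the values destined for
   r4, r9 and the PC.  This cleanup walks down from r15, moving r2 -> pc,
   r1 -> r9 and r0 -> r4, and finally restores any of r0-r2 that served
   only as temporaries.  */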
6139
6140 static void
6141 cleanup_block_load_pc (struct gdbarch *gdbarch,
6142 struct regcache *regs,
6143 struct displaced_step_closure *dsc)
6144 {
6145 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6146 int load_executed = condition_true (dsc->u.block.cond, status), i;
6147 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6148 unsigned int regs_loaded = bitcount (mask);
6149 unsigned int num_to_shuffle = regs_loaded, clobbered;
6150
6151 /* The method employed here will fail if the register list is fully populated
6152 (we need to avoid loading PC directly). */
6153 gdb_assert (num_to_shuffle < 16);
6154
6155 if (!load_executed)
6156 return;
6157
6158 clobbered = (1 << num_to_shuffle) - 1;
6159
6160 while (num_to_shuffle > 0)
6161 {
6162 if ((mask & (1 << write_reg)) != 0)
6163 {
6164 unsigned int read_reg = num_to_shuffle - 1;
6165
6166 if (read_reg != write_reg)
6167 {
6168 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
6169 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6170 if (debug_displaced)
6171 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6172 "loaded register r%d to r%d\n"), read_reg,
6173 write_reg);
6174 }
6175 else if (debug_displaced)
6176 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6177 "r%d already in the right place\n"),
6178 write_reg);
6179
6180 clobbered &= ~(1 << write_reg);
6181
6182 num_to_shuffle--;
6183 }
6184
6185 write_reg--;
6186 }
6187
6188 /* Restore any registers we scribbled over. */
6189 for (write_reg = 0; clobbered != 0; write_reg++)
6190 {
6191 if ((clobbered & (1 << write_reg)) != 0)
6192 {
6193 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6194 CANNOT_WRITE_PC);
6195 if (debug_displaced)
6196 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6197 "clobbered register r%d\n"), write_reg);
6198 clobbered &= ~(1 << write_reg);
6199 }
6200 }
6201
6202 /* Perform register writeback manually. */
6203 if (dsc->u.block.writeback)
6204 {
6205 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6206
6207 if (dsc->u.block.increment)
6208 new_rn_val += regs_loaded * 4;
6209 else
6210 new_rn_val -= regs_loaded * 4;
6211
6212 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6213 CANNOT_WRITE_PC);
6214 }
6215 }
6216
6217 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6218 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6219
6220 static int
6221 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6222 struct displaced_step_closure *dsc)
6223 {
6224 int load = bit (insn, 20);
6225 int user = bit (insn, 22);
6226 int increment = bit (insn, 23);
6227 int before = bit (insn, 24);
6228 int writeback = bit (insn, 21);
6229 int rn = bits (insn, 16, 19);
6230
6231 /* Block transfers which don't mention PC can be run directly
6232 out-of-line. */
6233 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6234 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6235
6236 if (rn == ARM_PC_REGNUM)
6237 {
6238 warning (_("displaced: Unpredictable LDM or STM with "
6239 "base register r15"));
6240 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6241 }
6242
6243 if (debug_displaced)
6244 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6245 "%.8lx\n", (unsigned long) insn);
6246
6247 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6248 dsc->u.block.rn = rn;
6249
6250 dsc->u.block.load = load;
6251 dsc->u.block.user = user;
6252 dsc->u.block.increment = increment;
6253 dsc->u.block.before = before;
6254 dsc->u.block.writeback = writeback;
6255 dsc->u.block.cond = bits (insn, 28, 31);
6256
6257 dsc->u.block.regmask = insn & 0xffff;
6258
6259 if (load)
6260 {
6261 if ((insn & 0xffff) == 0xffff)
6262 {
6263 /* LDM with a fully-populated register list. This case is
6264 particularly tricky. Implement for now by fully emulating the
6265 instruction (which might not behave perfectly in all cases, but
6266 these instructions should be rare enough for that not to matter
6267 too much). */
6268 dsc->modinsn[0] = ARM_NOP;
6269
6270 dsc->cleanup = &cleanup_block_load_all;
6271 }
6272 else
6273 {
6274 /* LDM of a list of registers which includes PC. Implement by
6275 rewriting the list of registers to be transferred into a
6276 contiguous chunk r0...rX before doing the transfer, then shuffling
6277 registers into the correct places in the cleanup routine. */
6278 unsigned int regmask = insn & 0xffff;
6279 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6280 unsigned int to = 0, from = 0, i, new_rn;
6281
6282 for (i = 0; i < num_in_list; i++)
6283 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6284
6285 /* Writeback makes things complicated. We need to avoid clobbering
6286 the base register with one of the registers in our modified
6287 register list, but just using a different register can't work in
6288 all cases, e.g.:
6289
6290 ldm r14!, {r0-r13,pc}
6291
6292 which would need to be rewritten as:
6293
6294 ldm rN!, {r0-r14}
6295
6296 but that can't work, because there's no free register for N.
6297
6298 Solve this by turning off the writeback bit, and emulating
6299 writeback manually in the cleanup routine. */
6300
6301 if (writeback)
6302 insn &= ~(1 << 21);
6303
6304 new_regmask = (1 << num_in_list) - 1;
6305
6306 if (debug_displaced)
6307 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6308 "{..., pc}: original reg list %.4x, modified "
6309 "list %.4x\n"), rn, writeback ? "!" : "",
6310 (int) insn & 0xffff, new_regmask);
6311
6312 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6313
6314 dsc->cleanup = &cleanup_block_load_pc;
6315 }
6316 }
6317 else
6318 {
6319 /* STM of a list of registers which includes PC. Run the instruction
6320 as-is, but out of line: this will store the wrong value for the PC,
6321 so we must manually fix up the memory in the cleanup routine.
6322 Doing things this way has the advantage that we can auto-detect
6323 the offset of the PC write (which is architecture-dependent) in
6324 the cleanup routine. */
6325 dsc->modinsn[0] = insn;
6326
6327 dsc->cleanup = &cleanup_block_store_pc;
6328 }
6329
6330 return 0;
6331 }
6332
6333 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6334 for Linux, where some SVC instructions must be treated specially. */
6335
6336 static void
6337 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6338 struct displaced_step_closure *dsc)
6339 {
6340 CORE_ADDR resume_addr = dsc->insn_addr + 4;
6341
6342 if (debug_displaced)
6343 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6344 "%.8lx\n", (unsigned long) resume_addr);
6345
6346 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6347 }
6348
6349 static int
6350 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6351 struct regcache *regs, struct displaced_step_closure *dsc)
6352 {
6353 /* Allow OS-specific code to override SVC handling. */
6354 if (dsc->u.svc.copy_svc_os)
6355 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
6356
6357 if (debug_displaced)
6358 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6359 (unsigned long) insn);
6360
6361 /* Preparation: none.
6362 Insn: unmodified svc.
6363 Cleanup: pc <- insn_addr + 4. */
6364
6365 dsc->modinsn[0] = insn;
6366
6367 dsc->cleanup = &cleanup_svc;
6368 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6369 instruction. */
6370 dsc->wrote_to_pc = 1;
6371
6372 return 0;
6373 }
6374
6375 /* Copy undefined instructions. */
6376
6377 static int
6378 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6379 struct displaced_step_closure *dsc)
6380 {
6381 if (debug_displaced)
6382 fprintf_unfiltered (gdb_stdlog,
6383 "displaced: copying undefined insn %.8lx\n",
6384 (unsigned long) insn);
6385
6386 dsc->modinsn[0] = insn;
6387
6388 return 0;
6389 }
6390
6391 /* Copy unpredictable instructions. */
6392
6393 static int
6394 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6395 struct displaced_step_closure *dsc)
6396 {
6397 if (debug_displaced)
6398 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6399 "%.8lx\n", (unsigned long) insn);
6400
6401 dsc->modinsn[0] = insn;
6402
6403 return 0;
6404 }
6405
6406 /* The decode_* functions are instruction decoding helpers. They mostly follow
6407 the presentation in the ARM ARM. */
6408
6409 static int
6410 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
6411 struct regcache *regs,
6412 struct displaced_step_closure *dsc)
6413 {
6414 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
6415 unsigned int rn = bits (insn, 16, 19);
6416
6417 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
6418 return copy_unmodified (gdbarch, insn, "cps", dsc);
6419 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
6420 return copy_unmodified (gdbarch, insn, "setend", dsc);
6421 else if ((op1 & 0x60) == 0x20)
6422 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
6423 else if ((op1 & 0x71) == 0x40)
6424 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
6425 else if ((op1 & 0x77) == 0x41)
6426 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6427 else if ((op1 & 0x77) == 0x45)
6428 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
6429 else if ((op1 & 0x77) == 0x51)
6430 {
6431 if (rn != 0xf)
6432 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6433 else
6434 return copy_unpred (gdbarch, insn, dsc);
6435 }
6436 else if ((op1 & 0x77) == 0x55)
6437 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6438 else if (op1 == 0x57)
6439 switch (op2)
6440 {
6441 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
6442 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
6443 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
6444 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
6445 default: return copy_unpred (gdbarch, insn, dsc);
6446 }
6447 else if ((op1 & 0x63) == 0x43)
6448 return copy_unpred (gdbarch, insn, dsc);
6449 else if ((op2 & 0x1) == 0x0)
6450 switch (op1 & ~0x80)
6451 {
6452 case 0x61:
6453 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6454 case 0x65:
6455 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
6456 case 0x71: case 0x75:
6457 /* pld/pldw reg. */
6458 return copy_preload_reg (gdbarch, insn, regs, dsc);
6459 case 0x63: case 0x67: case 0x73: case 0x77:
6460 return copy_unpred (gdbarch, insn, dsc);
6461 default:
6462 return copy_undef (gdbarch, insn, dsc);
6463 }
6464 else
6465 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
6466 }
6467
6468 static int
6469 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
6470 struct regcache *regs,
6471 struct displaced_step_closure *dsc)
6472 {
6473 if (bit (insn, 27) == 0)
6474 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
6475 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
6476 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
6477 {
6478 case 0x0: case 0x2:
6479 return copy_unmodified (gdbarch, insn, "srs", dsc);
6480
6481 case 0x1: case 0x3:
6482 return copy_unmodified (gdbarch, insn, "rfe", dsc);
6483
6484 case 0x4: case 0x5: case 0x6: case 0x7:
6485 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
6486
6487 case 0x8:
6488 switch ((insn & 0xe00000) >> 21)
6489 {
6490 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
6491 /* stc/stc2. */
6492 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6493
6494 case 0x2:
6495 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6496
6497 default:
6498 return copy_undef (gdbarch, insn, dsc);
6499 }
6500
6501 case 0x9:
6502 {
6503 int rn_f = (bits (insn, 16, 19) == 0xf);
6504 switch ((insn & 0xe00000) >> 21)
6505 {
6506 case 0x1: case 0x3:
6507 /* ldc/ldc2 imm (undefined for rn == pc). */
6508 return rn_f ? copy_undef (gdbarch, insn, dsc)
6509 : copy_copro_load_store (gdbarch, insn, regs, dsc);
6510
6511 case 0x2:
6512 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6513
6514 case 0x4: case 0x5: case 0x6: case 0x7:
6515 /* ldc/ldc2 lit (undefined for rn != pc). */
6516 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
6517 : copy_undef (gdbarch, insn, dsc);
6518
6519 default:
6520 return copy_undef (gdbarch, insn, dsc);
6521 }
6522 }
6523
6524 case 0xa:
6525 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
6526
6527 case 0xb:
6528 if (bits (insn, 16, 19) == 0xf)
6529 /* ldc/ldc2 lit. */
6530 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6531 else
6532 return copy_undef (gdbarch, insn, dsc);
6533
6534 case 0xc:
6535 if (bit (insn, 4))
6536 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6537 else
6538 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6539
6540 case 0xd:
6541 if (bit (insn, 4))
6542 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6543 else
6544 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6545
6546 default:
6547 return copy_undef (gdbarch, insn, dsc);
6548 }
6549 }
6550
6551 /* Decode miscellaneous instructions in dp/misc encoding space. */
6552
6553 static int
6554 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
6555 struct regcache *regs,
6556 struct displaced_step_closure *dsc)
6557 {
6558 unsigned int op2 = bits (insn, 4, 6);
6559 unsigned int op = bits (insn, 21, 22);
6560 unsigned int op1 = bits (insn, 16, 19);
6561
6562 switch (op2)
6563 {
6564 case 0x0:
6565 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
6566
6567 case 0x1:
6568 if (op == 0x1) /* bx. */
6569 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
6570 else if (op == 0x3)
6571 return copy_unmodified (gdbarch, insn, "clz", dsc);
6572 else
6573 return copy_undef (gdbarch, insn, dsc);
6574
6575 case 0x2:
6576 if (op == 0x1)
6577 /* Not really supported. */
6578 return copy_unmodified (gdbarch, insn, "bxj", dsc);
6579 else
6580 return copy_undef (gdbarch, insn, dsc);
6581
6582 case 0x3:
6583 if (op == 0x1)
6584 return copy_bx_blx_reg (gdbarch, insn,
6585 regs, dsc); /* blx register. */
6586 else
6587 return copy_undef (gdbarch, insn, dsc);
6588
6589 case 0x5:
6590 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
6591
6592 case 0x7:
6593 if (op == 0x1)
6594 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
6595 else if (op == 0x3)
6596 /* Not really supported. */
6597 return copy_unmodified (gdbarch, insn, "smc", dsc);
6598
6599 default:
6600 return copy_undef (gdbarch, insn, dsc);
6601 }
6602 }
6603
6604 static int
6605 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6606 struct displaced_step_closure *dsc)
6607 {
6608 if (bit (insn, 25))
6609 switch (bits (insn, 20, 24))
6610 {
6611 case 0x10:
6612 return copy_unmodified (gdbarch, insn, "movw", dsc);
6613
6614 case 0x14:
6615 return copy_unmodified (gdbarch, insn, "movt", dsc);
6616
6617 case 0x12: case 0x16:
6618 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
6619
6620 default:
6621 return copy_alu_imm (gdbarch, insn, regs, dsc);
6622 }
6623 else
6624 {
6625 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
6626
6627 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
6628 return copy_alu_reg (gdbarch, insn, regs, dsc);
6629 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
6630 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
6631 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
6632 return decode_miscellaneous (gdbarch, insn, regs, dsc);
6633 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
6634 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
6635 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
6636 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
6637 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
6638 return copy_unmodified (gdbarch, insn, "synch", dsc);
6639 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
6640 /* 2nd arg means "unprivileged". */
6641 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
6642 dsc);
6643 }
6644
6645 /* Should be unreachable. */
6646 return 1;
6647 }
6648
6649 static int
6650 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
6651 struct regcache *regs,
6652 struct displaced_step_closure *dsc)
6653 {
6654 int a = bit (insn, 25), b = bit (insn, 4);
6655 uint32_t op1 = bits (insn, 20, 24);
6656 int rn_f = bits (insn, 16, 19) == 0xf;
6657
6658 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
6659 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
6660 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
6661 else if ((!a && (op1 & 0x17) == 0x02)
6662 || (a && (op1 & 0x17) == 0x02 && !b))
6663 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
6664 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
6665 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
6666 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
6667 else if ((!a && (op1 & 0x17) == 0x03)
6668 || (a && (op1 & 0x17) == 0x03 && !b))
6669 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
6670 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
6671 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
6672 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
6673 else if ((!a && (op1 & 0x17) == 0x06)
6674 || (a && (op1 & 0x17) == 0x06 && !b))
6675 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
6676 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
6677 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
6678 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
6679 else if ((!a && (op1 & 0x17) == 0x07)
6680 || (a && (op1 & 0x17) == 0x07 && !b))
6681 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
6682
6683 /* Should be unreachable. */
6684 return 1;
6685 }
6686
6687 static int
6688 decode_media (struct gdbarch *gdbarch, uint32_t insn,
6689 struct displaced_step_closure *dsc)
6690 {
6691 switch (bits (insn, 20, 24))
6692 {
6693 case 0x00: case 0x01: case 0x02: case 0x03:
6694 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
6695
6696 case 0x04: case 0x05: case 0x06: case 0x07:
6697 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
6698
6699 case 0x08: case 0x09: case 0x0a: case 0x0b:
6700 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
6701 return copy_unmodified (gdbarch, insn,
6702 "decode/pack/unpack/saturate/reverse", dsc);
6703
6704 case 0x18:
6705 if (bits (insn, 5, 7) == 0) /* op2. */
6706 {
6707 if (bits (insn, 12, 15) == 0xf)
6708 return copy_unmodified (gdbarch, insn, "usad8", dsc);
6709 else
6710 return copy_unmodified (gdbarch, insn, "usada8", dsc);
6711 }
6712 else
6713 return copy_undef (gdbarch, insn, dsc);
6714
6715 case 0x1a: case 0x1b:
6716 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6717 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
6718 else
6719 return copy_undef (gdbarch, insn, dsc);
6720
6721 case 0x1c: case 0x1d:
6722 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
6723 {
6724 if (bits (insn, 0, 3) == 0xf)
6725 return copy_unmodified (gdbarch, insn, "bfc", dsc);
6726 else
6727 return copy_unmodified (gdbarch, insn, "bfi", dsc);
6728 }
6729 else
6730 return copy_undef (gdbarch, insn, dsc);
6731
6732 case 0x1e: case 0x1f:
6733 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6734 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
6735 else
6736 return copy_undef (gdbarch, insn, dsc);
6737 }
6738
6739 /* Should be unreachable. */
6740 return 1;
6741 }
6742
6743 static int
6744 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
6745 struct regcache *regs, struct displaced_step_closure *dsc)
6746 {
6747 if (bit (insn, 25))
6748 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
6749 else
6750 return copy_block_xfer (gdbarch, insn, regs, dsc);
6751 }
6752
6753 static int
6754 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
6755 struct regcache *regs,
6756 struct displaced_step_closure *dsc)
6757 {
6758 unsigned int opcode = bits (insn, 20, 24);
6759
6760 switch (opcode)
6761 {
6762 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
6763 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
6764
6765 case 0x08: case 0x0a: case 0x0c: case 0x0e:
6766 case 0x12: case 0x16:
6767 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
6768
6769 case 0x09: case 0x0b: case 0x0d: case 0x0f:
6770 case 0x13: case 0x17:
6771 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
6772
6773 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
6774 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
6775 /* Note: no writeback for these instructions. Bit 25 will always be
6776 zero though (via caller), so the following works OK. */
6777 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6778 }
6779
6780 /* Should be unreachable. */
6781 return 1;
6782 }
6783
6784 static int
6785 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6786 struct regcache *regs, struct displaced_step_closure *dsc)
6787 {
6788 unsigned int op1 = bits (insn, 20, 25);
6789 int op = bit (insn, 4);
6790 unsigned int coproc = bits (insn, 8, 11);
6791 unsigned int rn = bits (insn, 16, 19);
6792
6793 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6794 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6795 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6796 && (coproc & 0xe) != 0xa)
6797 /* stc/stc2. */
6798 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6799 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6800 && (coproc & 0xe) != 0xa)
6801 /* ldc/ldc2 imm/lit. */
6802 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6803 else if ((op1 & 0x3e) == 0x00)
6804 return copy_undef (gdbarch, insn, dsc);
6805 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6806 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6807 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6808 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6809 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6810 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6811 else if ((op1 & 0x30) == 0x20 && !op)
6812 {
6813 if ((coproc & 0xe) == 0xa)
6814 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6815 else
6816 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6817 }
6818 else if ((op1 & 0x30) == 0x20 && op)
6819 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6820 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6821 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6822 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6823 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6824 else if ((op1 & 0x30) == 0x30)
6825 return copy_svc (gdbarch, insn, to, regs, dsc);
6826 else
6827 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6828 }
6829
6830 static void
6831 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6832 CORE_ADDR to, struct regcache *regs,
6833 struct displaced_step_closure *dsc)
6834 {
6835 error (_("Displaced stepping is only supported in ARM mode"));
6836 }
6837
6838 void
6839 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6840 CORE_ADDR to, struct regcache *regs,
6841 struct displaced_step_closure *dsc)
6842 {
6843 int err = 0;
6844 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6845 uint32_t insn;
6846
6847 /* Most displaced instructions use a 1-instruction scratch space, so set this
6848 here and override below if/when necessary. */
6849 dsc->numinsns = 1;
6850 dsc->insn_addr = from;
6851 dsc->scratch_base = to;
6852 dsc->cleanup = NULL;
6853 dsc->wrote_to_pc = 0;
6854
6855 if (!displaced_in_arm_mode (regs))
6856 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
6857
6858 dsc->is_thumb = 0;
6859 dsc->insn_size = 4;
6860 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
6861 if (debug_displaced)
6862 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
6863 "at %.8lx\n", (unsigned long) insn,
6864 (unsigned long) from);
6865
6866 if ((insn & 0xf0000000) == 0xf0000000)
6867 err = decode_unconditional (gdbarch, insn, regs, dsc);
6868 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
6869 {
6870 case 0x0: case 0x1: case 0x2: case 0x3:
6871 err = decode_dp_misc (gdbarch, insn, regs, dsc);
6872 break;
6873
6874 case 0x4: case 0x5: case 0x6:
6875 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
6876 break;
6877
6878 case 0x7:
6879 err = decode_media (gdbarch, insn, dsc);
6880 break;
6881
6882 case 0x8: case 0x9: case 0xa: case 0xb:
6883 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
6884 break;
6885
6886 case 0xc: case 0xd: case 0xe: case 0xf:
6887 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
6888 break;
6889 }
6890
6891 if (err)
6892 internal_error (__FILE__, __LINE__,
6893 _("arm_process_displaced_insn: Instruction decode error"));
6894 }
6895
6896 /* Actually set up the scratch space for a displaced instruction. */
6897
6898 void
6899 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
6900 CORE_ADDR to, struct displaced_step_closure *dsc)
6901 {
6902 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6903 unsigned int i, len, offset;
6904 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6905 int size = dsc->is_thumb ? 2 : 4;
6906 const unsigned char *bkp_insn;
6907
6908 offset = 0;
6909 /* Poke modified instruction(s). */
6910 for (i = 0; i < dsc->numinsns; i++)
6911 {
6912 if (debug_displaced)
6913 {
6914 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
6915 if (size == 4)
6916 fprintf_unfiltered (gdb_stdlog, "%.8lx",
6917 dsc->modinsn[i]);
6918 else if (size == 2)
6919 fprintf_unfiltered (gdb_stdlog, "%.4x",
6920 (unsigned short)dsc->modinsn[i]);
6921
6922 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
6923 (unsigned long) to + offset);
6924
6925 }
6926 write_memory_unsigned_integer (to + offset, size,
6927 byte_order_for_code,
6928 dsc->modinsn[i]);
6929 offset += size;
6930 }
6931
6932 /* Choose the correct breakpoint instruction. */
6933 if (dsc->is_thumb)
6934 {
6935 bkp_insn = tdep->thumb_breakpoint;
6936 len = tdep->thumb_breakpoint_size;
6937 }
6938 else
6939 {
6940 bkp_insn = tdep->arm_breakpoint;
6941 len = tdep->arm_breakpoint_size;
6942 }
6943
6944 /* Put breakpoint afterwards. */
6945 write_memory (to + offset, bkp_insn, len);
6946
6947 if (debug_displaced)
6948 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
6949 paddress (gdbarch, from), paddress (gdbarch, to));
6950 }
6951
6952 /* Entry point for copying an instruction into scratch space for displaced
6953 stepping. */
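/* The closure filled in here is later handed back to
   arm_displaced_step_fixup, which runs the recorded cleanup routine and,
   unless the copied instruction itself wrote the PC, advances the PC past
   the original instruction.  */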
6954
6955 struct displaced_step_closure *
6956 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
6957 CORE_ADDR from, CORE_ADDR to,
6958 struct regcache *regs)
6959 {
6960 struct displaced_step_closure *dsc
6961 = xmalloc (sizeof (struct displaced_step_closure));
6962 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
6963 arm_displaced_init_closure (gdbarch, from, to, dsc);
6964
6965 return dsc;
6966 }
6967
6968 /* Entry point for cleaning things up after a displaced instruction has been
6969 single-stepped. */
6970
6971 void
6972 arm_displaced_step_fixup (struct gdbarch *gdbarch,
6973 struct displaced_step_closure *dsc,
6974 CORE_ADDR from, CORE_ADDR to,
6975 struct regcache *regs)
6976 {
6977 if (dsc->cleanup)
6978 dsc->cleanup (gdbarch, regs, dsc);
6979
6980 if (!dsc->wrote_to_pc)
6981 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
6982 dsc->insn_addr + dsc->insn_size);
6983
6984 }
6985
6986 #include "bfd-in2.h"
6987 #include "libcoff.h"
6988
6989 static int
6990 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
6991 {
6992 struct gdbarch *gdbarch = info->application_data;
6993
6994 if (arm_pc_is_thumb (gdbarch, memaddr))
6995 {
6996 static asymbol *asym;
6997 static combined_entry_type ce;
6998 static struct coff_symbol_struct csym;
6999 static struct bfd fake_bfd;
7000 static bfd_target fake_target;
7001
7002 if (csym.native == NULL)
7003 {
7004 /* Create a fake symbol vector containing a Thumb symbol.
7005 This is solely so that the code in print_insn_little_arm()
7006 and print_insn_big_arm() in opcodes/arm-dis.c will detect
7007 the presence of a Thumb symbol and switch to decoding
7008 Thumb instructions. */
7009
7010 fake_target.flavour = bfd_target_coff_flavour;
7011 fake_bfd.xvec = &fake_target;
7012 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
7013 csym.native = &ce;
7014 csym.symbol.the_bfd = &fake_bfd;
7015 csym.symbol.name = "fake";
7016 asym = (asymbol *) & csym;
7017 }
7018
7019 memaddr = UNMAKE_THUMB_ADDR (memaddr);
7020 info->symbols = &asym;
7021 }
7022 else
7023 info->symbols = NULL;
7024
7025 if (info->endian == BFD_ENDIAN_BIG)
7026 return print_insn_big_arm (memaddr, info);
7027 else
7028 return print_insn_little_arm (memaddr, info);
7029 }
7030
7031 /* The following define instruction sequences that will cause ARM
7032 CPUs to take an undefined instruction trap. These are used to
7033 signal a breakpoint to GDB.
7034
7035 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
7036 modes. A different instruction is required for each mode. The ARM
7037 CPUs can also be big or little endian. Thus four different
7038 instructions are needed to support all cases.
7039
7040 Note: ARMv4 defines several new instructions that will take the
7041 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7042 not in fact add the new instructions. The new undefined
7043 instructions in ARMv4 are all instructions that had no defined
7044 behaviour in earlier chips. There is no guarantee that they will
7045 raise an exception; they may instead be treated as NOPs. In practice, it
7046 may only be safe to rely on instructions matching:
7047
7048 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7049 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7050 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7051
7052 Even this may only be true if the condition predicate is true. The
7053 following use a condition predicate of ALWAYS so it is always TRUE.
7054
7055 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7056 and NetBSD all use a software interrupt rather than an undefined
7057 instruction to force a trap. This can be handled by the
7058 ABI-specific code during establishment of the gdbarch vector. */
7059
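/* The ARM breakpoint below is the word 0xe7ffdefe, which lies in the
   permanently undefined space described above (cond = AL, bits 27-25 = 011,
   bit 4 = 1); the Thumb breakpoint 0xbebe is a BKPT instruction.  */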
7060 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7061 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7062 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7063 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
7064
7065 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
7066 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
7067 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
7068 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7069
7070 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
7071 the program counter value to determine whether a 16-bit or 32-bit
7072 breakpoint should be used. It returns a pointer to a string of
7073 bytes that encode a breakpoint instruction, stores the length of
7074 the string to *lenptr, and adjusts the program counter (if
7075 necessary) to point to the actual memory location where the
7076 breakpoint should be inserted. */
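/* For Thumb code we may also need to distinguish 16-bit from 32-bit
   instructions: the first halfword of a 32-bit Thumb-2 instruction has its
   top five bits matching 111xx with xx != 00, which is what the check
   against 0xe000/0x1800 below detects.  */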
7077
7078 static const unsigned char *
7079 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
7080 {
7081 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7082 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7083
7084 if (arm_pc_is_thumb (gdbarch, *pcptr))
7085 {
7086 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
7087
7088 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7089 check whether we are replacing a 32-bit instruction. */
7090 if (tdep->thumb2_breakpoint != NULL)
7091 {
7092 gdb_byte buf[2];
7093 if (target_read_memory (*pcptr, buf, 2) == 0)
7094 {
7095 unsigned short inst1;
7096 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
7097 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
7098 {
7099 *lenptr = tdep->thumb2_breakpoint_size;
7100 return tdep->thumb2_breakpoint;
7101 }
7102 }
7103 }
7104
7105 *lenptr = tdep->thumb_breakpoint_size;
7106 return tdep->thumb_breakpoint;
7107 }
7108 else
7109 {
7110 *lenptr = tdep->arm_breakpoint_size;
7111 return tdep->arm_breakpoint;
7112 }
7113 }
7114
7115 static void
7116 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
7117 int *kindptr)
7118 {
7119 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7120
7121 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
7122
7123 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
7124 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7125 that this is not confused with a 32-bit ARM breakpoint. */
7126 *kindptr = 3;
7127 }
7128
7129 /* Extract from an array REGBUF containing the (raw) register state a
7130 function return value of type TYPE, and copy that, in virtual
7131 format, into VALBUF. */
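/* For example, a 64-bit integer result arrives in r0 and r1 and is copied
   out four bytes at a time below, while an FPA double arrives in f0 and
   must first be converted from the FPA extended internal format.  */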
7132
7133 static void
7134 arm_extract_return_value (struct type *type, struct regcache *regs,
7135 gdb_byte *valbuf)
7136 {
7137 struct gdbarch *gdbarch = get_regcache_arch (regs);
7138 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7139
7140 if (TYPE_CODE_FLT == TYPE_CODE (type))
7141 {
7142 switch (gdbarch_tdep (gdbarch)->fp_model)
7143 {
7144 case ARM_FLOAT_FPA:
7145 {
7146 /* The value is in register F0 in internal format. We need to
7147 extract the raw value and then convert it to the desired
7148 internal type. */
7149 bfd_byte tmpbuf[FP_REGISTER_SIZE];
7150
7151 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
7152 convert_from_extended (floatformat_from_type (type), tmpbuf,
7153 valbuf, gdbarch_byte_order (gdbarch));
7154 }
7155 break;
7156
7157 case ARM_FLOAT_SOFT_FPA:
7158 case ARM_FLOAT_SOFT_VFP:
7159 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7160 not using the VFP ABI code. */
7161 case ARM_FLOAT_VFP:
7162 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
7163 if (TYPE_LENGTH (type) > 4)
7164 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
7165 valbuf + INT_REGISTER_SIZE);
7166 break;
7167
7168 default:
7169 internal_error (__FILE__, __LINE__,
7170 _("arm_extract_return_value: "
7171 "Floating point model not supported"));
7172 break;
7173 }
7174 }
7175 else if (TYPE_CODE (type) == TYPE_CODE_INT
7176 || TYPE_CODE (type) == TYPE_CODE_CHAR
7177 || TYPE_CODE (type) == TYPE_CODE_BOOL
7178 || TYPE_CODE (type) == TYPE_CODE_PTR
7179 || TYPE_CODE (type) == TYPE_CODE_REF
7180 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7181 {
7182 /* If the type is a plain integer, then the access is
7183 straightforward. Otherwise we have to play around a bit
7184 more. */
7185 int len = TYPE_LENGTH (type);
7186 int regno = ARM_A1_REGNUM;
7187 ULONGEST tmp;
7188
7189 while (len > 0)
7190 {
7191 /* By using store_unsigned_integer we avoid having to do
7192 anything special for small big-endian values. */
7193 regcache_cooked_read_unsigned (regs, regno++, &tmp);
7194 store_unsigned_integer (valbuf,
7195 (len > INT_REGISTER_SIZE
7196 ? INT_REGISTER_SIZE : len),
7197 byte_order, tmp);
7198 len -= INT_REGISTER_SIZE;
7199 valbuf += INT_REGISTER_SIZE;
7200 }
7201 }
7202 else
7203 {
7204 /* For a structure or union the behaviour is as if the value had
7205 been stored to word-aligned memory and then loaded into
7206 registers with 32-bit load instruction(s). */
7207 int len = TYPE_LENGTH (type);
7208 int regno = ARM_A1_REGNUM;
7209 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7210
7211 while (len > 0)
7212 {
7213 regcache_cooked_read (regs, regno++, tmpbuf);
7214 memcpy (valbuf, tmpbuf,
7215 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7216 len -= INT_REGISTER_SIZE;
7217 valbuf += INT_REGISTER_SIZE;
7218 }
7219 }
7220 }
7221
7222
7223 /* Will a function return an aggregate type in memory or in a
7224 register? Return 0 if an aggregate type can be returned in a
7225 register, 1 if it must be returned in memory. */
7226
7227 static int
7228 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7229 {
7230 int nRc;
7231 enum type_code code;
7232
7233 CHECK_TYPEDEF (type);
7234
7235 /* In the ARM ABI, "integer" like aggregate types are returned in
7236 registers. For an aggregate type to be integer like, its size
7237 must be less than or equal to INT_REGISTER_SIZE and the
7238 offset of each addressable subfield must be zero. Note that bit
7239 fields are not addressable, and all addressable subfields of
7240 unions always start at offset zero.
7241
7242 This function is based on the behaviour of GCC 2.95.1.
7243 See: gcc/arm.c: arm_return_in_memory() for details.
7244
7245 Note: All versions of GCC before GCC 2.95.2 do not set up the
7246 parameters correctly for a function returning the following
7247 structure: struct { float f;}; This should be returned in memory,
7248 not a register. Richard Earnshaw sent me a patch, but I do not
7249 know of any way to detect if a function like the above has been
7250 compiled with the correct calling convention. */
7251
7252 /* All aggregate types that won't fit in a register must be returned
7253 in memory. */
7254 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7255 {
7256 return 1;
7257 }
7258
7259 /* The AAPCS says all aggregates not larger than a word are returned
7260 in a register. */
7261 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7262 return 0;
7263
7264 /* The only aggregate types that can be returned in a register are
7265 structs and unions. Arrays must be returned in memory. */
7266 code = TYPE_CODE (type);
7267 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
7268 {
7269 return 1;
7270 }
7271
7272 /* Assume all other aggregate types can be returned in a register.
 7273 	     Run a check for structures and unions.  */
7274 nRc = 0;
7275
7276 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7277 {
7278 int i;
7279 /* Need to check if this struct/union is "integer" like. For
7280 this to be true, its size must be less than or equal to
7281 INT_REGISTER_SIZE and the offset of each addressable
7282 subfield must be zero. Note that bit fields are not
7283 addressable, and unions always start at offset zero. If any
7284 of the subfields is a floating point type, the struct/union
7285 cannot be an integer type. */
7286
7287 /* For each field in the object, check:
7288 1) Is it FP? --> yes, nRc = 1;
7289 2) Is it addressable (bitpos != 0) and
7290 not packed (bitsize == 0)?
7291 --> yes, nRc = 1
7292 */
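      /* For example, "struct { int i; }" and "union { int i; char c; }"
	 pass these checks and are returned in r0, while
	 "struct { float f; }" (floating-point field) and
	 "struct { char a; char b; }" (subfield at a non-zero offset)
	 must be returned in memory.  */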
7293
7294 for (i = 0; i < TYPE_NFIELDS (type); i++)
7295 {
7296 enum type_code field_type_code;
7297 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7298 i)));
7299
7300 /* Is it a floating point type field? */
7301 if (field_type_code == TYPE_CODE_FLT)
7302 {
7303 nRc = 1;
7304 break;
7305 }
7306
7307 /* If bitpos != 0, then we have to care about it. */
7308 if (TYPE_FIELD_BITPOS (type, i) != 0)
7309 {
7310 /* Bitfields are not addressable. If the field bitsize is
7311 zero, then the field is not packed. Hence it cannot be
7312 a bitfield or any other packed type. */
7313 if (TYPE_FIELD_BITSIZE (type, i) == 0)
7314 {
7315 nRc = 1;
7316 break;
7317 }
7318 }
7319 }
7320 }
7321
7322 return nRc;
7323 }
7324
7325 /* Write into appropriate registers a function return value of type
7326 TYPE, given in virtual format. */
7327
7328 static void
7329 arm_store_return_value (struct type *type, struct regcache *regs,
7330 const gdb_byte *valbuf)
7331 {
7332 struct gdbarch *gdbarch = get_regcache_arch (regs);
7333 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7334
7335 if (TYPE_CODE (type) == TYPE_CODE_FLT)
7336 {
7337 char buf[MAX_REGISTER_SIZE];
7338
7339 switch (gdbarch_tdep (gdbarch)->fp_model)
7340 {
7341 case ARM_FLOAT_FPA:
7342
7343 convert_to_extended (floatformat_from_type (type), buf, valbuf,
7344 gdbarch_byte_order (gdbarch));
7345 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
7346 break;
7347
7348 case ARM_FLOAT_SOFT_FPA:
7349 case ARM_FLOAT_SOFT_VFP:
 7350 	/* ARM_FLOAT_VFP can arise if this is a variadic function, so
 7351 	   we are not using the VFP ABI code.  */
7352 case ARM_FLOAT_VFP:
7353 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
7354 if (TYPE_LENGTH (type) > 4)
7355 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
7356 valbuf + INT_REGISTER_SIZE);
7357 break;
7358
7359 default:
7360 internal_error (__FILE__, __LINE__,
7361 _("arm_store_return_value: Floating "
7362 "point model not supported"));
7363 break;
7364 }
7365 }
7366 else if (TYPE_CODE (type) == TYPE_CODE_INT
7367 || TYPE_CODE (type) == TYPE_CODE_CHAR
7368 || TYPE_CODE (type) == TYPE_CODE_BOOL
7369 || TYPE_CODE (type) == TYPE_CODE_PTR
7370 || TYPE_CODE (type) == TYPE_CODE_REF
7371 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7372 {
7373 if (TYPE_LENGTH (type) <= 4)
7374 {
7375 /* Values of one word or less are zero/sign-extended and
7376 returned in r0. */
7377 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7378 LONGEST val = unpack_long (type, valbuf);
7379
7380 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
7381 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
7382 }
7383 else
7384 {
7385 /* Integral values greater than one word are stored in consecutive
7386 registers starting with r0. This will always be a multiple of
 7387 	     the register size.  */
7388 int len = TYPE_LENGTH (type);
7389 int regno = ARM_A1_REGNUM;
7390
7391 while (len > 0)
7392 {
7393 regcache_cooked_write (regs, regno++, valbuf);
7394 len -= INT_REGISTER_SIZE;
7395 valbuf += INT_REGISTER_SIZE;
7396 }
7397 }
7398 }
7399 else
7400 {
7401 /* For a structure or union the behaviour is as if the value had
7402 been stored to word-aligned memory and then loaded into
7403 registers with 32-bit load instruction(s). */
7404 int len = TYPE_LENGTH (type);
7405 int regno = ARM_A1_REGNUM;
7406 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7407
7408 while (len > 0)
7409 {
7410 memcpy (tmpbuf, valbuf,
7411 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7412 regcache_cooked_write (regs, regno++, tmpbuf);
7413 len -= INT_REGISTER_SIZE;
7414 valbuf += INT_REGISTER_SIZE;
7415 }
7416 }
7417 }
7418
7419
7420 /* Handle function return values. */
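/* With the VFP (hard float) variant of the AAPCS, a homogeneous
   aggregate of up to four floating-point or vector members is
   returned in consecutive s, d or q registers; that case is handled
   first below.  */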
7421
7422 static enum return_value_convention
7423 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
7424 struct type *valtype, struct regcache *regcache,
7425 gdb_byte *readbuf, const gdb_byte *writebuf)
7426 {
7427 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7428 enum arm_vfp_cprc_base_type vfp_base_type;
7429 int vfp_base_count;
7430
7431 if (arm_vfp_abi_for_function (gdbarch, func_type)
7432 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
7433 {
7434 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
7435 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
7436 int i;
7437 for (i = 0; i < vfp_base_count; i++)
7438 {
7439 if (reg_char == 'q')
7440 {
7441 if (writebuf)
7442 arm_neon_quad_write (gdbarch, regcache, i,
7443 writebuf + i * unit_length);
7444
7445 if (readbuf)
7446 arm_neon_quad_read (gdbarch, regcache, i,
7447 readbuf + i * unit_length);
7448 }
7449 else
7450 {
7451 char name_buf[4];
7452 int regnum;
7453
7454 sprintf (name_buf, "%c%d", reg_char, i);
7455 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7456 strlen (name_buf));
7457 if (writebuf)
7458 regcache_cooked_write (regcache, regnum,
7459 writebuf + i * unit_length);
7460 if (readbuf)
7461 regcache_cooked_read (regcache, regnum,
7462 readbuf + i * unit_length);
7463 }
7464 }
7465 return RETURN_VALUE_REGISTER_CONVENTION;
7466 }
7467
7468 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
7469 || TYPE_CODE (valtype) == TYPE_CODE_UNION
7470 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
7471 {
7472 if (tdep->struct_return == pcc_struct_return
7473 || arm_return_in_memory (gdbarch, valtype))
7474 return RETURN_VALUE_STRUCT_CONVENTION;
7475 }
7476
7477 if (writebuf)
7478 arm_store_return_value (valtype, regcache, writebuf);
7479
7480 if (readbuf)
7481 arm_extract_return_value (valtype, regcache, readbuf);
7482
7483 return RETURN_VALUE_REGISTER_CONVENTION;
7484 }
7485
7486
7487 static int
7488 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
7489 {
7490 struct gdbarch *gdbarch = get_frame_arch (frame);
7491 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7492 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7493 CORE_ADDR jb_addr;
7494 char buf[INT_REGISTER_SIZE];
7495
7496 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
7497
7498 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
7499 INT_REGISTER_SIZE))
7500 return 0;
7501
7502 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
7503 return 1;
7504 }
7505
7506 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7507 return the target PC. Otherwise return 0. */
7508
7509 CORE_ADDR
7510 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
7511 {
7512 char *name;
7513 int namelen;
7514 CORE_ADDR start_addr;
7515
7516 /* Find the starting address and name of the function containing the PC. */
7517 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
7518 return 0;
7519
7520 /* If PC is in a Thumb call or return stub, return the address of the
7521 target PC, which is in a register. The thunk functions are called
7522 _call_via_xx, where x is the register name. The possible names
7523 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
7524 functions, named __ARM_call_via_r[0-7]. */
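  /* For example, a call through "_call_via_r3" simply branches to the
     address currently in r3, so the target PC is the value of r3 in
     this frame.  */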
7525 if (strncmp (name, "_call_via_", 10) == 0
7526 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
7527 {
7528 /* Use the name suffix to determine which register contains the
7529 target PC. */
7530 static char *table[15] =
7531 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
7532 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
7533 };
7534 int regno;
7535 int offset = strlen (name) - 2;
7536
7537 for (regno = 0; regno <= 14; regno++)
7538 if (strcmp (&name[offset], table[regno]) == 0)
7539 return get_frame_register_unsigned (frame, regno);
7540 }
7541
7542 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
7543 non-interworking calls to foo. We could decode the stubs
7544 to find the target but it's easier to use the symbol table. */
7545 namelen = strlen (name);
7546 if (name[0] == '_' && name[1] == '_'
7547 && ((namelen > 2 + strlen ("_from_thumb")
7548 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
7549 strlen ("_from_thumb")) == 0)
7550 || (namelen > 2 + strlen ("_from_arm")
7551 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
7552 strlen ("_from_arm")) == 0)))
7553 {
7554 char *target_name;
7555 int target_len = namelen - 2;
7556 struct minimal_symbol *minsym;
7557 struct objfile *objfile;
7558 struct obj_section *sec;
7559
7560 if (name[namelen - 1] == 'b')
7561 target_len -= strlen ("_from_thumb");
7562 else
7563 target_len -= strlen ("_from_arm");
7564
7565 target_name = alloca (target_len + 1);
7566 memcpy (target_name, name + 2, target_len);
7567 target_name[target_len] = '\0';
7568
7569 sec = find_pc_section (pc);
7570 objfile = (sec == NULL) ? NULL : sec->objfile;
7571 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
7572 if (minsym != NULL)
7573 return SYMBOL_VALUE_ADDRESS (minsym);
7574 else
7575 return 0;
7576 }
7577
7578 return 0; /* not a stub */
7579 }
7580
7581 static void
7582 set_arm_command (char *args, int from_tty)
7583 {
7584 printf_unfiltered (_("\
7585 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7586 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
7587 }
7588
7589 static void
7590 show_arm_command (char *args, int from_tty)
7591 {
7592 cmd_show_list (showarmcmdlist, from_tty, "");
7593 }
7594
7595 static void
7596 arm_update_current_architecture (void)
7597 {
7598 struct gdbarch_info info;
7599
7600 /* If the current architecture is not ARM, we have nothing to do. */
7601 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
7602 return;
7603
7604 /* Update the architecture. */
7605 gdbarch_info_init (&info);
7606
7607 if (!gdbarch_update_p (info))
7608 internal_error (__FILE__, __LINE__, _("could not update architecture"));
7609 }
7610
7611 static void
7612 set_fp_model_sfunc (char *args, int from_tty,
7613 struct cmd_list_element *c)
7614 {
7615 enum arm_float_model fp_model;
7616
7617 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
7618 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
7619 {
7620 arm_fp_model = fp_model;
7621 break;
7622 }
7623
7624 if (fp_model == ARM_FLOAT_LAST)
7625 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
7626 current_fp_model);
7627
7628 arm_update_current_architecture ();
7629 }
7630
7631 static void
7632 show_fp_model (struct ui_file *file, int from_tty,
7633 struct cmd_list_element *c, const char *value)
7634 {
7635 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7636
7637 if (arm_fp_model == ARM_FLOAT_AUTO
7638 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7639 fprintf_filtered (file, _("\
7640 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
7641 fp_model_strings[tdep->fp_model]);
7642 else
7643 fprintf_filtered (file, _("\
7644 The current ARM floating point model is \"%s\".\n"),
7645 fp_model_strings[arm_fp_model]);
7646 }
7647
7648 static void
7649 arm_set_abi (char *args, int from_tty,
7650 struct cmd_list_element *c)
7651 {
7652 enum arm_abi_kind arm_abi;
7653
7654 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
7655 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
7656 {
7657 arm_abi_global = arm_abi;
7658 break;
7659 }
7660
7661 if (arm_abi == ARM_ABI_LAST)
7662 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
7663 arm_abi_string);
7664
7665 arm_update_current_architecture ();
7666 }
7667
7668 static void
7669 arm_show_abi (struct ui_file *file, int from_tty,
7670 struct cmd_list_element *c, const char *value)
7671 {
7672 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7673
7674 if (arm_abi_global == ARM_ABI_AUTO
7675 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7676 fprintf_filtered (file, _("\
7677 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
7678 arm_abi_strings[tdep->arm_abi]);
7679 else
7680 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
7681 arm_abi_string);
7682 }
7683
7684 static void
7685 arm_show_fallback_mode (struct ui_file *file, int from_tty,
7686 struct cmd_list_element *c, const char *value)
7687 {
7688 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7689
7690 fprintf_filtered (file,
7691 _("The current execution mode assumed "
7692 "(when symbols are unavailable) is \"%s\".\n"),
7693 arm_fallback_mode_string);
7694 }
7695
7696 static void
7697 arm_show_force_mode (struct ui_file *file, int from_tty,
7698 struct cmd_list_element *c, const char *value)
7699 {
7700 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7701
7702 fprintf_filtered (file,
7703 _("The current execution mode assumed "
7704 "(even when symbols are available) is \"%s\".\n"),
7705 arm_force_mode_string);
7706 }
7707
7708 /* If the user changes the register disassembly style used for info
7709 register and other commands, we have to also switch the style used
7710 in opcodes for disassembly output. This function is run in the "set
7711 arm disassembly" command, and does that. */
7712
7713 static void
7714 set_disassembly_style_sfunc (char *args, int from_tty,
7715 struct cmd_list_element *c)
7716 {
7717 set_disassembly_style ();
7718 }
7719 \f
7720 /* Return the ARM register name corresponding to register I. */
7721 static const char *
7722 arm_register_name (struct gdbarch *gdbarch, int i)
7723 {
7724 const int num_regs = gdbarch_num_regs (gdbarch);
7725
7726 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
7727 && i >= num_regs && i < num_regs + 32)
7728 {
7729 static const char *const vfp_pseudo_names[] = {
7730 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7731 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7732 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7733 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7734 };
7735
7736 return vfp_pseudo_names[i - num_regs];
7737 }
7738
7739 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
7740 && i >= num_regs + 32 && i < num_regs + 32 + 16)
7741 {
7742 static const char *const neon_pseudo_names[] = {
7743 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7744 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7745 };
7746
7747 return neon_pseudo_names[i - num_regs - 32];
7748 }
7749
7750 if (i >= ARRAY_SIZE (arm_register_names))
7751 /* These registers are only supported on targets which supply
7752 an XML description. */
7753 return "";
7754
7755 return arm_register_names[i];
7756 }
7757
7758 static void
7759 set_disassembly_style (void)
7760 {
7761 int current;
7762
7763 /* Find the style that the user wants. */
7764 for (current = 0; current < num_disassembly_options; current++)
7765 if (disassembly_style == valid_disassembly_styles[current])
7766 break;
7767 gdb_assert (current < num_disassembly_options);
7768
7769 /* Synchronize the disassembler. */
7770 set_arm_regname_option (current);
7771 }
7772
7773 /* Test whether the coff symbol specific value corresponds to a Thumb
7774 function. */
7775
7776 static int
7777 coff_sym_is_thumb (int val)
7778 {
7779 return (val == C_THUMBEXT
7780 || val == C_THUMBSTAT
7781 || val == C_THUMBEXTFUNC
7782 || val == C_THUMBSTATFUNC
7783 || val == C_THUMBLABEL);
7784 }
7785
7786 /* arm_coff_make_msymbol_special()
7787 arm_elf_make_msymbol_special()
7788
7789 These functions test whether the COFF or ELF symbol corresponds to
7790 an address in thumb code, and set a "special" bit in a minimal
7791 symbol to indicate that it does. */
7792
7793 static void
7794 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
7795 {
7796 /* Thumb symbols are of type STT_LOPROC, (synonymous with
7797 STT_ARM_TFUNC). */
7798 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
7799 == STT_LOPROC)
7800 MSYMBOL_SET_SPECIAL (msym);
7801 }
7802
7803 static void
7804 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
7805 {
7806 if (coff_sym_is_thumb (val))
7807 MSYMBOL_SET_SPECIAL (msym);
7808 }
7809
7810 static void
7811 arm_objfile_data_free (struct objfile *objfile, void *arg)
7812 {
7813 struct arm_per_objfile *data = arg;
7814 unsigned int i;
7815
7816 for (i = 0; i < objfile->obfd->section_count; i++)
7817 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
7818 }
7819
7820 static void
7821 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
7822 asymbol *sym)
7823 {
7824 const char *name = bfd_asymbol_name (sym);
7825 struct arm_per_objfile *data;
7826 VEC(arm_mapping_symbol_s) **map_p;
7827 struct arm_mapping_symbol new_map_sym;
7828
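  /* Mapping symbols: "$a" marks the start of ARM code, "$t" the start
     of Thumb code, and "$d" the start of data; any other "$" symbol is
     ignored here.  */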
7829 gdb_assert (name[0] == '$');
7830 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
7831 return;
7832
7833 data = objfile_data (objfile, arm_objfile_data_key);
7834 if (data == NULL)
7835 {
7836 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
7837 struct arm_per_objfile);
7838 set_objfile_data (objfile, arm_objfile_data_key, data);
7839 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
7840 objfile->obfd->section_count,
7841 VEC(arm_mapping_symbol_s) *);
7842 }
7843 map_p = &data->section_maps[bfd_get_section (sym)->index];
7844
7845 new_map_sym.value = sym->value;
7846 new_map_sym.type = name[1];
7847
7848 /* Assume that most mapping symbols appear in order of increasing
7849 value. If they were randomly distributed, it would be faster to
7850 always push here and then sort at first use. */
7851 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
7852 {
7853 struct arm_mapping_symbol *prev_map_sym;
7854
7855 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
7856 if (prev_map_sym->value >= sym->value)
7857 {
7858 unsigned int idx;
7859 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
7860 arm_compare_mapping_symbols);
7861 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
7862 return;
7863 }
7864 }
7865
7866 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
7867 }
7868
7869 static void
7870 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7871 {
7872 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7873 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7874
7875 /* If necessary, set the T bit. */
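  /* The T bit selects between ARM and Thumb execution state, so it
     must agree with the kind of code at the new PC.  */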
7876 if (arm_apcs_32)
7877 {
7878 ULONGEST val, t_bit;
7879 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7880 t_bit = arm_psr_thumb_bit (gdbarch);
7881 if (arm_pc_is_thumb (gdbarch, pc))
7882 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7883 val | t_bit);
7884 else
7885 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7886 val & ~t_bit);
7887 }
7888 }
7889
7890 /* Read the contents of a NEON quad register, by reading from two
7891 double registers. This is used to implement the quad pseudo
7892 registers, and for argument passing in case the quad registers are
7893 missing; vectors are passed in quad registers when using the VFP
7894 ABI, even if a NEON unit is not present. REGNUM is the index of
7895 the quad register, in [0, 15]. */
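/* For example, q1 is made up of d2 (least significant half) and d3;
   on a big-endian target d3's bytes are copied to the start of BUF,
   on a little-endian target d2's are.  */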
7896
7897 static enum register_status
7898 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
7899 int regnum, gdb_byte *buf)
7900 {
7901 char name_buf[4];
7902 gdb_byte reg_buf[8];
7903 int offset, double_regnum;
7904 enum register_status status;
7905
7906 sprintf (name_buf, "d%d", regnum << 1);
7907 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7908 strlen (name_buf));
7909
7910 /* d0 is always the least significant half of q0. */
7911 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7912 offset = 8;
7913 else
7914 offset = 0;
7915
7916 status = regcache_raw_read (regcache, double_regnum, reg_buf);
7917 if (status != REG_VALID)
7918 return status;
7919 memcpy (buf + offset, reg_buf, 8);
7920
7921 offset = 8 - offset;
7922 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
7923 if (status != REG_VALID)
7924 return status;
7925 memcpy (buf + offset, reg_buf, 8);
7926
7927 return REG_VALID;
7928 }
7929
7930 static enum register_status
7931 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
7932 int regnum, gdb_byte *buf)
7933 {
7934 const int num_regs = gdbarch_num_regs (gdbarch);
7935 char name_buf[4];
7936 gdb_byte reg_buf[8];
7937 int offset, double_regnum;
7938
7939 gdb_assert (regnum >= num_regs);
7940 regnum -= num_regs;
7941
7942 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7943 /* Quad-precision register. */
7944 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
7945 else
7946 {
7947 enum register_status status;
7948
7949 /* Single-precision register. */
7950 gdb_assert (regnum < 32);
7951
7952 /* s0 is always the least significant half of d0. */
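      /* For instance, s5 is the most significant half of d2, so on a
	 little-endian target it is found at byte offset 4 within d2's
	 raw contents.  */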
7953 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7954 offset = (regnum & 1) ? 0 : 4;
7955 else
7956 offset = (regnum & 1) ? 4 : 0;
7957
7958 sprintf (name_buf, "d%d", regnum >> 1);
7959 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7960 strlen (name_buf));
7961
7962 status = regcache_raw_read (regcache, double_regnum, reg_buf);
7963 if (status == REG_VALID)
7964 memcpy (buf, reg_buf + offset, 4);
7965 return status;
7966 }
7967 }
7968
7969 /* Store the contents of BUF to a NEON quad register, by writing to
7970 two double registers. This is used to implement the quad pseudo
7971 registers, and for argument passing in case the quad registers are
7972 missing; vectors are passed in quad registers when using the VFP
7973 ABI, even if a NEON unit is not present. REGNUM is the index
7974 of the quad register, in [0, 15]. */
7975
7976 static void
7977 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
7978 int regnum, const gdb_byte *buf)
7979 {
7980 char name_buf[4];
7981 gdb_byte reg_buf[8];
7982 int offset, double_regnum;
7983
7984 sprintf (name_buf, "d%d", regnum << 1);
7985 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7986 strlen (name_buf));
7987
7988 /* d0 is always the least significant half of q0. */
7989 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7990 offset = 8;
7991 else
7992 offset = 0;
7993
7994 regcache_raw_write (regcache, double_regnum, buf + offset);
7995 offset = 8 - offset;
7996 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
7997 }
7998
7999 static void
8000 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
8001 int regnum, const gdb_byte *buf)
8002 {
8003 const int num_regs = gdbarch_num_regs (gdbarch);
8004 char name_buf[4];
8005 gdb_byte reg_buf[8];
8006 int offset, double_regnum;
8007
8008 gdb_assert (regnum >= num_regs);
8009 regnum -= num_regs;
8010
8011 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8012 /* Quad-precision register. */
8013 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
8014 else
8015 {
8016 /* Single-precision register. */
8017 gdb_assert (regnum < 32);
8018
8019 /* s0 is always the least significant half of d0. */
8020 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8021 offset = (regnum & 1) ? 0 : 4;
8022 else
8023 offset = (regnum & 1) ? 4 : 0;
8024
8025 sprintf (name_buf, "d%d", regnum >> 1);
8026 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8027 strlen (name_buf));
8028
8029 regcache_raw_read (regcache, double_regnum, reg_buf);
8030 memcpy (reg_buf + offset, buf, 4);
8031 regcache_raw_write (regcache, double_regnum, reg_buf);
8032 }
8033 }
8034
8035 static struct value *
8036 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8037 {
8038 const int *reg_p = baton;
8039 return value_of_register (*reg_p, frame);
8040 }
8041 \f
8042 static enum gdb_osabi
8043 arm_elf_osabi_sniffer (bfd *abfd)
8044 {
8045 unsigned int elfosabi;
8046 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8047
8048 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8049
8050 if (elfosabi == ELFOSABI_ARM)
8051 /* GNU tools use this value. Check note sections in this case,
8052 as well. */
8053 bfd_map_over_sections (abfd,
8054 generic_elf_osabi_sniff_abi_tag_sections,
8055 &osabi);
8056
8057 /* Anything else will be handled by the generic ELF sniffer. */
8058 return osabi;
8059 }
8060
8061 static int
8062 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8063 struct reggroup *group)
8064 {
 8065   /* FPS register's type is INT, but belongs to float_reggroup.  Besides
 8066      this, the FPS register belongs to save_reggroup, restore_reggroup, and
8067 all_reggroup, of course. */
8068 if (regnum == ARM_FPS_REGNUM)
8069 return (group == float_reggroup
8070 || group == save_reggroup
8071 || group == restore_reggroup
8072 || group == all_reggroup);
8073 else
8074 return default_register_reggroup_p (gdbarch, regnum, group);
8075 }
8076
8077 \f
8078 /* Initialize the current architecture based on INFO. If possible,
8079 re-use an architecture from ARCHES, which is a list of
8080 architectures already created during this debugging session.
8081
8082 Called e.g. at program startup, when reading a core file, and when
8083 reading a binary file. */
8084
8085 static struct gdbarch *
8086 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8087 {
8088 struct gdbarch_tdep *tdep;
8089 struct gdbarch *gdbarch;
8090 struct gdbarch_list *best_arch;
8091 enum arm_abi_kind arm_abi = arm_abi_global;
8092 enum arm_float_model fp_model = arm_fp_model;
8093 struct tdesc_arch_data *tdesc_data = NULL;
8094 int i, is_m = 0;
8095 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
8096 int have_neon = 0;
8097 int have_fpa_registers = 1;
8098 const struct target_desc *tdesc = info.target_desc;
8099
8100 /* If we have an object to base this architecture on, try to determine
8101 its ABI. */
8102
8103 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8104 {
8105 int ei_osabi, e_flags;
8106
8107 switch (bfd_get_flavour (info.abfd))
8108 {
8109 case bfd_target_aout_flavour:
8110 /* Assume it's an old APCS-style ABI. */
8111 arm_abi = ARM_ABI_APCS;
8112 break;
8113
8114 case bfd_target_coff_flavour:
8115 /* Assume it's an old APCS-style ABI. */
8116 /* XXX WinCE? */
8117 arm_abi = ARM_ABI_APCS;
8118 break;
8119
8120 case bfd_target_elf_flavour:
8121 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8122 e_flags = elf_elfheader (info.abfd)->e_flags;
8123
8124 if (ei_osabi == ELFOSABI_ARM)
8125 {
8126 /* GNU tools used to use this value, but do not for EABI
8127 objects. There's nowhere to tag an EABI version
8128 anyway, so assume APCS. */
8129 arm_abi = ARM_ABI_APCS;
8130 }
8131 else if (ei_osabi == ELFOSABI_NONE)
8132 {
8133 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8134 int attr_arch, attr_profile;
8135
8136 switch (eabi_ver)
8137 {
8138 case EF_ARM_EABI_UNKNOWN:
8139 /* Assume GNU tools. */
8140 arm_abi = ARM_ABI_APCS;
8141 break;
8142
8143 case EF_ARM_EABI_VER4:
8144 case EF_ARM_EABI_VER5:
8145 arm_abi = ARM_ABI_AAPCS;
8146 /* EABI binaries default to VFP float ordering.
8147 They may also contain build attributes that can
8148 be used to identify if the VFP argument-passing
8149 ABI is in use. */
8150 if (fp_model == ARM_FLOAT_AUTO)
8151 {
8152 #ifdef HAVE_ELF
8153 switch (bfd_elf_get_obj_attr_int (info.abfd,
8154 OBJ_ATTR_PROC,
8155 Tag_ABI_VFP_args))
8156 {
8157 case 0:
8158 /* "The user intended FP parameter/result
8159 passing to conform to AAPCS, base
8160 variant". */
8161 fp_model = ARM_FLOAT_SOFT_VFP;
8162 break;
8163 case 1:
8164 /* "The user intended FP parameter/result
8165 passing to conform to AAPCS, VFP
8166 variant". */
8167 fp_model = ARM_FLOAT_VFP;
8168 break;
8169 case 2:
8170 /* "The user intended FP parameter/result
8171 passing to conform to tool chain-specific
8172 conventions" - we don't know any such
8173 conventions, so leave it as "auto". */
8174 break;
8175 default:
8176 /* Attribute value not mentioned in the
8177 October 2008 ABI, so leave it as
8178 "auto". */
8179 break;
8180 }
8181 #else
8182 fp_model = ARM_FLOAT_SOFT_VFP;
8183 #endif
8184 }
8185 break;
8186
8187 default:
8188 /* Leave it as "auto". */
8189 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
8190 break;
8191 }
8192
8193 #ifdef HAVE_ELF
8194 /* Detect M-profile programs. This only works if the
8195 executable file includes build attributes; GCC does
8196 copy them to the executable, but e.g. RealView does
8197 not. */
8198 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
8199 Tag_CPU_arch);
8200 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
8201 OBJ_ATTR_PROC,
8202 Tag_CPU_arch_profile);
8203 /* GCC specifies the profile for v6-M; RealView only
8204 specifies the profile for architectures starting with
8205 V7 (as opposed to architectures with a tag
8206 numerically greater than TAG_CPU_ARCH_V7). */
8207 if (!tdesc_has_registers (tdesc)
8208 && (attr_arch == TAG_CPU_ARCH_V6_M
8209 || attr_arch == TAG_CPU_ARCH_V6S_M
8210 || attr_profile == 'M'))
8211 tdesc = tdesc_arm_with_m;
8212 #endif
8213 }
8214
8215 if (fp_model == ARM_FLOAT_AUTO)
8216 {
8217 int e_flags = elf_elfheader (info.abfd)->e_flags;
8218
8219 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
8220 {
8221 case 0:
8222 /* Leave it as "auto". Strictly speaking this case
8223 means FPA, but almost nobody uses that now, and
8224 many toolchains fail to set the appropriate bits
8225 for the floating-point model they use. */
8226 break;
8227 case EF_ARM_SOFT_FLOAT:
8228 fp_model = ARM_FLOAT_SOFT_FPA;
8229 break;
8230 case EF_ARM_VFP_FLOAT:
8231 fp_model = ARM_FLOAT_VFP;
8232 break;
8233 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
8234 fp_model = ARM_FLOAT_SOFT_VFP;
8235 break;
8236 }
8237 }
8238
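	  /* BE8 images use a little-endian instruction stream even
	     though data is big-endian, so read code as little-endian.  */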
8239 if (e_flags & EF_ARM_BE8)
8240 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
8241
8242 break;
8243
8244 default:
8245 /* Leave it as "auto". */
8246 break;
8247 }
8248 }
8249
8250 /* Check any target description for validity. */
8251 if (tdesc_has_registers (tdesc))
8252 {
8253 /* For most registers we require GDB's default names; but also allow
8254 the numeric names for sp / lr / pc, as a convenience. */
8255 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
8256 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
8257 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
8258
8259 const struct tdesc_feature *feature;
8260 int valid_p;
8261
8262 feature = tdesc_find_feature (tdesc,
8263 "org.gnu.gdb.arm.core");
8264 if (feature == NULL)
8265 {
8266 feature = tdesc_find_feature (tdesc,
8267 "org.gnu.gdb.arm.m-profile");
8268 if (feature == NULL)
8269 return NULL;
8270 else
8271 is_m = 1;
8272 }
8273
8274 tdesc_data = tdesc_data_alloc ();
8275
8276 valid_p = 1;
8277 for (i = 0; i < ARM_SP_REGNUM; i++)
8278 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8279 arm_register_names[i]);
8280 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8281 ARM_SP_REGNUM,
8282 arm_sp_names);
8283 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8284 ARM_LR_REGNUM,
8285 arm_lr_names);
8286 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8287 ARM_PC_REGNUM,
8288 arm_pc_names);
8289 if (is_m)
8290 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8291 ARM_PS_REGNUM, "xpsr");
8292 else
8293 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8294 ARM_PS_REGNUM, "cpsr");
8295
8296 if (!valid_p)
8297 {
8298 tdesc_data_cleanup (tdesc_data);
8299 return NULL;
8300 }
8301
8302 feature = tdesc_find_feature (tdesc,
8303 "org.gnu.gdb.arm.fpa");
8304 if (feature != NULL)
8305 {
8306 valid_p = 1;
8307 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
8308 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8309 arm_register_names[i]);
8310 if (!valid_p)
8311 {
8312 tdesc_data_cleanup (tdesc_data);
8313 return NULL;
8314 }
8315 }
8316 else
8317 have_fpa_registers = 0;
8318
8319 feature = tdesc_find_feature (tdesc,
8320 "org.gnu.gdb.xscale.iwmmxt");
8321 if (feature != NULL)
8322 {
8323 static const char *const iwmmxt_names[] = {
8324 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
8325 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
8326 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
8327 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
8328 };
8329
8330 valid_p = 1;
8331 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
8332 valid_p
8333 &= tdesc_numbered_register (feature, tdesc_data, i,
8334 iwmmxt_names[i - ARM_WR0_REGNUM]);
8335
8336 /* Check for the control registers, but do not fail if they
8337 are missing. */
8338 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
8339 tdesc_numbered_register (feature, tdesc_data, i,
8340 iwmmxt_names[i - ARM_WR0_REGNUM]);
8341
8342 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
8343 valid_p
8344 &= tdesc_numbered_register (feature, tdesc_data, i,
8345 iwmmxt_names[i - ARM_WR0_REGNUM]);
8346
8347 if (!valid_p)
8348 {
8349 tdesc_data_cleanup (tdesc_data);
8350 return NULL;
8351 }
8352 }
8353
8354 /* If we have a VFP unit, check whether the single precision registers
8355 are present. If not, then we will synthesize them as pseudo
8356 registers. */
8357 feature = tdesc_find_feature (tdesc,
8358 "org.gnu.gdb.arm.vfp");
8359 if (feature != NULL)
8360 {
8361 static const char *const vfp_double_names[] = {
8362 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
8363 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
8364 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
8365 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
8366 };
8367
8368 /* Require the double precision registers. There must be either
8369 16 or 32. */
8370 valid_p = 1;
8371 for (i = 0; i < 32; i++)
8372 {
8373 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8374 ARM_D0_REGNUM + i,
8375 vfp_double_names[i]);
8376 if (!valid_p)
8377 break;
8378 }
8379
8380 if (!valid_p && i != 16)
8381 {
8382 tdesc_data_cleanup (tdesc_data);
8383 return NULL;
8384 }
8385
8386 if (tdesc_unnumbered_register (feature, "s0") == 0)
8387 have_vfp_pseudos = 1;
8388
8389 have_vfp_registers = 1;
8390
8391 /* If we have VFP, also check for NEON. The architecture allows
8392 NEON without VFP (integer vector operations only), but GDB
8393 does not support that. */
8394 feature = tdesc_find_feature (tdesc,
8395 "org.gnu.gdb.arm.neon");
8396 if (feature != NULL)
8397 {
8398 /* NEON requires 32 double-precision registers. */
8399 if (i != 32)
8400 {
8401 tdesc_data_cleanup (tdesc_data);
8402 return NULL;
8403 }
8404
8405 /* If there are quad registers defined by the stub, use
8406 their type; otherwise (normally) provide them with
8407 the default type. */
8408 if (tdesc_unnumbered_register (feature, "q0") == 0)
8409 have_neon_pseudos = 1;
8410
8411 have_neon = 1;
8412 }
8413 }
8414 }
8415
8416 /* If there is already a candidate, use it. */
8417 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
8418 best_arch != NULL;
8419 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
8420 {
8421 if (arm_abi != ARM_ABI_AUTO
8422 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
8423 continue;
8424
8425 if (fp_model != ARM_FLOAT_AUTO
8426 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
8427 continue;
8428
8429 /* There are various other properties in tdep that we do not
8430 need to check here: those derived from a target description,
8431 since gdbarches with a different target description are
8432 automatically disqualified. */
8433
8434 /* Do check is_m, though, since it might come from the binary. */
8435 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
8436 continue;
8437
8438 /* Found a match. */
8439 break;
8440 }
8441
8442 if (best_arch != NULL)
8443 {
8444 if (tdesc_data != NULL)
8445 tdesc_data_cleanup (tdesc_data);
8446 return best_arch->gdbarch;
8447 }
8448
8449 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
8450 gdbarch = gdbarch_alloc (&info, tdep);
8451
8452 /* Record additional information about the architecture we are defining.
8453 These are gdbarch discriminators, like the OSABI. */
8454 tdep->arm_abi = arm_abi;
8455 tdep->fp_model = fp_model;
8456 tdep->is_m = is_m;
8457 tdep->have_fpa_registers = have_fpa_registers;
8458 tdep->have_vfp_registers = have_vfp_registers;
8459 tdep->have_vfp_pseudos = have_vfp_pseudos;
8460 tdep->have_neon_pseudos = have_neon_pseudos;
8461 tdep->have_neon = have_neon;
8462
8463 /* Breakpoints. */
8464 switch (info.byte_order_for_code)
8465 {
8466 case BFD_ENDIAN_BIG:
8467 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
8468 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
8469 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
8470 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
8471
8472 break;
8473
8474 case BFD_ENDIAN_LITTLE:
8475 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
8476 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
8477 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
8478 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
8479
8480 break;
8481
8482 default:
8483 internal_error (__FILE__, __LINE__,
8484 _("arm_gdbarch_init: bad byte order for float format"));
8485 }
8486
8487 /* On ARM targets char defaults to unsigned. */
8488 set_gdbarch_char_signed (gdbarch, 0);
8489
8490 /* Note: for displaced stepping, this includes the breakpoint, and one word
 8491      of additional scratch space.  This setting isn't used for anything besides
8492 displaced stepping at present. */
8493 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
8494
8495 /* This should be low enough for everything. */
8496 tdep->lowest_pc = 0x20;
8497 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
8498
8499 /* The default, for both APCS and AAPCS, is to return small
8500 structures in registers. */
8501 tdep->struct_return = reg_struct_return;
8502
8503 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
8504 set_gdbarch_frame_align (gdbarch, arm_frame_align);
8505
8506 set_gdbarch_write_pc (gdbarch, arm_write_pc);
8507
8508 /* Frame handling. */
8509 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
8510 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
8511 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
8512
8513 frame_base_set_default (gdbarch, &arm_normal_base);
8514
8515 /* Address manipulation. */
8516 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
8517 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
8518
8519 /* Advance PC across function entry code. */
8520 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
8521
8522 /* Detect whether PC is in function epilogue. */
8523 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
8524
8525 /* Skip trampolines. */
8526 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
8527
8528 /* The stack grows downward. */
8529 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
8530
8531 /* Breakpoint manipulation. */
8532 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
8533 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
8534 arm_remote_breakpoint_from_pc);
8535
8536 /* Information about registers, etc. */
8537 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
8538 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
8539 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
8540 set_gdbarch_register_type (gdbarch, arm_register_type);
8541 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
8542
8543 /* This "info float" is FPA-specific. Use the generic version if we
8544 do not have FPA. */
8545 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
8546 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
8547
8548 /* Internal <-> external register number maps. */
8549 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
8550 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
8551
8552 set_gdbarch_register_name (gdbarch, arm_register_name);
8553
8554 /* Returning results. */
8555 set_gdbarch_return_value (gdbarch, arm_return_value);
8556
8557 /* Disassembly. */
8558 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
8559
8560 /* Minsymbol frobbing. */
8561 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
8562 set_gdbarch_coff_make_msymbol_special (gdbarch,
8563 arm_coff_make_msymbol_special);
8564 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
8565
8566 /* Thumb-2 IT block support. */
8567 set_gdbarch_adjust_breakpoint_address (gdbarch,
8568 arm_adjust_breakpoint_address);
8569
8570 /* Virtual tables. */
8571 set_gdbarch_vbit_in_delta (gdbarch, 1);
8572
8573 /* Hook in the ABI-specific overrides, if they have been registered. */
8574 gdbarch_init_osabi (info, gdbarch);
8575
8576 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
8577
8578 /* Add some default predicates. */
8579 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
8580 dwarf2_append_unwinders (gdbarch);
8581 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
8582 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
8583
8584 /* Now we have tuned the configuration, set a few final things,
8585 based on what the OS ABI has told us. */
8586
8587 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
8588 binaries are always marked. */
8589 if (tdep->arm_abi == ARM_ABI_AUTO)
8590 tdep->arm_abi = ARM_ABI_APCS;
8591
8592 /* Watchpoints are not steppable. */
8593 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
8594
8595 /* We used to default to FPA for generic ARM, but almost nobody
8596 uses that now, and we now provide a way for the user to force
8597 the model. So default to the most useful variant. */
8598 if (tdep->fp_model == ARM_FLOAT_AUTO)
8599 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
8600
8601 if (tdep->jb_pc >= 0)
8602 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
8603
8604 /* Floating point sizes and format. */
8605 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
8606 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
8607 {
8608 set_gdbarch_double_format
8609 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8610 set_gdbarch_long_double_format
8611 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8612 }
8613 else
8614 {
8615 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
8616 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
8617 }
8618
8619 if (have_vfp_pseudos)
8620 {
8621 /* NOTE: These are the only pseudo registers used by
8622 the ARM target at the moment. If more are added, a
8623 little more care in numbering will be needed. */
8624
8625 int num_pseudos = 32;
8626 if (have_neon_pseudos)
8627 num_pseudos += 16;
8628 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
8629 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
8630 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
8631 }
8632
8633 if (tdesc_data)
8634 {
8635 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
8636
8637 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
8638
8639 /* Override tdesc_register_type to adjust the types of VFP
8640 registers for NEON. */
8641 set_gdbarch_register_type (gdbarch, arm_register_type);
8642 }
8643
8644 /* Add standard register aliases. We add aliases even for those
 8645      names which are used by the current architecture - it's simpler,
8646 and does no harm, since nothing ever lists user registers. */
8647 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
8648 user_reg_add (gdbarch, arm_register_aliases[i].name,
8649 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
8650
8651 return gdbarch;
8652 }
8653
8654 static void
8655 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
8656 {
8657 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8658
8659 if (tdep == NULL)
8660 return;
8661
 8662   fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
8663 (unsigned long) tdep->lowest_pc);
8664 }
8665
8666 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
8667
8668 void
8669 _initialize_arm_tdep (void)
8670 {
8671 struct ui_file *stb;
8672 long length;
8673 struct cmd_list_element *new_set, *new_show;
8674 const char *setname;
8675 const char *setdesc;
8676 const char *const *regnames;
8677 int numregs, i, j;
8678 static char *helptext;
8679 char regdesc[1024], *rdptr = regdesc;
8680 size_t rest = sizeof (regdesc);
8681
8682 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
8683
8684 arm_objfile_data_key
8685 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
8686
8687 /* Add ourselves to objfile event chain. */
8688 observer_attach_new_objfile (arm_exidx_new_objfile);
8689 arm_exidx_data_key
8690 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
8691
8692 /* Register an ELF OS ABI sniffer for ARM binaries. */
8693 gdbarch_register_osabi_sniffer (bfd_arch_arm,
8694 bfd_target_elf_flavour,
8695 arm_elf_osabi_sniffer);
8696
8697 /* Initialize the standard target descriptions. */
8698 initialize_tdesc_arm_with_m ();
8699
8700 /* Get the number of possible sets of register names defined in opcodes. */
8701 num_disassembly_options = get_arm_regname_num_options ();
8702
8703 /* Add root prefix command for all "set arm"/"show arm" commands. */
8704 add_prefix_cmd ("arm", no_class, set_arm_command,
8705 _("Various ARM-specific commands."),
8706 &setarmcmdlist, "set arm ", 0, &setlist);
8707
8708 add_prefix_cmd ("arm", no_class, show_arm_command,
8709 _("Various ARM-specific commands."),
8710 &showarmcmdlist, "show arm ", 0, &showlist);
8711
8712 /* Sync the opcode insn printer with our register viewer. */
8713 parse_arm_disassembler_option ("reg-names-std");
8714
8715 /* Initialize the array that will be passed to
8716 add_setshow_enum_cmd(). */
8717 valid_disassembly_styles
8718 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
8719 for (i = 0; i < num_disassembly_options; i++)
8720 {
8721 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
8722 valid_disassembly_styles[i] = setname;
8723 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
8724 rdptr += length;
8725 rest -= length;
8726 /* When we find the default names, tell the disassembler to use
8727 them. */
8728 if (!strcmp (setname, "std"))
8729 {
8730 disassembly_style = setname;
8731 set_arm_regname_option (i);
8732 }
8733 }
8734 /* Mark the end of valid options. */
8735 valid_disassembly_styles[num_disassembly_options] = NULL;
8736
8737 /* Create the help text. */
8738 stb = mem_fileopen ();
8739 fprintf_unfiltered (stb, "%s%s%s",
8740 _("The valid values are:\n"),
8741 regdesc,
8742 _("The default is \"std\"."));
8743 helptext = ui_file_xstrdup (stb, NULL);
8744 ui_file_delete (stb);
8745
8746 add_setshow_enum_cmd("disassembler", no_class,
8747 valid_disassembly_styles, &disassembly_style,
8748 _("Set the disassembly style."),
8749 _("Show the disassembly style."),
8750 helptext,
8751 set_disassembly_style_sfunc,
8752 NULL, /* FIXME: i18n: The disassembly style is
8753 \"%s\". */
8754 &setarmcmdlist, &showarmcmdlist);
8755
8756 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
8757 _("Set usage of ARM 32-bit mode."),
8758 _("Show usage of ARM 32-bit mode."),
8759 _("When off, a 26-bit PC will be used."),
8760 NULL,
8761 NULL, /* FIXME: i18n: Usage of ARM 32-bit
8762 mode is %s. */
8763 &setarmcmdlist, &showarmcmdlist);
8764
8765 /* Add a command to allow the user to force the FPU model. */
8766 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
8767 _("Set the floating point type."),
8768 _("Show the floating point type."),
8769 _("auto - Determine the FP typefrom the OS-ABI.\n\
8770 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
8771 fpa - FPA co-processor (GCC compiled).\n\
8772 softvfp - Software FP with pure-endian doubles.\n\
8773 vfp - VFP co-processor."),
8774 set_fp_model_sfunc, show_fp_model,
8775 &setarmcmdlist, &showarmcmdlist);
8776
8777 /* Add a command to allow the user to force the ABI. */
8778 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
8779 _("Set the ABI."),
8780 _("Show the ABI."),
8781 NULL, arm_set_abi, arm_show_abi,
8782 &setarmcmdlist, &showarmcmdlist);
8783
8784 /* Add two commands to allow the user to force the assumed
8785 execution mode. */
8786 add_setshow_enum_cmd ("fallback-mode", class_support,
8787 arm_mode_strings, &arm_fallback_mode_string,
8788 _("Set the mode assumed when symbols are unavailable."),
8789 _("Show the mode assumed when symbols are unavailable."),
8790 NULL, NULL, arm_show_fallback_mode,
8791 &setarmcmdlist, &showarmcmdlist);
8792 add_setshow_enum_cmd ("force-mode", class_support,
8793 arm_mode_strings, &arm_force_mode_string,
8794 _("Set the mode assumed even when symbols are available."),
8795 _("Show the mode assumed even when symbols are available."),
8796 NULL, NULL, arm_show_force_mode,
8797 &setarmcmdlist, &showarmcmdlist);
8798
8799 /* Debugging flag. */
8800 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
8801 _("Set ARM debugging."),
8802 _("Show ARM debugging."),
8803 _("When on, arm-specific debugging is enabled."),
8804 NULL,
 8805 			   NULL, /* FIXME: i18n: "ARM debugging is %s."  */
8806 &setdebuglist, &showdebuglist);
8807 }