1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "features/arm-with-m.c"
59
60 static int arm_debug;
61
62 /* Macros for setting and testing a bit in a minimal symbol that marks
63 it as a Thumb function. The MSB of the minimal symbol's "info" field
64 is used for this purpose.
65
66 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
67 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
68
69 #define MSYMBOL_SET_SPECIAL(msym) \
70 MSYMBOL_TARGET_FLAG_1 (msym) = 1
71
72 #define MSYMBOL_IS_SPECIAL(msym) \
73 MSYMBOL_TARGET_FLAG_1 (msym)
74
75 /* Per-objfile data used for mapping symbols. */
76 static const struct objfile_data *arm_objfile_data_key;
77
78 struct arm_mapping_symbol
79 {
80 bfd_vma value;
81 char type;
82 };
83 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
84 DEF_VEC_O(arm_mapping_symbol_s);
85
86 struct arm_per_objfile
87 {
88 VEC(arm_mapping_symbol_s) **section_maps;
89 };
90
91 /* The list of available "set arm ..." and "show arm ..." commands. */
92 static struct cmd_list_element *setarmcmdlist = NULL;
93 static struct cmd_list_element *showarmcmdlist = NULL;
94
95 /* The type of floating-point to use. Keep this in sync with enum
96 arm_float_model, and the help string in _initialize_arm_tdep. */
97 static const char *fp_model_strings[] =
98 {
99 "auto",
100 "softfpa",
101 "fpa",
102 "softvfp",
103 "vfp",
104 NULL
105 };
106
107 /* A variable that can be configured by the user. */
108 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
109 static const char *current_fp_model = "auto";
110
111 /* The ABI to use. Keep this in sync with arm_abi_kind. */
112 static const char *arm_abi_strings[] =
113 {
114 "auto",
115 "APCS",
116 "AAPCS",
117 NULL
118 };
119
120 /* A variable that can be configured by the user. */
121 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
122 static const char *arm_abi_string = "auto";
123
124 /* The execution mode to assume. */
125 static const char *arm_mode_strings[] =
126 {
127 "auto",
128 "arm",
129 "thumb",
130 NULL
131 };
132
133 static const char *arm_fallback_mode_string = "auto";
134 static const char *arm_force_mode_string = "auto";
135
136 /* Number of different reg name sets (options). */
137 static int num_disassembly_options;
138
139 /* The standard register names, and all the valid aliases for them. Note
140 that `fp', `sp' and `pc' are not added in this alias list, because they
141 have been added as builtin user registers in
142 std-regs.c:_initialize_frame_reg. */
143 static const struct
144 {
145 const char *name;
146 int regnum;
147 } arm_register_aliases[] = {
148 /* Basic register numbers. */
149 { "r0", 0 },
150 { "r1", 1 },
151 { "r2", 2 },
152 { "r3", 3 },
153 { "r4", 4 },
154 { "r5", 5 },
155 { "r6", 6 },
156 { "r7", 7 },
157 { "r8", 8 },
158 { "r9", 9 },
159 { "r10", 10 },
160 { "r11", 11 },
161 { "r12", 12 },
162 { "r13", 13 },
163 { "r14", 14 },
164 { "r15", 15 },
165 /* Synonyms (argument and variable registers). */
166 { "a1", 0 },
167 { "a2", 1 },
168 { "a3", 2 },
169 { "a4", 3 },
170 { "v1", 4 },
171 { "v2", 5 },
172 { "v3", 6 },
173 { "v4", 7 },
174 { "v5", 8 },
175 { "v6", 9 },
176 { "v7", 10 },
177 { "v8", 11 },
178 /* Other platform-specific names for r9. */
179 { "sb", 9 },
180 { "tr", 9 },
181 /* Special names. */
182 { "ip", 12 },
183 { "lr", 14 },
184 /* Names used by GCC (not listed in the ARM EABI). */
185 { "sl", 10 },
186 /* A special name from the older ATPCS. */
187 { "wr", 7 },
188 };
189
190 static const char *const arm_register_names[] =
191 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
192 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
193 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
194 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
195 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
196 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
197 "fps", "cpsr" }; /* 24 25 */
198
199 /* Valid register name styles. */
200 static const char **valid_disassembly_styles;
201
202 /* Disassembly style to use. Default to "std" register names. */
203 static const char *disassembly_style;
204
205 /* This is used to keep the bfd arch_info in sync with the disassembly
206 style. */
207 static void set_disassembly_style_sfunc(char *, int,
208 struct cmd_list_element *);
209 static void set_disassembly_style (void);
210
211 static void convert_from_extended (const struct floatformat *, const void *,
212 void *, int);
213 static void convert_to_extended (const struct floatformat *, void *,
214 const void *, int);
215
216 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
217 struct regcache *regcache,
218 int regnum, gdb_byte *buf);
219 static void arm_neon_quad_write (struct gdbarch *gdbarch,
220 struct regcache *regcache,
221 int regnum, const gdb_byte *buf);
222
223 struct arm_prologue_cache
224 {
225 /* The stack pointer at the time this frame was created; i.e. the
226 caller's stack pointer when this function was called. It is used
227 to identify this frame. */
228 CORE_ADDR prev_sp;
229
230 /* The frame base for this frame is just prev_sp - frame size.
231 FRAMESIZE is the distance from the frame pointer to the
232 initial stack pointer. */
233
234 int framesize;
235
236 /* The register used to hold the frame pointer for this frame. */
237 int framereg;
238
239 /* Saved register offsets. */
240 struct trad_frame_saved_reg *saved_regs;
241 };
242
243 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
244 CORE_ADDR prologue_start,
245 CORE_ADDR prologue_end,
246 struct arm_prologue_cache *cache);
247
248 /* Architecture version for displaced stepping. This affects the behaviour of
249 certain instructions, and really should not be hard-wired. */
250
251 #define DISPLACED_STEPPING_ARCH_VERSION 5
252
253 /* Addresses for calling Thumb functions have bit 0 set.
254 Here are some macros to test, set, or clear bit 0 of addresses. */
255 #define IS_THUMB_ADDR(addr) ((addr) & 1)
256 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
257 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
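
/* For instance (illustrative values only): a Thumb function at 0x8000 is
   called through address 0x8001, so MAKE_THUMB_ADDR (0x8000) == 0x8001,
   IS_THUMB_ADDR (0x8001) == 1, and UNMAKE_THUMB_ADDR (0x8001) == 0x8000.  */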
258
259 /* Set to true if the 32-bit mode is in use. */
260
261 int arm_apcs_32 = 1;
262
263 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
264
265 int
266 arm_psr_thumb_bit (struct gdbarch *gdbarch)
267 {
268 if (gdbarch_tdep (gdbarch)->is_m)
269 return XPSR_T;
270 else
271 return CPSR_T;
272 }
273
274 /* Determine if FRAME is executing in Thumb mode. */
275
276 int
277 arm_frame_is_thumb (struct frame_info *frame)
278 {
279 CORE_ADDR cpsr;
280 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
281
282 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
283 directly (from a signal frame or dummy frame) or by interpreting
284 the saved LR (from a prologue or DWARF frame). So consult it and
285 trust the unwinders. */
286 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
287
288 return (cpsr & t_bit) != 0;
289 }
290
291 /* Callback for VEC_lower_bound. */
292
293 static inline int
294 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
295 const struct arm_mapping_symbol *rhs)
296 {
297 return lhs->value < rhs->value;
298 }
299
300 /* Search for the mapping symbol covering MEMADDR. If one is found,
301 return its type. Otherwise, return 0. If START is non-NULL,
302 set *START to the location of the mapping symbol. */
303
304 static char
305 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
306 {
307 struct obj_section *sec;
308
309 /* If there are mapping symbols, consult them. */
310 sec = find_pc_section (memaddr);
311 if (sec != NULL)
312 {
313 struct arm_per_objfile *data;
314 VEC(arm_mapping_symbol_s) *map;
315 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
316 0 };
317 unsigned int idx;
318
319 data = objfile_data (sec->objfile, arm_objfile_data_key);
320 if (data != NULL)
321 {
322 map = data->section_maps[sec->the_bfd_section->index];
323 if (!VEC_empty (arm_mapping_symbol_s, map))
324 {
325 struct arm_mapping_symbol *map_sym;
326
327 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
328 arm_compare_mapping_symbols);
329
330 /* VEC_lower_bound finds the earliest ordered insertion
331 point. If the following symbol starts at this exact
332 address, we use that; otherwise, the preceding
333 mapping symbol covers this address. */
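/* For example (illustrative offsets): with mapping symbols at section
   offsets 0x0 ($a) and 0x40 ($t), a MEMADDR at offset 0x44 yields
   idx == 2, so the preceding symbol at 0x40 is used and 't' is
   returned; a MEMADDR exactly at offset 0x40 yields idx == 1 and
   matches that symbol directly.  */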
334 if (idx < VEC_length (arm_mapping_symbol_s, map))
335 {
336 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
337 if (map_sym->value == map_key.value)
338 {
339 if (start)
340 *start = map_sym->value + obj_section_addr (sec);
341 return map_sym->type;
342 }
343 }
344
345 if (idx > 0)
346 {
347 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
348 if (start)
349 *start = map_sym->value + obj_section_addr (sec);
350 return map_sym->type;
351 }
352 }
353 }
354 }
355
356 return 0;
357 }
358
359 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
360 CORE_ADDR pc, int insert_bkpt);
361
362 /* Determine if the program counter specified in MEMADDR is in a Thumb
363 function. This function should be called for addresses unrelated to
364 any executing frame; otherwise, prefer arm_frame_is_thumb. */
365
366 int
367 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
368 {
369 struct obj_section *sec;
370 struct minimal_symbol *sym;
371 char type;
372 struct displaced_step_closure* dsc
373 = get_displaced_step_closure_by_addr(memaddr);
374
375 /* If we are checking the mode of a displaced instruction in the copy area,
376 the mode should be determined by the instruction at the original address. */
377 if (dsc)
378 {
379 if (debug_displaced)
380 fprintf_unfiltered (gdb_stdlog,
381 "displaced: check mode of %.8lx instead of %.8lx\n",
382 (unsigned long) dsc->insn_addr,
383 (unsigned long) memaddr);
384 memaddr = dsc->insn_addr;
385 }
386
387 /* If bit 0 of the address is set, assume this is a Thumb address. */
388 if (IS_THUMB_ADDR (memaddr))
389 return 1;
390
391 /* If the user wants to override the symbol table, let him. */
392 if (strcmp (arm_force_mode_string, "arm") == 0)
393 return 0;
394 if (strcmp (arm_force_mode_string, "thumb") == 0)
395 return 1;
396
397 /* ARM v6-M and v7-M are always in Thumb mode. */
398 if (gdbarch_tdep (gdbarch)->is_m)
399 return 1;
400
401 /* If there are mapping symbols, consult them. */
402 type = arm_find_mapping_symbol (memaddr, NULL);
403 if (type)
404 return type == 't';
405
406 /* Thumb functions have a "special" bit set in minimal symbols. */
407 sym = lookup_minimal_symbol_by_pc (memaddr);
408 if (sym)
409 return (MSYMBOL_IS_SPECIAL (sym));
410
411 /* If the user wants to override the fallback mode, let them. */
412 if (strcmp (arm_fallback_mode_string, "arm") == 0)
413 return 0;
414 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
415 return 1;
416
417 /* If we couldn't find any symbol, but we're talking to a running
418 target, then trust the current value of $cpsr. This lets
419 "display/i $pc" always show the correct mode (though if there is
420 a symbol table we will not reach here, so it still may not be
421 displayed in the mode in which it will be executed).
422
423 As a further heuristic, if we detect that we are single-stepping, we
424 check which state executing the current instruction would leave us
425 in. */
426 if (target_has_registers)
427 {
428 struct frame_info *current_frame = get_current_frame ();
429 CORE_ADDR current_pc = get_frame_pc (current_frame);
430 int is_thumb = arm_frame_is_thumb (current_frame);
431 CORE_ADDR next_pc;
432 if (memaddr == current_pc)
433 return is_thumb;
434 else
435 {
436 struct gdbarch *gdbarch = get_frame_arch (current_frame);
437 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
438 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
439 return IS_THUMB_ADDR (next_pc);
440 else
441 return is_thumb;
442 }
443 }
444
445 /* Otherwise we're out of luck; we assume ARM. */
446 return 0;
447 }
448
449 /* Remove useless bits from addresses in a running program. */
450 static CORE_ADDR
451 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
452 {
453 if (arm_apcs_32)
454 return UNMAKE_THUMB_ADDR (val);
455 else
456 return (val & 0x03fffffc);
457 }
458
459 /* When reading symbols, we need to zap the low bit of the address,
460 which may be set to 1 for Thumb functions. */
461 static CORE_ADDR
462 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
463 {
464 return val & ~1;
465 }
466
467 /* Return 1 if PC is the start of a compiler helper function which
468 can be safely ignored during prologue skipping. IS_THUMB is true
469 if the function is known to be a Thumb function due to the way it
470 is being called. */
471 static int
472 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
473 {
474 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
475 struct minimal_symbol *msym;
476
477 msym = lookup_minimal_symbol_by_pc (pc);
478 if (msym != NULL
479 && SYMBOL_VALUE_ADDRESS (msym) == pc
480 && SYMBOL_LINKAGE_NAME (msym) != NULL)
481 {
482 const char *name = SYMBOL_LINKAGE_NAME (msym);
483
484 /* The GNU linker's Thumb call stub to foo is named
485 __foo_from_thumb. */
486 if (strstr (name, "_from_thumb") != NULL)
487 name += 2;
488
489 /* On soft-float targets, __truncdfsf2 is called to convert promoted
490 arguments to their argument types in non-prototyped
491 functions. */
492 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
493 return 1;
494 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
495 return 1;
496
497 /* Internal functions related to thread-local storage. */
498 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
499 return 1;
500 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
501 return 1;
502 }
503 else
504 {
505 /* If we run against a stripped glibc, we may be unable to identify
506 special functions by name. Check for one important case,
507 __aeabi_read_tp, by comparing the *code* against the default
508 implementation (this is hand-written ARM assembler in glibc). */
509
510 if (!is_thumb
511 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
512 == 0xe3e00a0f /* mov r0, #0xffff0fff */
513 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
514 == 0xe240f01f) /* sub pc, r0, #31 */
515 return 1;
516 }
517
518 return 0;
519 }
520
521 /* Support routines for instruction parsing. */
522 #define submask(x) ((1L << ((x) + 1)) - 1)
523 #define bit(obj,st) (((obj) >> (st)) & 1)
524 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
525 #define sbits(obj,st,fn) \
526 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
527 #define BranchDest(addr,instr) \
528 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
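
/* As a worked example (encodings for illustration): for 0xe3a01005
   ("mov r1, #5"), bits (insn, 12, 15) extracts Rd == 1 and
   bits (insn, 0, 7) extracts the immediate 5.  For the branch 0xeafffffe
   ("b ."), sbits (insn, 0, 23) sign-extends 0xfffffe to -2, so the scaled
   displacement is -8 and BranchDest (addr, insn) == addr, i.e. the branch
   targets itself.  */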
529
530 /* Extract the immediate from a movw/movt instruction of encoding T. INSN1 is
531 the first 16 bits of the instruction, and INSN2 is the second 16 bits of
532 the instruction. */
533 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
534 ((bits ((insn1), 0, 3) << 12) \
535 | (bits ((insn1), 10, 10) << 11) \
536 | (bits ((insn2), 12, 14) << 8) \
537 | bits ((insn2), 0, 7))
538
539 /* Extract the immediate from a movw/movt instruction of encoding A. INSN is
540 the 32-bit instruction. */
541 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
542 ((bits ((insn), 16, 19) << 12) \
543 | bits ((insn), 0, 11))
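
/* For example (illustrative encodings): "movw r0, #0x1234" is encoded in
   Thumb-2 as insn1 = 0xf241, insn2 = 0x2034, and
   EXTRACT_MOVW_MOVT_IMM_T (0xf241, 0x2034) reassembles imm4:i:imm3:imm8
   into 0x1234.  The same instruction in ARM state is 0xe3010234, and
   EXTRACT_MOVW_MOVT_IMM_A (0xe3010234) likewise yields 0x1234.  */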
544
545 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
546
547 static unsigned int
548 thumb_expand_immediate (unsigned int imm)
549 {
550 unsigned int count = imm >> 7;
551
552 if (count < 8)
553 switch (count / 2)
554 {
555 case 0:
556 return imm & 0xff;
557 case 1:
558 return (imm & 0xff) | ((imm & 0xff) << 16);
559 case 2:
560 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
561 case 3:
562 return (imm & 0xff) | ((imm & 0xff) << 8)
563 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
564 }
565
566 return (0x80 | (imm & 0x7f)) << (32 - count);
567 }
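
/* Two worked examples (illustrative only): the 12-bit field 0x155
   (i:imm3:imm8 == 0:001:0x55) expands to 0x00550055, while the field
   0x4ff (0:100:0xff) has count == 9 and so expands to 0xff rotated
   right by 9, i.e. 0x7f800000.  */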
568
569 /* Return 1 if the 16-bit Thumb instruction INST might change
570 control flow, 0 otherwise. */
571
572 static int
573 thumb_instruction_changes_pc (unsigned short inst)
574 {
575 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
576 return 1;
577
578 if ((inst & 0xf000) == 0xd000) /* conditional branch */
579 return 1;
580
581 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
582 return 1;
583
584 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
585 return 1;
586
587 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
588 return 1;
589
590 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
591 return 1;
592
593 return 0;
594 }
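
/* For instance (illustrative encodings): 0xbd10 ("pop {r4, pc}") and
   0x4770 ("bx lr") are reported as changing the PC, while 0x2005
   ("movs r0, #5") is not.  */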
595
596 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
597 might change control flow, 0 otherwise. */
598
599 static int
600 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
601 {
602 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
603 {
604 /* Branches and miscellaneous control instructions. */
605
606 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
607 {
608 /* B, BL, BLX. */
609 return 1;
610 }
611 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
612 {
613 /* SUBS PC, LR, #imm8. */
614 return 1;
615 }
616 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
617 {
618 /* Conditional branch. */
619 return 1;
620 }
621
622 return 0;
623 }
624
625 if ((inst1 & 0xfe50) == 0xe810)
626 {
627 /* Load multiple or RFE. */
628
629 if (bit (inst1, 7) && !bit (inst1, 8))
630 {
631 /* LDMIA or POP */
632 if (bit (inst2, 15))
633 return 1;
634 }
635 else if (!bit (inst1, 7) && bit (inst1, 8))
636 {
637 /* LDMDB */
638 if (bit (inst2, 15))
639 return 1;
640 }
641 else if (bit (inst1, 7) && bit (inst1, 8))
642 {
643 /* RFEIA */
644 return 1;
645 }
646 else if (!bit (inst1, 7) && !bit (inst1, 8))
647 {
648 /* RFEDB */
649 return 1;
650 }
651
652 return 0;
653 }
654
655 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
656 {
657 /* MOV PC or MOVS PC. */
658 return 1;
659 }
660
661 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
662 {
663 /* LDR PC. */
664 if (bits (inst1, 0, 3) == 15)
665 return 1;
666 if (bit (inst1, 7))
667 return 1;
668 if (bit (inst2, 11))
669 return 1;
670 if ((inst2 & 0x0fc0) == 0x0000)
671 return 1;
672
673 return 0;
674 }
675
676 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
677 {
678 /* TBB. */
679 return 1;
680 }
681
682 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
683 {
684 /* TBH. */
685 return 1;
686 }
687
688 return 0;
689 }
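
/* For instance (illustrative encodings): the halfword pair 0xe8bd/0x80f0
   ("pop.w {r4-r7, pc}") is reported as changing the PC, while
   0xf8df/0x3018 ("ldr.w r3, [pc, #24]") is not.  */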
690
691 /* Analyze a Thumb prologue, looking for a recognizable stack frame
692 and frame pointer. Scan until we encounter a store that could
693 clobber the stack frame unexpectedly, or an unknown instruction.
694 Return the last address which is definitely safe to skip for an
695 initial breakpoint. */
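
/* As an illustration (not an exhaustive list of the recognized forms),
   for a typical GCC-style prologue such as

	push	{r7, lr}
	sub	sp, #16
	add	r7, sp, #0

   the scan records LR at original SP - 4 and R7 at original SP - 8 in
   CACHE->saved_regs, and sets CACHE->framereg to r7 (THUMB_FP_REGNUM)
   with CACHE->framesize == 24.  */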
696
697 static CORE_ADDR
698 thumb_analyze_prologue (struct gdbarch *gdbarch,
699 CORE_ADDR start, CORE_ADDR limit,
700 struct arm_prologue_cache *cache)
701 {
702 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
703 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
704 int i;
705 pv_t regs[16];
706 struct pv_area *stack;
707 struct cleanup *back_to;
708 CORE_ADDR offset;
709 CORE_ADDR unrecognized_pc = 0;
710
711 for (i = 0; i < 16; i++)
712 regs[i] = pv_register (i, 0);
713 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
714 back_to = make_cleanup_free_pv_area (stack);
715
716 while (start < limit)
717 {
718 unsigned short insn;
719
720 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
721
722 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
723 {
724 int regno;
725 int mask;
726
727 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
728 break;
729
730 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
731 whether to save LR (R14). */
732 mask = (insn & 0xff) | ((insn & 0x100) << 6);
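/* For example, "push {r4-r7, lr}" is 0xb5f0, giving mask == 0x40f0
   (bits 4-7 and 14).  */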
733
734 /* Calculate offsets of saved R0-R7 and LR. */
735 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
736 if (mask & (1 << regno))
737 {
738 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
739 -4);
740 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
741 }
742 }
743 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
744 sub sp, #simm */
745 {
746 offset = (insn & 0x7f) << 2; /* get scaled offset */
747 if (insn & 0x80) /* Check for SUB. */
748 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
749 -offset);
750 else
751 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
752 offset);
753 }
754 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
755 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
756 (insn & 0xff) << 2);
757 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
758 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
759 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
760 bits (insn, 6, 8));
761 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
762 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
763 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
764 bits (insn, 0, 7));
765 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
766 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
767 && pv_is_constant (regs[bits (insn, 3, 5)]))
768 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
769 regs[bits (insn, 6, 8)]);
770 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
771 && pv_is_constant (regs[bits (insn, 3, 6)]))
772 {
773 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
774 int rm = bits (insn, 3, 6);
775 regs[rd] = pv_add (regs[rd], regs[rm]);
776 }
777 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
778 {
779 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
780 int src_reg = (insn & 0x78) >> 3;
781 regs[dst_reg] = regs[src_reg];
782 }
783 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
784 {
785 /* Handle stores to the stack. Normally pushes are used,
786 but with GCC -mtpcs-frame, there may be other stores
787 in the prologue to create the frame. */
788 int regno = (insn >> 8) & 0x7;
789 pv_t addr;
790
791 offset = (insn & 0xff) << 2;
792 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
793
794 if (pv_area_store_would_trash (stack, addr))
795 break;
796
797 pv_area_store (stack, addr, 4, regs[regno]);
798 }
799 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
800 {
801 int rd = bits (insn, 0, 2);
802 int rn = bits (insn, 3, 5);
803 pv_t addr;
804
805 offset = bits (insn, 6, 10) << 2;
806 addr = pv_add_constant (regs[rn], offset);
807
808 if (pv_area_store_would_trash (stack, addr))
809 break;
810
811 pv_area_store (stack, addr, 4, regs[rd]);
812 }
813 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
814 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
815 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
816 /* Ignore stores of argument registers to the stack. */
817 ;
818 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
819 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
820 /* Ignore block loads from the stack, potentially copying
821 parameters from memory. */
822 ;
823 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
824 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
825 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
826 /* Similarly ignore single loads from the stack. */
827 ;
828 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
829 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
830 /* Skip register copies, i.e. saves to another register
831 instead of the stack. */
832 ;
833 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
834 /* Recognize constant loads; even with small stacks these are necessary
835 on Thumb. */
836 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
837 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
838 {
839 /* Constant pool loads, for the same reason. */
840 unsigned int constant;
841 CORE_ADDR loc;
842
843 loc = start + 4 + bits (insn, 0, 7) * 4;
844 constant = read_memory_unsigned_integer (loc, 4, byte_order);
845 regs[bits (insn, 8, 10)] = pv_constant (constant);
846 }
847 else if ((insn & 0xe000) == 0xe000)
848 {
849 unsigned short inst2;
850
851 inst2 = read_memory_unsigned_integer (start + 2, 2,
852 byte_order_for_code);
853
854 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
855 {
856 /* BL, BLX. Allow some special function calls when
857 skipping the prologue; GCC generates these before
858 storing arguments to the stack. */
859 CORE_ADDR nextpc;
860 int j1, j2, imm1, imm2;
861
862 imm1 = sbits (insn, 0, 10);
863 imm2 = bits (inst2, 0, 10);
864 j1 = bit (inst2, 13);
865 j2 = bit (inst2, 11);
866
867 offset = ((imm1 << 12) + (imm2 << 1));
868 offset ^= ((!j2) << 22) | ((!j1) << 23);
869
870 nextpc = start + 4 + offset;
871 /* For BLX make sure to clear the low bits. */
872 if (bit (inst2, 12) == 0)
873 nextpc = nextpc & 0xfffffffc;
874
875 if (!skip_prologue_function (gdbarch, nextpc,
876 bit (inst2, 12) != 0))
877 break;
878 }
879
880 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
881 { registers } */
882 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
883 {
884 pv_t addr = regs[bits (insn, 0, 3)];
885 int regno;
886
887 if (pv_area_store_would_trash (stack, addr))
888 break;
889
890 /* Calculate offsets of saved registers. */
891 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
892 if (inst2 & (1 << regno))
893 {
894 addr = pv_add_constant (addr, -4);
895 pv_area_store (stack, addr, 4, regs[regno]);
896 }
897
898 if (insn & 0x0020)
899 regs[bits (insn, 0, 3)] = addr;
900 }
901
902 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
903 [Rn, #+/-imm]{!} */
904 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
905 {
906 int regno1 = bits (inst2, 12, 15);
907 int regno2 = bits (inst2, 8, 11);
908 pv_t addr = regs[bits (insn, 0, 3)];
909
910 offset = inst2 & 0xff;
911 if (insn & 0x0080)
912 addr = pv_add_constant (addr, offset);
913 else
914 addr = pv_add_constant (addr, -offset);
915
916 if (pv_area_store_would_trash (stack, addr))
917 break;
918
919 pv_area_store (stack, addr, 4, regs[regno1]);
920 pv_area_store (stack, pv_add_constant (addr, 4),
921 4, regs[regno2]);
922
923 if (insn & 0x0020)
924 regs[bits (insn, 0, 3)] = addr;
925 }
926
927 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
928 && (inst2 & 0x0c00) == 0x0c00
929 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
930 {
931 int regno = bits (inst2, 12, 15);
932 pv_t addr = regs[bits (insn, 0, 3)];
933
934 offset = inst2 & 0xff;
935 if (inst2 & 0x0200)
936 addr = pv_add_constant (addr, offset);
937 else
938 addr = pv_add_constant (addr, -offset);
939
940 if (pv_area_store_would_trash (stack, addr))
941 break;
942
943 pv_area_store (stack, addr, 4, regs[regno]);
944
945 if (inst2 & 0x0100)
946 regs[bits (insn, 0, 3)] = addr;
947 }
948
949 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
950 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
951 {
952 int regno = bits (inst2, 12, 15);
953 pv_t addr;
954
955 offset = inst2 & 0xfff;
956 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
957
958 if (pv_area_store_would_trash (stack, addr))
959 break;
960
961 pv_area_store (stack, addr, 4, regs[regno]);
962 }
963
964 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
965 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
966 /* Ignore stores of argument registers to the stack. */
967 ;
968
969 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
970 && (inst2 & 0x0d00) == 0x0c00
971 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
972 /* Ignore stores of argument registers to the stack. */
973 ;
974
975 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
976 { registers } */
977 && (inst2 & 0x8000) == 0x0000
978 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
979 /* Ignore block loads from the stack, potentially copying
980 parameters from memory. */
981 ;
982
983 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
984 [Rn, #+/-imm] */
985 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
986 /* Similarly ignore dual loads from the stack. */
987 ;
988
989 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
990 && (inst2 & 0x0d00) == 0x0c00
991 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
992 /* Similarly ignore single loads from the stack. */
993 ;
994
995 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
996 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
997 /* Similarly ignore single loads from the stack. */
998 ;
999
1000 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
1001 && (inst2 & 0x8000) == 0x0000)
1002 {
1003 unsigned int imm = ((bits (insn, 10, 10) << 11)
1004 | (bits (inst2, 12, 14) << 8)
1005 | bits (inst2, 0, 7));
1006
1007 regs[bits (inst2, 8, 11)]
1008 = pv_add_constant (regs[bits (insn, 0, 3)],
1009 thumb_expand_immediate (imm));
1010 }
1011
1012 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1013 && (inst2 & 0x8000) == 0x0000)
1014 {
1015 unsigned int imm = ((bits (insn, 10, 10) << 11)
1016 | (bits (inst2, 12, 14) << 8)
1017 | bits (inst2, 0, 7));
1018
1019 regs[bits (inst2, 8, 11)]
1020 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1021 }
1022
1023 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1024 && (inst2 & 0x8000) == 0x0000)
1025 {
1026 unsigned int imm = ((bits (insn, 10, 10) << 11)
1027 | (bits (inst2, 12, 14) << 8)
1028 | bits (inst2, 0, 7));
1029
1030 regs[bits (inst2, 8, 11)]
1031 = pv_add_constant (regs[bits (insn, 0, 3)],
1032 - (CORE_ADDR) thumb_expand_immediate (imm));
1033 }
1034
1035 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1036 && (inst2 & 0x8000) == 0x0000)
1037 {
1038 unsigned int imm = ((bits (insn, 10, 10) << 11)
1039 | (bits (inst2, 12, 14) << 8)
1040 | bits (inst2, 0, 7));
1041
1042 regs[bits (inst2, 8, 11)]
1043 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1044 }
1045
1046 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1047 {
1048 unsigned int imm = ((bits (insn, 10, 10) << 11)
1049 | (bits (inst2, 12, 14) << 8)
1050 | bits (inst2, 0, 7));
1051
1052 regs[bits (inst2, 8, 11)]
1053 = pv_constant (thumb_expand_immediate (imm));
1054 }
1055
1056 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1057 {
1058 unsigned int imm
1059 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1060
1061 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1062 }
1063
1064 else if (insn == 0xea5f /* mov.w Rd,Rm */
1065 && (inst2 & 0xf0f0) == 0)
1066 {
1067 int dst_reg = (inst2 & 0x0f00) >> 8;
1068 int src_reg = inst2 & 0xf;
1069 regs[dst_reg] = regs[src_reg];
1070 }
1071
1072 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1073 {
1074 /* Constant pool loads. */
1075 unsigned int constant;
1076 CORE_ADDR loc;
1077
1078 offset = bits (insn, 0, 11);
1079 if (insn & 0x0080)
1080 loc = start + 4 + offset;
1081 else
1082 loc = start + 4 - offset;
1083
1084 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1085 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1086 }
1087
1088 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1089 {
1090 /* Constant pool loads. */
1091 unsigned int constant;
1092 CORE_ADDR loc;
1093
1094 offset = bits (insn, 0, 7) << 2;
1095 if (insn & 0x0080)
1096 loc = start + 4 + offset;
1097 else
1098 loc = start + 4 - offset;
1099
1100 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1101 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1102
1103 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1104 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1105 }
1106
1107 else if (thumb2_instruction_changes_pc (insn, inst2))
1108 {
1109 /* Don't scan past anything that might change control flow. */
1110 break;
1111 }
1112 else
1113 {
1114 /* The optimizer might shove anything into the prologue,
1115 so we just skip what we don't recognize. */
1116 unrecognized_pc = start;
1117 }
1118
1119 start += 2;
1120 }
1121 else if (thumb_instruction_changes_pc (insn))
1122 {
1123 /* Don't scan past anything that might change control flow. */
1124 break;
1125 }
1126 else
1127 {
1128 /* The optimizer might shove anything into the prologue,
1129 so we just skip what we don't recognize. */
1130 unrecognized_pc = start;
1131 }
1132
1133 start += 2;
1134 }
1135
1136 if (arm_debug)
1137 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1138 paddress (gdbarch, start));
1139
1140 if (unrecognized_pc == 0)
1141 unrecognized_pc = start;
1142
1143 if (cache == NULL)
1144 {
1145 do_cleanups (back_to);
1146 return unrecognized_pc;
1147 }
1148
1149 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1150 {
1151 /* Frame pointer is fp. Frame size is constant. */
1152 cache->framereg = ARM_FP_REGNUM;
1153 cache->framesize = -regs[ARM_FP_REGNUM].k;
1154 }
1155 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1156 {
1157 /* Frame pointer is r7. Frame size is constant. */
1158 cache->framereg = THUMB_FP_REGNUM;
1159 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1160 }
1161 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1162 {
1163 /* Try the stack pointer... this is a bit desperate. */
1164 cache->framereg = ARM_SP_REGNUM;
1165 cache->framesize = -regs[ARM_SP_REGNUM].k;
1166 }
1167 else
1168 {
1169 /* We're just out of luck. We don't know where the frame is. */
1170 cache->framereg = -1;
1171 cache->framesize = 0;
1172 }
1173
1174 for (i = 0; i < 16; i++)
1175 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1176 cache->saved_regs[i].addr = offset;
1177
1178 do_cleanups (back_to);
1179 return unrecognized_pc;
1180 }
1181
1182
1183 /* Try to analyze the instructions starting from PC, which load the symbol
1184 __stack_chk_guard. Return the address that those instructions load (the
1185 address of __stack_chk_guard), set the destination register number in
1186 *DESTREG, and set the size in bytes of the loading instructions in
1187 *OFFSET. Return 0 if the instructions are not recognized. */
1188
1189 static CORE_ADDR
1190 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1191 unsigned int *destreg, int *offset)
1192 {
1193 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1194 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1195 unsigned int low, high, address;
1196
1197 address = 0;
1198 if (is_thumb)
1199 {
1200 unsigned short insn1
1201 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1202
1203 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1204 {
1205 *destreg = bits (insn1, 8, 10);
1206 *offset = 2;
1207 address = bits (insn1, 0, 7);
1208 }
1209 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1210 {
1211 unsigned short insn2
1212 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1213
1214 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1215
1216 insn1
1217 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1218 insn2
1219 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1220
1221 /* movt Rd, #const */
1222 if ((insn1 & 0xfbc0) == 0xf2c0)
1223 {
1224 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1225 *destreg = bits (insn2, 8, 11);
1226 *offset = 8;
1227 address = (high << 16 | low);
1228 }
1229 }
1230 }
1231 else
1232 {
1233 unsigned int insn
1234 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1235
1236 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1237 {
1238 address = bits (insn, 0, 11);
1239 *destreg = bits (insn, 12, 15);
1240 *offset = 4;
1241 }
1242 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1243 {
1244 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1245
1246 insn
1247 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1248
1249 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1250 {
1251 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1252 *destreg = bits (insn, 12, 15);
1253 *offset = 8;
1254 address = (high << 16 | low);
1255 }
1256 }
1257 }
1258
1259 return address;
1260 }
1261
1262 /* Try to skip the sequence of instructions used for the stack protector.
1263 If PC points to the first instruction of this sequence, return the address
1264 of the first instruction after the sequence; otherwise, return the original PC.
1265
1266 On ARM, this sequence of instructions consists mainly of three steps:
1267 Step 1: load symbol __stack_chk_guard,
1268 Step 2: load from address of __stack_chk_guard,
1269 Step 3: store it to somewhere else.
1270
1271 Usually, the instructions in steps 2 and 3 are the same across ARM
1272 architectures. Step 2 is the single instruction 'ldr Rx, [Rn, #0]', and
1273 step 3 is the single instruction 'str Rx, [r7, #immd]'. However, the
1274 instructions in step 1 vary across ARM architectures. On ARMv7,
1275 they are:
1276
1277 movw Rn, #:lower16:__stack_chk_guard
1278 movt Rn, #:upper16:__stack_chk_guard
1279
1280 On ARMv5t, it is,
1281
1282 ldr Rn, .Label
1283 ....
1284 .Label:
1285 .word __stack_chk_guard
1286
1287 Since ldr/str are very common instructions, we can't use them alone as the
1288 'fingerprint' or 'signature' of a stack protector sequence. Instead we use
1289 the sequence {movw/movt, ldr}/ldr/str plus the symbol __stack_chk_guard,
1290 if not stripped, as the 'fingerprint' of a stack protector code sequence. */
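
/* A concrete (purely illustrative) Thumb-2 sequence of this form is

	movw	r3, #:lower16:__stack_chk_guard
	movt	r3, #:upper16:__stack_chk_guard
	ldr	r3, [r3, #0]
	str	r3, [r7, #12]

   Here arm_analyze_load_stack_chk_guard reports *OFFSET == 8 for the
   movw/movt pair, and the two 16-bit ldr/str instructions add 4 more
   bytes, so the skip function returns PC + 12.  The choice of r3, r7
   and the offset #12 is arbitrary.  */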
1291
1292 static CORE_ADDR
1293 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1294 {
1295 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1296 unsigned int address, basereg;
1297 struct minimal_symbol *stack_chk_guard;
1298 int offset;
1299 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1300 CORE_ADDR addr;
1301
1302 /* Try to parse the instructions in Step 1. */
1303 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1304 &basereg, &offset);
1305 if (!addr)
1306 return pc;
1307
1308 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1309 /* If the symbol's name doesn't start with '__stack_chk_guard', this
1310 instruction sequence is not for the stack protector. If the symbol has
1311 been stripped, conservatively assume the sequence is for the stack protector. */
1312 if (stack_chk_guard
1313 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1314 strlen ("__stack_chk_guard")) != 0)
1315 return pc;
1316
1317 if (is_thumb)
1318 {
1319 unsigned int destreg;
1320 unsigned short insn
1321 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1322
1323 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1324 if ((insn & 0xf800) != 0x6800)
1325 return pc;
1326 if (bits (insn, 3, 5) != basereg)
1327 return pc;
1328 destreg = bits (insn, 0, 2);
1329
1330 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1331 byte_order_for_code);
1332 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1333 if ((insn & 0xf800) != 0x6000)
1334 return pc;
1335 if (destreg != bits (insn, 0, 2))
1336 return pc;
1337 }
1338 else
1339 {
1340 unsigned int destreg;
1341 unsigned int insn
1342 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1343
1344 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1345 if ((insn & 0x0e500000) != 0x04100000)
1346 return pc;
1347 if (bits (insn, 16, 19) != basereg)
1348 return pc;
1349 destreg = bits (insn, 12, 15);
1350 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1351 insn = read_memory_unsigned_integer (pc + offset + 4,
1352 4, byte_order_for_code);
1353 if ((insn & 0x0e500000) != 0x04000000)
1354 return pc;
1355 if (bits (insn, 12, 15) != destreg)
1356 return pc;
1357 }
1358 /* The total size of the two ldr/str instructions is 4 bytes on Thumb-2,
1359 and 8 bytes on ARM. */
1360 if (is_thumb)
1361 return pc + offset + 4;
1362 else
1363 return pc + offset + 8;
1364 }
1365
1366 /* Advance the PC across any function entry prologue instructions to
1367 reach some "real" code.
1368
1369 The APCS (ARM Procedure Call Standard) defines the following
1370 prologue:
1371
1372 mov ip, sp
1373 [stmfd sp!, {a1,a2,a3,a4}]
1374 stmfd sp!, {...,fp,ip,lr,pc}
1375 [stfe f7, [sp, #-12]!]
1376 [stfe f6, [sp, #-12]!]
1377 [stfe f5, [sp, #-12]!]
1378 [stfe f4, [sp, #-12]!]
1379 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1380
1381 static CORE_ADDR
1382 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1383 {
1384 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1385 unsigned long inst;
1386 CORE_ADDR skip_pc;
1387 CORE_ADDR func_addr, limit_pc;
1388 struct symtab_and_line sal;
1389
1390 /* See if we can determine the end of the prologue via the symbol table.
1391 If so, then return either PC, or the PC after the prologue, whichever
1392 is greater. */
1393 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1394 {
1395 CORE_ADDR post_prologue_pc
1396 = skip_prologue_using_sal (gdbarch, func_addr);
1397 struct symtab *s = find_pc_symtab (func_addr);
1398
1399 if (post_prologue_pc)
1400 post_prologue_pc
1401 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1402
1403
1404 /* GCC always emits a line note before the prologue and another
1405 one after, even if the two are at the same address or on the
1406 same line. Take advantage of this so that we do not need to
1407 know every instruction that might appear in the prologue. We
1408 will have producer information for most binaries; if it is
1409 missing (e.g. for -gstabs), assume the GNU tools. */
1410 if (post_prologue_pc
1411 && (s == NULL
1412 || s->producer == NULL
1413 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1414 return post_prologue_pc;
1415
1416 if (post_prologue_pc != 0)
1417 {
1418 CORE_ADDR analyzed_limit;
1419
1420 /* For non-GCC compilers, make sure the entire line is an
1421 acceptable prologue; GDB will round this function's
1422 return value up to the end of the following line so we
1423 can not skip just part of a line (and we do not want to).
1424
1425 RealView does not treat the prologue specially, but does
1426 associate prologue code with the opening brace; so this
1427 lets us skip the first line if we think it is the opening
1428 brace. */
1429 if (arm_pc_is_thumb (gdbarch, func_addr))
1430 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1431 post_prologue_pc, NULL);
1432 else
1433 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1434 post_prologue_pc, NULL);
1435
1436 if (analyzed_limit != post_prologue_pc)
1437 return func_addr;
1438
1439 return post_prologue_pc;
1440 }
1441 }
1442
1443 /* Can't determine prologue from the symbol table, need to examine
1444 instructions. */
1445
1446 /* Find an upper limit on the function prologue using the debug
1447 information. If the debug information could not be used to provide
1448 that bound, then use an arbitrarily large number as the upper bound.
1449 /* Like arm_scan_prologue, stop no later than pc + 64. */
1450 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1451 if (limit_pc == 0)
1452 limit_pc = pc + 64; /* Magic. */
1453
1454
1455 /* Check if this is Thumb code. */
1456 if (arm_pc_is_thumb (gdbarch, pc))
1457 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1458
1459 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1460 {
1461 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1462
1463 /* "mov ip, sp" is no longer a required part of the prologue. */
1464 if (inst == 0xe1a0c00d) /* mov ip, sp */
1465 continue;
1466
1467 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1468 continue;
1469
1470 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1471 continue;
1472
1473 /* Some prologues begin with "str lr, [sp, #-4]!". */
1474 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1475 continue;
1476
1477 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1478 continue;
1479
1480 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1481 continue;
1482
1483 /* Any insns after this point may float into the code, if it makes
1484 for better instruction scheduling, so we skip them only if we
1485 find them, but still consider the function to be frame-ful. */
1486
1487 /* We may have either one sfmfd instruction here, or several stfe
1488 insns, depending on the version of floating point code we
1489 support. */
1490 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1491 continue;
1492
1493 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1494 continue;
1495
1496 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1497 continue;
1498
1499 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1500 continue;
1501
1502 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1503 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1504 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1505 continue;
1506
1507 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1508 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1509 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1510 continue;
1511
1512 /* Un-recognized instruction; stop scanning. */
1513 break;
1514 }
1515
1516 return skip_pc; /* End of prologue. */
1517 }
1518
1519 /* *INDENT-OFF* */
1520 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1521 This function decodes a Thumb function prologue to determine:
1522 1) the size of the stack frame
1523 2) which registers are saved on it
1524 3) the offsets of saved regs
1525 4) the offset from the stack pointer to the frame pointer
1526
1527 A typical Thumb function prologue would create this stack frame
1528 (offsets relative to FP)
1529 old SP -> 24 stack parameters
1530 20 LR
1531 16 R7
1532 R7 -> 0 local variables (16 bytes)
1533 SP -> -12 additional stack space (12 bytes)
1534 The frame size would thus be 36 bytes, and the frame offset would be
1535 12 bytes. The frame register is R7.
1536
1537 The comments for thumb_analyze_prologue() describe the algorithm we use
1538 to detect the end of the prologue. */
1539 /* *INDENT-ON* */
1540
1541 static void
1542 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1543 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1544 {
1545 CORE_ADDR prologue_start;
1546 CORE_ADDR prologue_end;
1547 CORE_ADDR current_pc;
1548
1549 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1550 &prologue_end))
1551 {
1552 /* See comment in arm_scan_prologue for an explanation of
1553 this heuristic. */
1554 if (prologue_end > prologue_start + 64)
1555 {
1556 prologue_end = prologue_start + 64;
1557 }
1558 }
1559 else
1560 /* We're in the boondocks: we have no idea where the start of the
1561 function is. */
1562 return;
1563
1564 prologue_end = min (prologue_end, prev_pc);
1565
1566 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1567 }
1568
1569 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1570
1571 static int
1572 arm_instruction_changes_pc (uint32_t this_instr)
1573 {
1574 if (bits (this_instr, 28, 31) == INST_NV)
1575 /* Unconditional instructions. */
1576 switch (bits (this_instr, 24, 27))
1577 {
1578 case 0xa:
1579 case 0xb:
1580 /* Branch with Link and change to Thumb. */
1581 return 1;
1582 case 0xc:
1583 case 0xd:
1584 case 0xe:
1585 /* Coprocessor register transfer. */
1586 if (bits (this_instr, 12, 15) == 15)
1587 error (_("Invalid update to pc in instruction"));
1588 return 0;
1589 default:
1590 return 0;
1591 }
1592 else
1593 switch (bits (this_instr, 25, 27))
1594 {
1595 case 0x0:
1596 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1597 {
1598 /* Multiplies and extra load/stores. */
1599 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1600 /* Neither multiplies nor extension load/stores are allowed
1601 to modify PC. */
1602 return 0;
1603
1604 /* Otherwise, miscellaneous instructions. */
1605
1606 /* BX <reg>, BXJ <reg>, BLX <reg> */
1607 if (bits (this_instr, 4, 27) == 0x12fff1
1608 || bits (this_instr, 4, 27) == 0x12fff2
1609 || bits (this_instr, 4, 27) == 0x12fff3)
1610 return 1;
1611
1612 /* Other miscellaneous instructions are unpredictable if they
1613 modify PC. */
1614 return 0;
1615 }
1616 /* Data processing instruction. Fall through. */
1617
1618 case 0x1:
1619 if (bits (this_instr, 12, 15) == 15)
1620 return 1;
1621 else
1622 return 0;
1623
1624 case 0x2:
1625 case 0x3:
1626 /* Media instructions and architecturally undefined instructions. */
1627 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1628 return 0;
1629
1630 /* Stores. */
1631 if (bit (this_instr, 20) == 0)
1632 return 0;
1633
1634 /* Loads. */
1635 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1636 return 1;
1637 else
1638 return 0;
1639
1640 case 0x4:
1641 /* Load/store multiple. */
1642 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1643 return 1;
1644 else
1645 return 0;
1646
1647 case 0x5:
1648 /* Branch and branch with link. */
1649 return 1;
1650
1651 case 0x6:
1652 case 0x7:
1653 /* Coprocessor transfers or SWIs can not affect PC. */
1654 return 0;
1655
1656 default:
1657 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1658 }
1659 }
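
/* For instance (illustrative encodings): 0xe59ff004 ("ldr pc, [pc, #4]")
   and 0xe12fff1e ("bx lr") are reported as changing the PC, while
   0xe1a00001 ("mov r0, r1") is not.  */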
1660
1661 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1662 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1663 fill it in. Return the first address not recognized as a prologue
1664 instruction.
1665
1666 We recognize all the instructions typically found in ARM prologues,
1667 plus harmless instructions which can be skipped (either for analysis
1668 purposes, or a more restrictive set that can be skipped when finding
1669 the end of the prologue). */
1670
1671 static CORE_ADDR
1672 arm_analyze_prologue (struct gdbarch *gdbarch,
1673 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1674 struct arm_prologue_cache *cache)
1675 {
1676 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1677 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1678 int regno;
1679 CORE_ADDR offset, current_pc;
1680 pv_t regs[ARM_FPS_REGNUM];
1681 struct pv_area *stack;
1682 struct cleanup *back_to;
1683 int framereg, framesize;
1684 CORE_ADDR unrecognized_pc = 0;
1685
1686 /* Search the prologue looking for instructions that set up the
1687 frame pointer, adjust the stack pointer, and save registers.
1688
1689 Be careful, however, and if it doesn't look like a prologue,
1690 don't try to scan it. If, for instance, a frameless function
1691 begins with stmfd sp!, then we will tell ourselves there is
1692 a frame, which will confuse stack traceback, as well as "finish"
1693 and other operations that rely on a knowledge of the stack
1694 traceback. */
1695
1696 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1697 regs[regno] = pv_register (regno, 0);
1698 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1699 back_to = make_cleanup_free_pv_area (stack);
1700
1701 for (current_pc = prologue_start;
1702 current_pc < prologue_end;
1703 current_pc += 4)
1704 {
1705 unsigned int insn
1706 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1707
1708 if (insn == 0xe1a0c00d) /* mov ip, sp */
1709 {
1710 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1711 continue;
1712 }
1713 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1714 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1715 {
1716 unsigned imm = insn & 0xff; /* immediate value */
1717 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1718 int rd = bits (insn, 12, 15);
1719 imm = (imm >> rot) | (imm << (32 - rot));
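/* For example, "add r3, sp, #0x1000" (0xe28d3a01) has imm8 == 1 and
   rot == 20, so the decoded immediate is 0x1000.  */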
1720 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1721 continue;
1722 }
1723 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1724 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1725 {
1726 unsigned imm = insn & 0xff; /* immediate value */
1727 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1728 int rd = bits (insn, 12, 15);
1729 imm = (imm >> rot) | (imm << (32 - rot));
1730 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1731 continue;
1732 }
1733 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1734 [sp, #-4]! */
1735 {
1736 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1737 break;
1738 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1739 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1740 regs[bits (insn, 12, 15)]);
1741 continue;
1742 }
1743 else if ((insn & 0xffff0000) == 0xe92d0000)
1744 /* stmfd sp!, {..., fp, ip, lr, pc}
1745 or
1746 stmfd sp!, {a1, a2, a3, a4} */
1747 {
1748 int mask = insn & 0xffff;
1749
1750 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1751 break;
1752
1753 /* Calculate offsets of saved registers. */
1754 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1755 if (mask & (1 << regno))
1756 {
1757 regs[ARM_SP_REGNUM]
1758 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1759 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1760 }
1761 }
1762 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1763 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1764 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1765 {
1766 /* No need to add this to saved_regs -- it's just an arg reg. */
1767 continue;
1768 }
1769 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1770 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1771 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1772 {
1773 /* No need to add this to saved_regs -- it's just an arg reg. */
1774 continue;
1775 }
1776 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1777 { registers } */
1778 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1779 {
1780 /* No need to add this to saved_regs -- it's just arg regs. */
1781 continue;
1782 }
1783 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1784 {
1785 unsigned imm = insn & 0xff; /* immediate value */
1786 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1787 imm = (imm >> rot) | (imm << (32 - rot));
1788 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1789 }
1790 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1791 {
1792 unsigned imm = insn & 0xff; /* immediate value */
1793 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1794 imm = (imm >> rot) | (imm << (32 - rot));
1795 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1796 }
1797 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1798 [sp, -#c]! */
1799 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1800 {
1801 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1802 break;
1803
1804 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1805 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1806 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1807 }
1808 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1809 [sp!] */
1810 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1811 {
1812 int n_saved_fp_regs;
1813 unsigned int fp_start_reg, fp_bound_reg;
1814
1815 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1816 break;
1817
1818 if ((insn & 0x800) == 0x800) /* N0 is set */
1819 {
1820 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1821 n_saved_fp_regs = 3;
1822 else
1823 n_saved_fp_regs = 1;
1824 }
1825 else
1826 {
1827 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1828 n_saved_fp_regs = 2;
1829 else
1830 n_saved_fp_regs = 4;
1831 }
1832
1833 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1834 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1835 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1836 {
1837 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1838 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1839 regs[fp_start_reg]);
1840 }
1841 }
1842 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1843 {
1844 /* Allow some special function calls when skipping the
1845 prologue; GCC generates these before storing arguments to
1846 the stack. */
1847 CORE_ADDR dest = BranchDest (current_pc, insn);
1848
1849 if (skip_prologue_function (gdbarch, dest, 0))
1850 continue;
1851 else
1852 break;
1853 }
1854 else if ((insn & 0xf0000000) != 0xe0000000)
1855 break; /* Condition not true, exit early. */
1856 else if (arm_instruction_changes_pc (insn))
1857 /* Don't scan past anything that might change control flow. */
1858 break;
1859 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1860 {
1861 /* Ignore block loads from the stack, potentially copying
1862 parameters from memory. */
1863 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1864 continue;
1865 else
1866 break;
1867 }
1868 else if ((insn & 0xfc500000) == 0xe4100000)
1869 {
1870 /* Similarly ignore single loads from the stack. */
1871 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1872 continue;
1873 else
1874 break;
1875 }
1876 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1877 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1878 register instead of the stack. */
1879 continue;
1880 else
1881 {
1882 /* The optimizer might shove anything into the prologue,
1883 so we just skip what we don't recognize. */
1884 unrecognized_pc = current_pc;
1885 continue;
1886 }
1887 }
1888
1889 if (unrecognized_pc == 0)
1890 unrecognized_pc = current_pc;
1891
1892 /* The frame size is just the distance from the frame register
1893 to the original stack pointer. */
1894 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1895 {
1896 /* Frame pointer is fp. */
1897 framereg = ARM_FP_REGNUM;
1898 framesize = -regs[ARM_FP_REGNUM].k;
1899 }
1900 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1901 {
1902 /* Try the stack pointer... this is a bit desperate. */
1903 framereg = ARM_SP_REGNUM;
1904 framesize = -regs[ARM_SP_REGNUM].k;
1905 }
1906 else
1907 {
1908 /* We're just out of luck. We don't know where the frame is. */
1909 framereg = -1;
1910 framesize = 0;
1911 }
1912
1913 if (cache)
1914 {
1915 cache->framereg = framereg;
1916 cache->framesize = framesize;
1917
1918 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1919 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1920 cache->saved_regs[regno].addr = offset;
1921 }
1922
1923 if (arm_debug)
1924 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1925 paddress (gdbarch, unrecognized_pc));
1926
1927 do_cleanups (back_to);
1928 return unrecognized_pc;
1929 }
1930
1931 static void
1932 arm_scan_prologue (struct frame_info *this_frame,
1933 struct arm_prologue_cache *cache)
1934 {
1935 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1936 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1937 int regno;
1938 CORE_ADDR prologue_start, prologue_end, current_pc;
1939 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1940 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1941 pv_t regs[ARM_FPS_REGNUM];
1942 struct pv_area *stack;
1943 struct cleanup *back_to;
1944 CORE_ADDR offset;
1945
1946 /* Assume there is no frame until proven otherwise. */
1947 cache->framereg = ARM_SP_REGNUM;
1948 cache->framesize = 0;
1949
1950 /* Check for Thumb prologue. */
1951 if (arm_frame_is_thumb (this_frame))
1952 {
1953 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1954 return;
1955 }
1956
1957 /* Find the function prologue. If we can't find the function in
1958 the symbol table, peek in the stack frame to find the PC. */
1959 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1960 &prologue_end))
1961 {
1962 /* One way to find the end of the prologue (which works well
1963 for unoptimized code) is to do the following:
1964
1965 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1966
1967 if (sal.line == 0)
1968 prologue_end = prev_pc;
1969 else if (sal.end < prologue_end)
1970 prologue_end = sal.end;
1971
1972 This mechanism is very accurate so long as the optimizer
1973 doesn't move any instructions from the function body into the
1974 prologue. If this happens, sal.end will be the last
1975 instruction in the first hunk of prologue code just before
1976 the first instruction that the scheduler has moved from
1977 the body to the prologue.
1978
1979 In order to make sure that we scan all of the prologue
1980 instructions, we use a slightly less accurate mechanism which
1981 may scan more than necessary. To help compensate for this
1982 lack of accuracy, the prologue scanning loop below contains
1983 several clauses which'll cause the loop to terminate early if
1984 an implausible prologue instruction is encountered.
1985
1986 The expression
1987
1988 prologue_start + 64
1989
1990 is a suitable endpoint since it accounts for the largest
1991 possible prologue plus up to five instructions inserted by
1992 the scheduler. */
1993
1994 if (prologue_end > prologue_start + 64)
1995 {
1996 prologue_end = prologue_start + 64; /* See above. */
1997 }
1998 }
1999 else
2000 {
2001 /* We have no symbol information. Our only option is to assume this
2002 function has a standard stack frame and the normal frame register.
2003 Then, we can find the value of our frame pointer on entrance to
2004 the callee (or at the present moment if this is the innermost frame).
2005 The value stored there should be the address of the stmfd + 8. */
2006 CORE_ADDR frame_loc;
2007 LONGEST return_value;
2008
2009 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
2010 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
2011 return;
2012 else
2013 {
2014 prologue_start = gdbarch_addr_bits_remove
2015 (gdbarch, return_value) - 8;
2016 prologue_end = prologue_start + 64; /* See above. */
2017 }
2018 }
2019
2020 if (prev_pc < prologue_end)
2021 prologue_end = prev_pc;
2022
2023 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2024 }
2025
2026 static struct arm_prologue_cache *
2027 arm_make_prologue_cache (struct frame_info *this_frame)
2028 {
2029 int reg;
2030 struct arm_prologue_cache *cache;
2031 CORE_ADDR unwound_fp;
2032
2033 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2034 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2035
2036 arm_scan_prologue (this_frame, cache);
2037
2038 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2039 if (unwound_fp == 0)
2040 return cache;
2041
2042 cache->prev_sp = unwound_fp + cache->framesize;
2043
2044 /* Calculate actual addresses of saved registers using offsets
2045 determined by arm_scan_prologue. */
2046 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2047 if (trad_frame_addr_p (cache->saved_regs, reg))
2048 cache->saved_regs[reg].addr += cache->prev_sp;
2049
2050 return cache;
2051 }
2052
2053 /* Our frame ID for a normal frame is the current function's starting PC
2054 and the caller's SP when we were called. */
2055
2056 static void
2057 arm_prologue_this_id (struct frame_info *this_frame,
2058 void **this_cache,
2059 struct frame_id *this_id)
2060 {
2061 struct arm_prologue_cache *cache;
2062 struct frame_id id;
2063 CORE_ADDR pc, func;
2064
2065 if (*this_cache == NULL)
2066 *this_cache = arm_make_prologue_cache (this_frame);
2067 cache = *this_cache;
2068
2069 /* This is meant to halt the backtrace at "_start". */
2070 pc = get_frame_pc (this_frame);
2071 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2072 return;
2073
2074 /* If we've hit a wall, stop. */
2075 if (cache->prev_sp == 0)
2076 return;
2077
2078 /* Use function start address as part of the frame ID. If we cannot
2079 identify the start address (due to missing symbol information),
2080 fall back to just using the current PC. */
2081 func = get_frame_func (this_frame);
2082 if (!func)
2083 func = pc;
2084
2085 id = frame_id_build (cache->prev_sp, func);
2086 *this_id = id;
2087 }
2088
2089 static struct value *
2090 arm_prologue_prev_register (struct frame_info *this_frame,
2091 void **this_cache,
2092 int prev_regnum)
2093 {
2094 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2095 struct arm_prologue_cache *cache;
2096
2097 if (*this_cache == NULL)
2098 *this_cache = arm_make_prologue_cache (this_frame);
2099 cache = *this_cache;
2100
2101 /* If we are asked to unwind the PC, then we need to return the LR
2102 instead. The prologue may save PC, but it will point into this
2103 frame's prologue, not the next frame's resume location. Also
2104 strip the saved T bit. A valid LR may have the low bit set, but
2105 a valid PC never does. */
2106 if (prev_regnum == ARM_PC_REGNUM)
2107 {
2108 CORE_ADDR lr;
2109
2110 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2111 return frame_unwind_got_constant (this_frame, prev_regnum,
2112 arm_addr_bits_remove (gdbarch, lr));
2113 }
2114
2115 /* SP is generally not saved to the stack, but this frame is
2116 identified by the next frame's stack pointer at the time of the call.
2117 The value was already reconstructed into PREV_SP. */
2118 if (prev_regnum == ARM_SP_REGNUM)
2119 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2120
2121 /* The CPSR may have been changed by the call instruction and by the
2122 called function. The only bit we can reconstruct is the T bit,
2123 by checking the low bit of LR as of the call. This is a reliable
2124 indicator of Thumb-ness except for some ARM v4T pre-interworking
2125 Thumb code, which could get away with a clear low bit as long as
2126 the called function did not use bx. Guess that all other
2127 bits are unchanged; the condition flags are presumably lost,
2128 but the processor status is likely valid. */
2129 if (prev_regnum == ARM_PS_REGNUM)
2130 {
2131 CORE_ADDR lr, cpsr;
2132 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2133
2134 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2135 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2136 if (IS_THUMB_ADDR (lr))
2137 cpsr |= t_bit;
2138 else
2139 cpsr &= ~t_bit;
2140 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2141 }
2142
2143 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2144 prev_regnum);
2145 }
2146
2147 struct frame_unwind arm_prologue_unwind = {
2148 NORMAL_FRAME,
2149 default_frame_unwind_stop_reason,
2150 arm_prologue_this_id,
2151 arm_prologue_prev_register,
2152 NULL,
2153 default_frame_sniffer
2154 };
2155
2156 /* Maintain a list of ARM exception table entries per objfile, similar to the
2157 list of mapping symbols. We only cache entries for standard ARM-defined
2158 personality routines; the cache will contain only the frame unwinding
2159 instructions associated with the entry (not the descriptors). */
2160
2161 static const struct objfile_data *arm_exidx_data_key;
2162
2163 struct arm_exidx_entry
2164 {
2165 bfd_vma addr;
2166 gdb_byte *entry;
2167 };
2168 typedef struct arm_exidx_entry arm_exidx_entry_s;
2169 DEF_VEC_O(arm_exidx_entry_s);
2170
2171 struct arm_exidx_data
2172 {
2173 VEC(arm_exidx_entry_s) **section_maps;
2174 };
2175
2176 static void
2177 arm_exidx_data_free (struct objfile *objfile, void *arg)
2178 {
2179 struct arm_exidx_data *data = arg;
2180 unsigned int i;
2181
2182 for (i = 0; i < objfile->obfd->section_count; i++)
2183 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2184 }
2185
2186 static inline int
2187 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2188 const struct arm_exidx_entry *rhs)
2189 {
2190 return lhs->addr < rhs->addr;
2191 }
2192
2193 static struct obj_section *
2194 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2195 {
2196 struct obj_section *osect;
2197
2198 ALL_OBJFILE_OSECTIONS (objfile, osect)
2199 if (bfd_get_section_flags (objfile->obfd,
2200 osect->the_bfd_section) & SEC_ALLOC)
2201 {
2202 bfd_vma start, size;
2203 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2204 size = bfd_get_section_size (osect->the_bfd_section);
2205
2206 if (start <= vma && vma < start + size)
2207 return osect;
2208 }
2209
2210 return NULL;
2211 }
2212
2213 /* Parse contents of exception table and exception index sections
2214 of OBJFILE, and fill in the exception table entry cache.
2215
2216 For each entry that refers to a standard ARM-defined personality
2217 routine, extract the frame unwinding instructions (from either
2218 the index or the table section). The unwinding instructions
2219 are normalized by:
2220 - extracting them from the rest of the table data
2221 - converting to host endianness
2222 - appending the implicit 0xb0 ("Finish") code
2223
2224 The extracted and normalized instructions are stored for later
2225 retrieval by the arm_find_exidx_entry routine. */
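/* For reference, each .ARM.exidx entry is a pair of 32-bit words: the
   first is a prel31 offset to the start of a function, and the second
   is either EXIDX_CANTUNWIND (1), an inline "compact" entry with bit 31
   set, or a prel31 offset into .ARM.extab where the full table entry
   lives.  The parsing loop below follows this layout.  */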
2226
2227 static void
2228 arm_exidx_new_objfile (struct objfile *objfile)
2229 {
2230 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2231 struct arm_exidx_data *data;
2232 asection *exidx, *extab;
2233 bfd_vma exidx_vma = 0, extab_vma = 0;
2234 bfd_size_type exidx_size = 0, extab_size = 0;
2235 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2236 LONGEST i;
2237
2238 /* If we've already touched this file, do nothing. */
2239 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2240 return;
2241
2242 /* Read contents of exception table and index. */
2243 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2244 if (exidx)
2245 {
2246 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2247 exidx_size = bfd_get_section_size (exidx);
2248 exidx_data = xmalloc (exidx_size);
2249 make_cleanup (xfree, exidx_data);
2250
2251 if (!bfd_get_section_contents (objfile->obfd, exidx,
2252 exidx_data, 0, exidx_size))
2253 {
2254 do_cleanups (cleanups);
2255 return;
2256 }
2257 }
2258
2259 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2260 if (extab)
2261 {
2262 extab_vma = bfd_section_vma (objfile->obfd, extab);
2263 extab_size = bfd_get_section_size (extab);
2264 extab_data = xmalloc (extab_size);
2265 make_cleanup (xfree, extab_data);
2266
2267 if (!bfd_get_section_contents (objfile->obfd, extab,
2268 extab_data, 0, extab_size))
2269 {
2270 do_cleanups (cleanups);
2271 return;
2272 }
2273 }
2274
2275 /* Allocate exception table data structure. */
2276 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2277 set_objfile_data (objfile, arm_exidx_data_key, data);
2278 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2279 objfile->obfd->section_count,
2280 VEC(arm_exidx_entry_s) *);
2281
2282 /* Fill in exception table. */
2283 for (i = 0; i < exidx_size / 8; i++)
2284 {
2285 struct arm_exidx_entry new_exidx_entry;
2286 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2287 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2288 bfd_vma addr = 0, word = 0;
2289 int n_bytes = 0, n_words = 0;
2290 struct obj_section *sec;
2291 gdb_byte *entry = NULL;
2292
2293 /* Extract address of start of function. */
2294 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2295 idx += exidx_vma + i * 8;
2296
2297 /* Find section containing function and compute section offset. */
2298 sec = arm_obj_section_from_vma (objfile, idx);
2299 if (sec == NULL)
2300 continue;
2301 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2302
2303 /* Determine address of exception table entry. */
2304 if (val == 1)
2305 {
2306 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2307 }
2308 else if ((val & 0xff000000) == 0x80000000)
2309 {
2310 /* Exception table entry embedded in .ARM.exidx
2311 -- must be short form. */
2312 word = val;
2313 n_bytes = 3;
2314 }
2315 else if (!(val & 0x80000000))
2316 {
2317 /* Exception table entry in .ARM.extab. */
2318 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2319 addr += exidx_vma + i * 8 + 4;
2320
2321 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2322 {
2323 word = bfd_h_get_32 (objfile->obfd,
2324 extab_data + addr - extab_vma);
2325 addr += 4;
2326
2327 if ((word & 0xff000000) == 0x80000000)
2328 {
2329 /* Short form. */
2330 n_bytes = 3;
2331 }
2332 else if ((word & 0xff000000) == 0x81000000
2333 || (word & 0xff000000) == 0x82000000)
2334 {
2335 /* Long form. */
2336 n_bytes = 2;
2337 n_words = ((word >> 16) & 0xff);
2338 }
2339 else if (!(word & 0x80000000))
2340 {
2341 bfd_vma pers;
2342 struct obj_section *pers_sec;
2343 int gnu_personality = 0;
2344
2345 /* Custom personality routine. */
2346 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2347 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2348
2349 /* Check whether we've got one of the variants of the
2350 GNU personality routines. */
2351 pers_sec = arm_obj_section_from_vma (objfile, pers);
2352 if (pers_sec)
2353 {
2354 static const char *personality[] =
2355 {
2356 "__gcc_personality_v0",
2357 "__gxx_personality_v0",
2358 "__gcj_personality_v0",
2359 "__gnu_objc_personality_v0",
2360 NULL
2361 };
2362
2363 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2364 int k;
2365
2366 for (k = 0; personality[k]; k++)
2367 if (lookup_minimal_symbol_by_pc_name
2368 (pc, personality[k], objfile))
2369 {
2370 gnu_personality = 1;
2371 break;
2372 }
2373 }
2374
2375 /* If so, the next word contains a word count in the high
2376 byte, followed by the same unwind instructions as the
2377 pre-defined forms. */
2378 if (gnu_personality
2379 && addr + 4 <= extab_vma + extab_size)
2380 {
2381 word = bfd_h_get_32 (objfile->obfd,
2382 extab_data + addr - extab_vma);
2383 addr += 4;
2384 n_bytes = 3;
2385 n_words = ((word >> 24) & 0xff);
2386 }
2387 }
2388 }
2389 }
2390
2391 /* Sanity check address. */
2392 if (n_words)
2393 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2394 n_words = n_bytes = 0;
2395
2396 /* The unwind instructions reside in WORD (only the N_BYTES least
2397 significant bytes are valid), followed by N_WORDS words in the
2398 extab section starting at ADDR. */
2399 if (n_bytes || n_words)
2400 {
2401 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2402 n_bytes + n_words * 4 + 1);
2403
2404 while (n_bytes--)
2405 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2406
2407 while (n_words--)
2408 {
2409 word = bfd_h_get_32 (objfile->obfd,
2410 extab_data + addr - extab_vma);
2411 addr += 4;
2412
2413 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2414 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2415 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2416 *p++ = (gdb_byte) (word & 0xff);
2417 }
2418
2419 /* Implied "Finish" to terminate the list. */
2420 *p++ = 0xb0;
2421 }
2422
2423 /* Push the entry onto the vector.  Entries are guaranteed to
2424 appear in order of increasing address.  */
2425 new_exidx_entry.addr = idx;
2426 new_exidx_entry.entry = entry;
2427 VEC_safe_push (arm_exidx_entry_s,
2428 data->section_maps[sec->the_bfd_section->index],
2429 &new_exidx_entry);
2430 }
2431
2432 do_cleanups (cleanups);
2433 }
2434
2435 /* Search for the exception table entry covering MEMADDR. If one is found,
2436 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2437 set *START to the start of the region covered by this entry. */
2438
2439 static gdb_byte *
2440 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2441 {
2442 struct obj_section *sec;
2443
2444 sec = find_pc_section (memaddr);
2445 if (sec != NULL)
2446 {
2447 struct arm_exidx_data *data;
2448 VEC(arm_exidx_entry_s) *map;
2449 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2450 unsigned int idx;
2451
2452 data = objfile_data (sec->objfile, arm_exidx_data_key);
2453 if (data != NULL)
2454 {
2455 map = data->section_maps[sec->the_bfd_section->index];
2456 if (!VEC_empty (arm_exidx_entry_s, map))
2457 {
2458 struct arm_exidx_entry *map_sym;
2459
2460 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2461 arm_compare_exidx_entries);
2462
2463 /* VEC_lower_bound finds the earliest ordered insertion
2464 point. If the following symbol starts at this exact
2465 address, we use that; otherwise, the preceding
2466 exception table entry covers this address. */
2467 if (idx < VEC_length (arm_exidx_entry_s, map))
2468 {
2469 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2470 if (map_sym->addr == map_key.addr)
2471 {
2472 if (start)
2473 *start = map_sym->addr + obj_section_addr (sec);
2474 return map_sym->entry;
2475 }
2476 }
2477
2478 if (idx > 0)
2479 {
2480 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2481 if (start)
2482 *start = map_sym->addr + obj_section_addr (sec);
2483 return map_sym->entry;
2484 }
2485 }
2486 }
2487 }
2488
2489 return NULL;
2490 }
2491
2492 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2493 instruction list from the ARM exception table entry ENTRY, allocate and
2494 return a prologue cache structure describing how to unwind this frame.
2495
2496 Return NULL if the unwinding instruction list contains a "spare",
2497 "reserved" or "refuse to unwind" instruction as defined in section
2498 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2499 for the ARM Architecture" document. */
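/* Brief summary of the compact unwinding opcodes decoded below (see the
   EHABI document for the authoritative list): 00xxxxxx and 01xxxxxx
   add or subtract ((x << 2) + 4) from vsp; 1000iiii iiiiiiii pops
   r4-r15 under a mask; 1001nnnn sets vsp from register nnnn; 1010xnnn
   pops r4-r[4+nnn] (plus LR if x is set); 10110000 is "Finish";
   10110001 0000iiii pops r0-r3 under a mask; 10110010 takes a ULEB128
   stack adjustment; and the 0xb3, 0xb8-0xbf, 0xc0-0xc9 and 0xd0-0xd7
   forms pop VFP or iWMMXt registers.  */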
2500
2501 static struct arm_prologue_cache *
2502 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2503 {
2504 CORE_ADDR vsp = 0;
2505 int vsp_valid = 0;
2506
2507 struct arm_prologue_cache *cache;
2508 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2509 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2510
2511 for (;;)
2512 {
2513 gdb_byte insn;
2514
2515 /* Whenever we reload SP, we have to retrieve its actual
2516 value in the current frame. */
2517 if (!vsp_valid)
2518 {
2519 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2520 {
2521 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2522 vsp = get_frame_register_unsigned (this_frame, reg);
2523 }
2524 else
2525 {
2526 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2527 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2528 }
2529
2530 vsp_valid = 1;
2531 }
2532
2533 /* Decode next unwind instruction. */
2534 insn = *entry++;
2535
2536 if ((insn & 0xc0) == 0)
2537 {
2538 int offset = insn & 0x3f;
2539 vsp += (offset << 2) + 4;
2540 }
2541 else if ((insn & 0xc0) == 0x40)
2542 {
2543 int offset = insn & 0x3f;
2544 vsp -= (offset << 2) + 4;
2545 }
2546 else if ((insn & 0xf0) == 0x80)
2547 {
2548 int mask = ((insn & 0xf) << 8) | *entry++;
2549 int i;
2550
2551 /* The special case of an all-zero mask identifies
2552 "Refuse to unwind". We return NULL to fall back
2553 to the prologue analyzer. */
2554 if (mask == 0)
2555 return NULL;
2556
2557 /* Pop registers r4..r15 under mask. */
2558 for (i = 0; i < 12; i++)
2559 if (mask & (1 << i))
2560 {
2561 cache->saved_regs[4 + i].addr = vsp;
2562 vsp += 4;
2563 }
2564
2565 /* Special-case popping SP -- we need to reload vsp. */
2566 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2567 vsp_valid = 0;
2568 }
2569 else if ((insn & 0xf0) == 0x90)
2570 {
2571 int reg = insn & 0xf;
2572
2573 /* Reserved cases. */
2574 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2575 return NULL;
2576
2577 /* Set SP from another register and mark VSP for reload. */
2578 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2579 vsp_valid = 0;
2580 }
2581 else if ((insn & 0xf0) == 0xa0)
2582 {
2583 int count = insn & 0x7;
2584 int pop_lr = (insn & 0x8) != 0;
2585 int i;
2586
2587 /* Pop r4..r[4+count]. */
2588 for (i = 0; i <= count; i++)
2589 {
2590 cache->saved_regs[4 + i].addr = vsp;
2591 vsp += 4;
2592 }
2593
2594 /* If indicated by flag, pop LR as well. */
2595 if (pop_lr)
2596 {
2597 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2598 vsp += 4;
2599 }
2600 }
2601 else if (insn == 0xb0)
2602 {
2603 /* We could only have updated PC by popping into it; if so, it
2604 will show up as an address. Otherwise, copy LR into PC. */
2605 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2606 cache->saved_regs[ARM_PC_REGNUM]
2607 = cache->saved_regs[ARM_LR_REGNUM];
2608
2609 /* We're done. */
2610 break;
2611 }
2612 else if (insn == 0xb1)
2613 {
2614 int mask = *entry++;
2615 int i;
2616
2617 /* An all-zero mask or a mask >= 16 is "spare". */
2618 if (mask == 0 || mask >= 16)
2619 return NULL;
2620
2621 /* Pop r0..r3 under mask. */
2622 for (i = 0; i < 4; i++)
2623 if (mask & (1 << i))
2624 {
2625 cache->saved_regs[i].addr = vsp;
2626 vsp += 4;
2627 }
2628 }
2629 else if (insn == 0xb2)
2630 {
2631 ULONGEST offset = 0;
2632 unsigned shift = 0;
2633
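/* Opcode 0xb2: the operand is a ULEB128-encoded count, and the
   resulting adjustment is 0x204 + (count << 2), covering stack frames
   too large for the 0x00-0x3f opcodes to describe.  */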
2634 do
2635 {
2636 offset |= (*entry & 0x7f) << shift;
2637 shift += 7;
2638 }
2639 while (*entry++ & 0x80);
2640
2641 vsp += 0x204 + (offset << 2);
2642 }
2643 else if (insn == 0xb3)
2644 {
2645 int start = *entry >> 4;
2646 int count = (*entry++) & 0xf;
2647 int i;
2648
2649 /* Only registers D0..D15 are valid here. */
2650 if (start + count >= 16)
2651 return NULL;
2652
2653 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2654 for (i = 0; i <= count; i++)
2655 {
2656 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2657 vsp += 8;
2658 }
2659
2660 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2661 vsp += 4;
2662 }
2663 else if ((insn & 0xf8) == 0xb8)
2664 {
2665 int count = insn & 0x7;
2666 int i;
2667
2668 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2669 for (i = 0; i <= count; i++)
2670 {
2671 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2672 vsp += 8;
2673 }
2674
2675 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2676 vsp += 4;
2677 }
2678 else if (insn == 0xc6)
2679 {
2680 int start = *entry >> 4;
2681 int count = (*entry++) & 0xf;
2682 int i;
2683
2684 /* Only registers WR0..WR15 are valid. */
2685 if (start + count >= 16)
2686 return NULL;
2687
2688 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2689 for (i = 0; i <= count; i++)
2690 {
2691 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2692 vsp += 8;
2693 }
2694 }
2695 else if (insn == 0xc7)
2696 {
2697 int mask = *entry++;
2698 int i;
2699
2700 /* An all-zero mask or a mask >= 16 is "spare". */
2701 if (mask == 0 || mask >= 16)
2702 return NULL;
2703
2704 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2705 for (i = 0; i < 4; i++)
2706 if (mask & (1 << i))
2707 {
2708 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2709 vsp += 4;
2710 }
2711 }
2712 else if ((insn & 0xf8) == 0xc0)
2713 {
2714 int count = insn & 0x7;
2715 int i;
2716
2717 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2718 for (i = 0; i <= count; i++)
2719 {
2720 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2721 vsp += 8;
2722 }
2723 }
2724 else if (insn == 0xc8)
2725 {
2726 int start = *entry >> 4;
2727 int count = (*entry++) & 0xf;
2728 int i;
2729
2730 /* Only registers D0..D31 are valid. */
2731 if (start + count >= 16)
2732 return NULL;
2733
2734 /* Pop VFP double-precision registers
2735 D[16+start]..D[16+start+count]. */
2736 for (i = 0; i <= count; i++)
2737 {
2738 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2739 vsp += 8;
2740 }
2741 }
2742 else if (insn == 0xc9)
2743 {
2744 int start = *entry >> 4;
2745 int count = (*entry++) & 0xf;
2746 int i;
2747
2748 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2749 for (i = 0; i <= count; i++)
2750 {
2751 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2752 vsp += 8;
2753 }
2754 }
2755 else if ((insn & 0xf8) == 0xd0)
2756 {
2757 int count = insn & 0x7;
2758 int i;
2759
2760 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2761 for (i = 0; i <= count; i++)
2762 {
2763 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2764 vsp += 8;
2765 }
2766 }
2767 else
2768 {
2769 /* Everything else is "spare". */
2770 return NULL;
2771 }
2772 }
2773
2774 /* If we restore SP from a register, assume this was the frame register.
2775 Otherwise just fall back to SP as frame register. */
2776 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2777 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2778 else
2779 cache->framereg = ARM_SP_REGNUM;
2780
2781 /* Determine offset to previous frame. */
2782 cache->framesize
2783 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2784
2785 /* We already got the previous SP. */
2786 cache->prev_sp = vsp;
2787
2788 return cache;
2789 }
2790
2791 /* Unwinding via ARM exception table entries. Note that the sniffer
2792 already computes a filled-in prologue cache, which is then used
2793 with the same arm_prologue_this_id and arm_prologue_prev_register
2794 routines also used for prologue-parsing based unwinding. */
2795
2796 static int
2797 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2798 struct frame_info *this_frame,
2799 void **this_prologue_cache)
2800 {
2801 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2802 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2803 CORE_ADDR addr_in_block, exidx_region, func_start;
2804 struct arm_prologue_cache *cache;
2805 gdb_byte *entry;
2806
2807 /* See if we have an ARM exception table entry covering this address. */
2808 addr_in_block = get_frame_address_in_block (this_frame);
2809 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2810 if (!entry)
2811 return 0;
2812
2813 /* The ARM exception table does not describe unwind information
2814 for arbitrary PC values, but is guaranteed to be correct only
2815 at call sites. We have to decide here whether we want to use
2816 ARM exception table information for this frame, or fall back
2817 to using prologue parsing. (Note that if we have DWARF CFI,
2818 this sniffer isn't even called -- CFI is always preferred.)
2819
2820 Before we make this decision, however, we check whether we
2821 actually have *symbol* information for the current frame.
2822 If not, prologue parsing would not work anyway, so we might
2823 as well use the exception table and hope for the best. */
2824 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2825 {
2826 int exc_valid = 0;
2827
2828 /* If the next frame is "normal", we are at a call site in this
2829 frame, so exception information is guaranteed to be valid. */
2830 if (get_next_frame (this_frame)
2831 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2832 exc_valid = 1;
2833
2834 /* We also assume exception information is valid if we're currently
2835 blocked in a system call. The system library is supposed to
2836 ensure this, so that e.g. pthread cancellation works. */
2837 if (arm_frame_is_thumb (this_frame))
2838 {
2839 LONGEST insn;
2840
2841 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2842 byte_order_for_code, &insn)
2843 && (insn & 0xff00) == 0xdf00 /* svc */)
2844 exc_valid = 1;
2845 }
2846 else
2847 {
2848 LONGEST insn;
2849
2850 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2851 byte_order_for_code, &insn)
2852 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2853 exc_valid = 1;
2854 }
2855
2856 /* Bail out if we don't know that exception information is valid. */
2857 if (!exc_valid)
2858 return 0;
2859
2860 /* The ARM exception index does not mark the *end* of the region
2861 covered by the entry, and some functions will not have any entry.
2862 To correctly recognize the end of the covered region, the linker
2863 should have inserted dummy records with a CANTUNWIND marker.
2864
2865 Unfortunately, current versions of GNU ld do not reliably do
2866 this, and thus we may have found an incorrect entry above.
2867 As a (temporary) sanity check, we only use the entry if it
2868 lies *within* the bounds of the function. Note that this check
2869 might reject perfectly valid entries that just happen to cover
2870 multiple functions; therefore this check ought to be removed
2871 once the linker is fixed. */
2872 if (func_start > exidx_region)
2873 return 0;
2874 }
2875
2876 /* Decode the list of unwinding instructions into a prologue cache.
2877 Note that this may fail due to e.g. a "refuse to unwind" code. */
2878 cache = arm_exidx_fill_cache (this_frame, entry);
2879 if (!cache)
2880 return 0;
2881
2882 *this_prologue_cache = cache;
2883 return 1;
2884 }
2885
2886 struct frame_unwind arm_exidx_unwind = {
2887 NORMAL_FRAME,
2888 default_frame_unwind_stop_reason,
2889 arm_prologue_this_id,
2890 arm_prologue_prev_register,
2891 NULL,
2892 arm_exidx_unwind_sniffer
2893 };
2894
2895 static struct arm_prologue_cache *
2896 arm_make_stub_cache (struct frame_info *this_frame)
2897 {
2898 struct arm_prologue_cache *cache;
2899
2900 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2901 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2902
2903 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2904
2905 return cache;
2906 }
2907
2908 /* Our frame ID for a stub frame is the current SP and LR. */
2909
2910 static void
2911 arm_stub_this_id (struct frame_info *this_frame,
2912 void **this_cache,
2913 struct frame_id *this_id)
2914 {
2915 struct arm_prologue_cache *cache;
2916
2917 if (*this_cache == NULL)
2918 *this_cache = arm_make_stub_cache (this_frame);
2919 cache = *this_cache;
2920
2921 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2922 }
2923
2924 static int
2925 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2926 struct frame_info *this_frame,
2927 void **this_prologue_cache)
2928 {
2929 CORE_ADDR addr_in_block;
2930 gdb_byte dummy[4];
2931
2932 addr_in_block = get_frame_address_in_block (this_frame);
2933 if (in_plt_section (addr_in_block, NULL)
2934 /* We also use the stub unwinder if the target memory is unreadable
2935 to avoid having the prologue unwinder trying to read it. */
2936 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2937 return 1;
2938
2939 return 0;
2940 }
2941
2942 struct frame_unwind arm_stub_unwind = {
2943 NORMAL_FRAME,
2944 default_frame_unwind_stop_reason,
2945 arm_stub_this_id,
2946 arm_prologue_prev_register,
2947 NULL,
2948 arm_stub_unwind_sniffer
2949 };
2950
2951 static CORE_ADDR
2952 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2953 {
2954 struct arm_prologue_cache *cache;
2955
2956 if (*this_cache == NULL)
2957 *this_cache = arm_make_prologue_cache (this_frame);
2958 cache = *this_cache;
2959
2960 return cache->prev_sp - cache->framesize;
2961 }
2962
2963 struct frame_base arm_normal_base = {
2964 &arm_prologue_unwind,
2965 arm_normal_frame_base,
2966 arm_normal_frame_base,
2967 arm_normal_frame_base
2968 };
2969
2970 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2971 dummy frame. The frame ID's base needs to match the TOS value
2972 saved by save_dummy_frame_tos() and returned from
2973 arm_push_dummy_call, and the PC needs to match the dummy frame's
2974 breakpoint. */
2975
2976 static struct frame_id
2977 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2978 {
2979 return frame_id_build (get_frame_register_unsigned (this_frame,
2980 ARM_SP_REGNUM),
2981 get_frame_pc (this_frame));
2982 }
2983
2984 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2985 be used to construct the previous frame's ID, after looking up the
2986 containing function). */
2987
2988 static CORE_ADDR
2989 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2990 {
2991 CORE_ADDR pc;
2992 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2993 return arm_addr_bits_remove (gdbarch, pc);
2994 }
2995
2996 static CORE_ADDR
2997 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2998 {
2999 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
3000 }
3001
3002 static struct value *
3003 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3004 int regnum)
3005 {
3006 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3007 CORE_ADDR lr, cpsr;
3008 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3009
3010 switch (regnum)
3011 {
3012 case ARM_PC_REGNUM:
3013 /* The PC is normally copied from the return column, which
3014 describes saves of LR. However, that version may have an
3015 extra bit set to indicate Thumb state. The bit is not
3016 part of the PC. */
3017 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3018 return frame_unwind_got_constant (this_frame, regnum,
3019 arm_addr_bits_remove (gdbarch, lr));
3020
3021 case ARM_PS_REGNUM:
3022 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3023 cpsr = get_frame_register_unsigned (this_frame, regnum);
3024 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3025 if (IS_THUMB_ADDR (lr))
3026 cpsr |= t_bit;
3027 else
3028 cpsr &= ~t_bit;
3029 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3030
3031 default:
3032 internal_error (__FILE__, __LINE__,
3033 _("Unexpected register %d"), regnum);
3034 }
3035 }
3036
3037 static void
3038 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3039 struct dwarf2_frame_state_reg *reg,
3040 struct frame_info *this_frame)
3041 {
3042 switch (regnum)
3043 {
3044 case ARM_PC_REGNUM:
3045 case ARM_PS_REGNUM:
3046 reg->how = DWARF2_FRAME_REG_FN;
3047 reg->loc.fn = arm_dwarf2_prev_register;
3048 break;
3049 case ARM_SP_REGNUM:
3050 reg->how = DWARF2_FRAME_REG_CFA;
3051 break;
3052 }
3053 }
3054
3055 /* Return true if we are in the function's epilogue, i.e. after the
3056 instruction that destroyed the function's stack frame. */
3057
3058 static int
3059 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3060 {
3061 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3062 unsigned int insn, insn2;
3063 int found_return = 0, found_stack_adjust = 0;
3064 CORE_ADDR func_start, func_end;
3065 CORE_ADDR scan_pc;
3066 gdb_byte buf[4];
3067
3068 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3069 return 0;
3070
3071 /* The epilogue is a sequence of instructions along the following lines:
3072
3073 - add stack frame size to SP or FP
3074 - [if frame pointer used] restore SP from FP
3075 - restore registers from SP [may include PC]
3076 - a return-type instruction [if PC wasn't already restored]
3077
3078 In a first pass, we scan forward from the current PC and verify the
3079 instructions we find as compatible with this sequence, ending in a
3080 return instruction.
3081
3082 However, this is not sufficient to distinguish indirect function calls
3083 within a function from indirect tail calls in the epilogue in some cases.
3084 Therefore, if we didn't already find any SP-changing instruction during
3085 forward scan, we add a backward scanning heuristic to ensure we actually
3086 are in the epilogue. */
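/* For reference, a typical Thumb epilogue this scan is intended to
   match looks like (register choices are only illustrative):
	add	sp, #16		; deallocate locals
	pop	{r4, r7, pc}	; restore callee-saved registers and return
   or, when a frame pointer is in use, "mov sp, r7" followed by a pop.  */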
3087
3088 scan_pc = pc;
3089 while (scan_pc < func_end && !found_return)
3090 {
3091 if (target_read_memory (scan_pc, buf, 2))
3092 break;
3093
3094 scan_pc += 2;
3095 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3096
3097 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3098 found_return = 1;
3099 else if (insn == 0x46f7) /* mov pc, lr */
3100 found_return = 1;
3101 else if (insn == 0x46bd) /* mov sp, r7 */
3102 found_stack_adjust = 1;
3103 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3104 found_stack_adjust = 1;
3105 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3106 {
3107 found_stack_adjust = 1;
3108 if (insn & 0x0100) /* <registers> include PC. */
3109 found_return = 1;
3110 }
3111 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
3112 {
3113 if (target_read_memory (scan_pc, buf, 2))
3114 break;
3115
3116 scan_pc += 2;
3117 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3118
3119 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3120 {
3121 found_stack_adjust = 1;
3122 if (insn2 & 0x8000) /* <registers> include PC. */
3123 found_return = 1;
3124 }
3125 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3126 && (insn2 & 0x0fff) == 0x0b04)
3127 {
3128 found_stack_adjust = 1;
3129 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3130 found_return = 1;
3131 }
3132 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3133 && (insn2 & 0x0e00) == 0x0a00)
3134 found_stack_adjust = 1;
3135 else
3136 break;
3137 }
3138 else
3139 break;
3140 }
3141
3142 if (!found_return)
3143 return 0;
3144
3145 /* Since any instruction in the epilogue sequence, with the possible
3146 exception of return itself, updates the stack pointer, we need to
3147 scan backwards for at most one instruction. Try either a 16-bit or
3148 a 32-bit instruction. This is just a heuristic, so we do not worry
3149 too much about false positives. */
3150
3151 if (!found_stack_adjust)
3152 {
3153 if (pc - 4 < func_start)
3154 return 0;
3155 if (target_read_memory (pc - 4, buf, 4))
3156 return 0;
3157
3158 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3159 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3160
3161 if (insn2 == 0x46bd) /* mov sp, r7 */
3162 found_stack_adjust = 1;
3163 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3164 found_stack_adjust = 1;
3165 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3166 found_stack_adjust = 1;
3167 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3168 found_stack_adjust = 1;
3169 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3170 && (insn2 & 0x0fff) == 0x0b04)
3171 found_stack_adjust = 1;
3172 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3173 && (insn2 & 0x0e00) == 0x0a00)
3174 found_stack_adjust = 1;
3175 }
3176
3177 return found_stack_adjust;
3178 }
3179
3180 /* Return true if we are in the function's epilogue, i.e. after the
3181 instruction that destroyed the function's stack frame. */
3182
3183 static int
3184 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3185 {
3186 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3187 unsigned int insn;
3188 int found_return, found_stack_adjust;
3189 CORE_ADDR func_start, func_end;
3190
3191 if (arm_pc_is_thumb (gdbarch, pc))
3192 return thumb_in_function_epilogue_p (gdbarch, pc);
3193
3194 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3195 return 0;
3196
3197 /* We are in the epilogue if the previous instruction was a stack
3198 adjustment and the next instruction is a possible return (bx, mov
3199 pc, or pop). We might have to scan backwards to find the stack
3200 adjustment, or forwards to find the return, but this is a decent
3201 approximation. First scan forwards. */
3202
3203 found_return = 0;
3204 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3205 if (bits (insn, 28, 31) != INST_NV)
3206 {
3207 if ((insn & 0x0ffffff0) == 0x012fff10)
3208 /* BX. */
3209 found_return = 1;
3210 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3211 /* MOV PC. */
3212 found_return = 1;
3213 else if ((insn & 0x0fff0000) == 0x08bd0000
3214 && (insn & 0x0000c000) != 0)
3215 /* POP (LDMIA), including PC or LR. */
3216 found_return = 1;
3217 }
3218
3219 if (!found_return)
3220 return 0;
3221
3222 /* Scan backwards. This is just a heuristic, so do not worry about
3223 false positives from mode changes. */
3224
3225 if (pc < func_start + 4)
3226 return 0;
3227
3228 found_stack_adjust = 0;
3229 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3230 if (bits (insn, 28, 31) != INST_NV)
3231 {
3232 if ((insn & 0x0df0f000) == 0x0080d000)
3233 /* ADD SP (register or immediate). */
3234 found_stack_adjust = 1;
3235 else if ((insn & 0x0df0f000) == 0x0040d000)
3236 /* SUB SP (register or immediate). */
3237 found_stack_adjust = 1;
3238 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3239 /* MOV SP. */
3240 found_stack_adjust = 1;
3241 else if ((insn & 0x0fff0000) == 0x08bd0000)
3242 /* POP (LDMIA). */
3243 found_stack_adjust = 1;
3244 }
3245
3246 if (found_stack_adjust)
3247 return 1;
3248
3249 return 0;
3250 }
3251
3252
3253 /* When arguments must be pushed onto the stack, they go on in reverse
3254 order. The code below implements a FILO (stack) to do this. */
3255
3256 struct stack_item
3257 {
3258 int len;
3259 struct stack_item *prev;
3260 void *data;
3261 };
3262
3263 static struct stack_item *
3264 push_stack_item (struct stack_item *prev, const void *contents, int len)
3265 {
3266 struct stack_item *si;
3267 si = xmalloc (sizeof (struct stack_item));
3268 si->data = xmalloc (len);
3269 si->len = len;
3270 si->prev = prev;
3271 memcpy (si->data, contents, len);
3272 return si;
3273 }
3274
3275 static struct stack_item *
3276 pop_stack_item (struct stack_item *si)
3277 {
3278 struct stack_item *dead = si;
3279 si = si->prev;
3280 xfree (dead->data);
3281 xfree (dead);
3282 return si;
3283 }
3284
3285
3286 /* Return the alignment (in bytes) of the given type. */
3287
3288 static int
3289 arm_type_align (struct type *t)
3290 {
3291 int n;
3292 int align;
3293 int falign;
3294
3295 t = check_typedef (t);
3296 switch (TYPE_CODE (t))
3297 {
3298 default:
3299 /* Should never happen. */
3300 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3301 return 4;
3302
3303 case TYPE_CODE_PTR:
3304 case TYPE_CODE_ENUM:
3305 case TYPE_CODE_INT:
3306 case TYPE_CODE_FLT:
3307 case TYPE_CODE_SET:
3308 case TYPE_CODE_RANGE:
3309 case TYPE_CODE_BITSTRING:
3310 case TYPE_CODE_REF:
3311 case TYPE_CODE_CHAR:
3312 case TYPE_CODE_BOOL:
3313 return TYPE_LENGTH (t);
3314
3315 case TYPE_CODE_ARRAY:
3316 case TYPE_CODE_COMPLEX:
3317 /* TODO: What about vector types? */
3318 return arm_type_align (TYPE_TARGET_TYPE (t));
3319
3320 case TYPE_CODE_STRUCT:
3321 case TYPE_CODE_UNION:
3322 align = 1;
3323 for (n = 0; n < TYPE_NFIELDS (t); n++)
3324 {
3325 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3326 if (falign > align)
3327 align = falign;
3328 }
3329 return align;
3330 }
3331 }
3332
3333 /* Possible base types for a candidate for passing and returning in
3334 VFP registers. */
3335
3336 enum arm_vfp_cprc_base_type
3337 {
3338 VFP_CPRC_UNKNOWN,
3339 VFP_CPRC_SINGLE,
3340 VFP_CPRC_DOUBLE,
3341 VFP_CPRC_VEC64,
3342 VFP_CPRC_VEC128
3343 };
3344
3345 /* The length of one element of base type B. */
3346
3347 static unsigned
3348 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3349 {
3350 switch (b)
3351 {
3352 case VFP_CPRC_SINGLE:
3353 return 4;
3354 case VFP_CPRC_DOUBLE:
3355 return 8;
3356 case VFP_CPRC_VEC64:
3357 return 8;
3358 case VFP_CPRC_VEC128:
3359 return 16;
3360 default:
3361 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3362 (int) b);
3363 }
3364 }
3365
3366 /* The character ('s', 'd' or 'q') for the type of VFP register used
3367 for passing base type B. */
3368
3369 static int
3370 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3371 {
3372 switch (b)
3373 {
3374 case VFP_CPRC_SINGLE:
3375 return 's';
3376 case VFP_CPRC_DOUBLE:
3377 return 'd';
3378 case VFP_CPRC_VEC64:
3379 return 'd';
3380 case VFP_CPRC_VEC128:
3381 return 'q';
3382 default:
3383 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3384 (int) b);
3385 }
3386 }
3387
3388 /* Determine whether T may be part of a candidate for passing and
3389 returning in VFP registers, ignoring the limit on the total number
3390 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3391 classification of the first valid component found; if it is not
3392 VFP_CPRC_UNKNOWN, all components must have the same classification
3393 as *BASE_TYPE. If it is found that T contains a type not permitted
3394 for passing and returning in VFP registers, a type differently
3395 classified from *BASE_TYPE, or two types differently classified
3396 from each other, return -1, otherwise return the total number of
3397 base-type elements found (possibly 0 in an empty structure or
3398 array). Vectors and complex types are not currently supported,
3399 matching the generic AAPCS support. */
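/* For example, under these rules a struct containing two doubles is a
   valid candidate with base type VFP_CPRC_DOUBLE and two elements,
   while a struct mixing a float and a double has differently
   classified components and yields -1.  */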
3400
3401 static int
3402 arm_vfp_cprc_sub_candidate (struct type *t,
3403 enum arm_vfp_cprc_base_type *base_type)
3404 {
3405 t = check_typedef (t);
3406 switch (TYPE_CODE (t))
3407 {
3408 case TYPE_CODE_FLT:
3409 switch (TYPE_LENGTH (t))
3410 {
3411 case 4:
3412 if (*base_type == VFP_CPRC_UNKNOWN)
3413 *base_type = VFP_CPRC_SINGLE;
3414 else if (*base_type != VFP_CPRC_SINGLE)
3415 return -1;
3416 return 1;
3417
3418 case 8:
3419 if (*base_type == VFP_CPRC_UNKNOWN)
3420 *base_type = VFP_CPRC_DOUBLE;
3421 else if (*base_type != VFP_CPRC_DOUBLE)
3422 return -1;
3423 return 1;
3424
3425 default:
3426 return -1;
3427 }
3428 break;
3429
3430 case TYPE_CODE_ARRAY:
3431 {
3432 int count;
3433 unsigned unitlen;
3434 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3435 if (count == -1)
3436 return -1;
3437 if (TYPE_LENGTH (t) == 0)
3438 {
3439 gdb_assert (count == 0);
3440 return 0;
3441 }
3442 else if (count == 0)
3443 return -1;
3444 unitlen = arm_vfp_cprc_unit_length (*base_type);
3445 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3446 return TYPE_LENGTH (t) / unitlen;
3447 }
3448 break;
3449
3450 case TYPE_CODE_STRUCT:
3451 {
3452 int count = 0;
3453 unsigned unitlen;
3454 int i;
3455 for (i = 0; i < TYPE_NFIELDS (t); i++)
3456 {
3457 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3458 base_type);
3459 if (sub_count == -1)
3460 return -1;
3461 count += sub_count;
3462 }
3463 if (TYPE_LENGTH (t) == 0)
3464 {
3465 gdb_assert (count == 0);
3466 return 0;
3467 }
3468 else if (count == 0)
3469 return -1;
3470 unitlen = arm_vfp_cprc_unit_length (*base_type);
3471 if (TYPE_LENGTH (t) != unitlen * count)
3472 return -1;
3473 return count;
3474 }
3475
3476 case TYPE_CODE_UNION:
3477 {
3478 int count = 0;
3479 unsigned unitlen;
3480 int i;
3481 for (i = 0; i < TYPE_NFIELDS (t); i++)
3482 {
3483 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3484 base_type);
3485 if (sub_count == -1)
3486 return -1;
3487 count = (count > sub_count ? count : sub_count);
3488 }
3489 if (TYPE_LENGTH (t) == 0)
3490 {
3491 gdb_assert (count == 0);
3492 return 0;
3493 }
3494 else if (count == 0)
3495 return -1;
3496 unitlen = arm_vfp_cprc_unit_length (*base_type);
3497 if (TYPE_LENGTH (t) != unitlen * count)
3498 return -1;
3499 return count;
3500 }
3501
3502 default:
3503 break;
3504 }
3505
3506 return -1;
3507 }
3508
3509 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3510 if passed to or returned from a non-variadic function with the VFP
3511 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3512 *BASE_TYPE to the base type for T and *COUNT to the number of
3513 elements of that base type before returning. */
3514
3515 static int
3516 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3517 int *count)
3518 {
3519 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3520 int c = arm_vfp_cprc_sub_candidate (t, &b);
3521 if (c <= 0 || c > 4)
3522 return 0;
3523 *base_type = b;
3524 *count = c;
3525 return 1;
3526 }
3527
3528 /* Return 1 if the VFP ABI should be used for passing arguments to and
3529 returning values from a function of type FUNC_TYPE, 0
3530 otherwise. */
3531
3532 static int
3533 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3534 {
3535 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3536 /* Variadic functions always use the base ABI. Assume that functions
3537 without debug info are not variadic. */
3538 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3539 return 0;
3540 /* The VFP ABI is only supported as a variant of AAPCS. */
3541 if (tdep->arm_abi != ARM_ABI_AAPCS)
3542 return 0;
3543 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3544 }
3545
3546 /* We currently only support passing parameters in integer registers, which
3547 conforms with GCC's default model, and VFP argument passing following
3548 the VFP variant of AAPCS. Several other variants exist and
3549 we should probably support some of them based on the selected ABI. */
3550
3551 static CORE_ADDR
3552 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3553 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3554 struct value **args, CORE_ADDR sp, int struct_return,
3555 CORE_ADDR struct_addr)
3556 {
3557 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3558 int argnum;
3559 int argreg;
3560 int nstack;
3561 struct stack_item *si = NULL;
3562 int use_vfp_abi;
3563 struct type *ftype;
3564 unsigned vfp_regs_free = (1 << 16) - 1;
3565
3566 /* Determine the type of this function and whether the VFP ABI
3567 applies. */
3568 ftype = check_typedef (value_type (function));
3569 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3570 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3571 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3572
3573 /* Set the return address. For the ARM, the return breakpoint is
3574 always at BP_ADDR. */
3575 if (arm_pc_is_thumb (gdbarch, bp_addr))
3576 bp_addr |= 1;
3577 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3578
3579 /* Walk through the list of args and determine how large a temporary
3580 stack is required. Need to take care here as structs may be
3581 passed on the stack, and we have to push them. */
3582 nstack = 0;
3583
3584 argreg = ARM_A1_REGNUM;
3585 nstack = 0;
3586
3587 /* The struct_return pointer occupies the first parameter
3588 passing register. */
3589 if (struct_return)
3590 {
3591 if (arm_debug)
3592 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3593 gdbarch_register_name (gdbarch, argreg),
3594 paddress (gdbarch, struct_addr));
3595 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3596 argreg++;
3597 }
3598
3599 for (argnum = 0; argnum < nargs; argnum++)
3600 {
3601 int len;
3602 struct type *arg_type;
3603 struct type *target_type;
3604 enum type_code typecode;
3605 const bfd_byte *val;
3606 int align;
3607 enum arm_vfp_cprc_base_type vfp_base_type;
3608 int vfp_base_count;
3609 int may_use_core_reg = 1;
3610
3611 arg_type = check_typedef (value_type (args[argnum]));
3612 len = TYPE_LENGTH (arg_type);
3613 target_type = TYPE_TARGET_TYPE (arg_type);
3614 typecode = TYPE_CODE (arg_type);
3615 val = value_contents (args[argnum]);
3616
3617 align = arm_type_align (arg_type);
3618 /* Round alignment up to a whole number of words. */
3619 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3620 /* Different ABIs have different maximum alignments. */
3621 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3622 {
3623 /* The APCS ABI only requires word alignment. */
3624 align = INT_REGISTER_SIZE;
3625 }
3626 else
3627 {
3628 /* The AAPCS requires at most doubleword alignment. */
3629 if (align > INT_REGISTER_SIZE * 2)
3630 align = INT_REGISTER_SIZE * 2;
3631 }
3632
3633 if (use_vfp_abi
3634 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3635 &vfp_base_count))
3636 {
3637 int regno;
3638 int unit_length;
3639 int shift;
3640 unsigned mask;
3641
3642 /* Because this is a CPRC it cannot go in a core register or
3643 cause a core register to be skipped for alignment.
3644 Either it goes in VFP registers and the rest of this loop
3645 iteration is skipped for this argument, or it goes on the
3646 stack (and the stack alignment code is correct for this
3647 case). */
3648 may_use_core_reg = 0;
3649
3650 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3651 shift = unit_length / 4;
3652 mask = (1 << (shift * vfp_base_count)) - 1;
3653 for (regno = 0; regno < 16; regno += shift)
3654 if (((vfp_regs_free >> regno) & mask) == mask)
3655 break;
3656
3657 if (regno < 16)
3658 {
3659 int reg_char;
3660 int reg_scaled;
3661 int i;
3662
3663 vfp_regs_free &= ~(mask << regno);
3664 reg_scaled = regno / shift;
3665 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3666 for (i = 0; i < vfp_base_count; i++)
3667 {
3668 char name_buf[4];
3669 int regnum;
3670 if (reg_char == 'q')
3671 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3672 val + i * unit_length);
3673 else
3674 {
3675 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3676 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3677 strlen (name_buf));
3678 regcache_cooked_write (regcache, regnum,
3679 val + i * unit_length);
3680 }
3681 }
3682 continue;
3683 }
3684 else
3685 {
3686 /* This CPRC could not go in VFP registers, so all VFP
3687 registers are now marked as used. */
3688 vfp_regs_free = 0;
3689 }
3690 }
3691
3692 /* Push stack padding for doubleword alignment. */
3693 if (nstack & (align - 1))
3694 {
3695 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3696 nstack += INT_REGISTER_SIZE;
3697 }
3698
3699 /* Doubleword aligned quantities must go in even register pairs. */
3700 if (may_use_core_reg
3701 && argreg <= ARM_LAST_ARG_REGNUM
3702 && align > INT_REGISTER_SIZE
3703 && argreg & 1)
3704 argreg++;
3705
3706 /* If the argument is a pointer to a function, and it is a
3707 Thumb function, create a LOCAL copy of the value and set
3708 the THUMB bit in it. */
3709 if (TYPE_CODE_PTR == typecode
3710 && target_type != NULL
3711 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3712 {
3713 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3714 if (arm_pc_is_thumb (gdbarch, regval))
3715 {
3716 bfd_byte *copy = alloca (len);
3717 store_unsigned_integer (copy, len, byte_order,
3718 MAKE_THUMB_ADDR (regval));
3719 val = copy;
3720 }
3721 }
3722
3723 /* Copy the argument to general registers or the stack in
3724 register-sized pieces. Large arguments are split between
3725 registers and stack. */
3726 while (len > 0)
3727 {
3728 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3729
3730 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3731 {
3732 /* The argument is being passed in a general purpose
3733 register. */
3734 CORE_ADDR regval
3735 = extract_unsigned_integer (val, partial_len, byte_order);
3736 if (byte_order == BFD_ENDIAN_BIG)
3737 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3738 if (arm_debug)
3739 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3740 argnum,
3741 gdbarch_register_name
3742 (gdbarch, argreg),
3743 phex (regval, INT_REGISTER_SIZE));
3744 regcache_cooked_write_unsigned (regcache, argreg, regval);
3745 argreg++;
3746 }
3747 else
3748 {
3749 /* Push the arguments onto the stack. */
3750 if (arm_debug)
3751 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3752 argnum, nstack);
3753 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3754 nstack += INT_REGISTER_SIZE;
3755 }
3756
3757 len -= partial_len;
3758 val += partial_len;
3759 }
3760 }
3761 /* If we have an odd number of words to push, then decrement the stack
3762 by one word now, so that the first stack argument will be doubleword aligned. */
3763 if (nstack & 4)
3764 sp -= 4;
3765
3766 while (si)
3767 {
3768 sp -= si->len;
3769 write_memory (sp, si->data, si->len);
3770 si = pop_stack_item (si);
3771 }
3772
3773 /* Finally, update the SP register. */
3774 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3775
3776 return sp;
3777 }
3778
3779
3780 /* Always align the frame to an 8-byte boundary. This is required on
3781 some platforms and harmless on the rest. */
3782
3783 static CORE_ADDR
3784 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3785 {
3786 /* Align the stack to eight bytes. */
3787 return sp & ~ (CORE_ADDR) 7;
3788 }
3789
3790 static void
3791 print_fpu_flags (int flags)
3792 {
3793 if (flags & (1 << 0))
3794 fputs ("IVO ", stdout);
3795 if (flags & (1 << 1))
3796 fputs ("DVZ ", stdout);
3797 if (flags & (1 << 2))
3798 fputs ("OFL ", stdout);
3799 if (flags & (1 << 3))
3800 fputs ("UFL ", stdout);
3801 if (flags & (1 << 4))
3802 fputs ("INX ", stdout);
3803 putchar ('\n');
3804 }
3805
3806 /* Print interesting information about the floating point processor
3807 (if present) or emulator. */
3808 static void
3809 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3810 struct frame_info *frame, const char *args)
3811 {
3812 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3813 int type;
3814
3815 type = (status >> 24) & 127;
3816 if (status & (1 << 31))
3817 printf (_("Hardware FPU type %d\n"), type);
3818 else
3819 printf (_("Software FPU type %d\n"), type);
3820 /* i18n: [floating point unit] mask */
3821 fputs (_("mask: "), stdout);
3822 print_fpu_flags (status >> 16);
3823 /* i18n: [floating point unit] flags */
3824 fputs (_("flags: "), stdout);
3825 print_fpu_flags (status);
3826 }
3827
3828 /* Construct the ARM extended floating point type. */
3829 static struct type *
3830 arm_ext_type (struct gdbarch *gdbarch)
3831 {
3832 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3833
3834 if (!tdep->arm_ext_type)
3835 tdep->arm_ext_type
3836 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3837 floatformats_arm_ext);
3838
3839 return tdep->arm_ext_type;
3840 }
3841
3842 static struct type *
3843 arm_neon_double_type (struct gdbarch *gdbarch)
3844 {
3845 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3846
3847 if (tdep->neon_double_type == NULL)
3848 {
3849 struct type *t, *elem;
3850
3851 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3852 TYPE_CODE_UNION);
3853 elem = builtin_type (gdbarch)->builtin_uint8;
3854 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3855 elem = builtin_type (gdbarch)->builtin_uint16;
3856 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3857 elem = builtin_type (gdbarch)->builtin_uint32;
3858 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3859 elem = builtin_type (gdbarch)->builtin_uint64;
3860 append_composite_type_field (t, "u64", elem);
3861 elem = builtin_type (gdbarch)->builtin_float;
3862 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3863 elem = builtin_type (gdbarch)->builtin_double;
3864 append_composite_type_field (t, "f64", elem);
3865
3866 TYPE_VECTOR (t) = 1;
3867 TYPE_NAME (t) = "neon_d";
3868 tdep->neon_double_type = t;
3869 }
3870
3871 return tdep->neon_double_type;
3872 }
3873
3874 /* FIXME: The vector types are not correctly ordered on big-endian
3875 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3876 bits of d0 - regardless of what unit size is being held in d0. So
3877 the offset of the first uint8 in d0 is 7, but the offset of the
3878 first float is 4. This code works as-is for little-endian
3879 targets. */
3880
3881 static struct type *
3882 arm_neon_quad_type (struct gdbarch *gdbarch)
3883 {
3884 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3885
3886 if (tdep->neon_quad_type == NULL)
3887 {
3888 struct type *t, *elem;
3889
3890 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3891 TYPE_CODE_UNION);
3892 elem = builtin_type (gdbarch)->builtin_uint8;
3893 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3894 elem = builtin_type (gdbarch)->builtin_uint16;
3895 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3896 elem = builtin_type (gdbarch)->builtin_uint32;
3897 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3898 elem = builtin_type (gdbarch)->builtin_uint64;
3899 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3900 elem = builtin_type (gdbarch)->builtin_float;
3901 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3902 elem = builtin_type (gdbarch)->builtin_double;
3903 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3904
3905 TYPE_VECTOR (t) = 1;
3906 TYPE_NAME (t) = "neon_q";
3907 tdep->neon_quad_type = t;
3908 }
3909
3910 return tdep->neon_quad_type;
3911 }
3912
3913 /* Return the GDB type object for the "standard" data type of data in
3914 register N. */
3915
3916 static struct type *
3917 arm_register_type (struct gdbarch *gdbarch, int regnum)
3918 {
3919 int num_regs = gdbarch_num_regs (gdbarch);
3920
3921 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3922 && regnum >= num_regs && regnum < num_regs + 32)
3923 return builtin_type (gdbarch)->builtin_float;
3924
3925 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3926 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3927 return arm_neon_quad_type (gdbarch);
3928
3929 /* If the target description has register information, we are only
3930 in this function so that we can override the types of
3931 double-precision registers for NEON. */
3932 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3933 {
3934 struct type *t = tdesc_register_type (gdbarch, regnum);
3935
3936 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3937 && TYPE_CODE (t) == TYPE_CODE_FLT
3938 && gdbarch_tdep (gdbarch)->have_neon)
3939 return arm_neon_double_type (gdbarch);
3940 else
3941 return t;
3942 }
3943
3944 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3945 {
3946 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3947 return builtin_type (gdbarch)->builtin_void;
3948
3949 return arm_ext_type (gdbarch);
3950 }
3951 else if (regnum == ARM_SP_REGNUM)
3952 return builtin_type (gdbarch)->builtin_data_ptr;
3953 else if (regnum == ARM_PC_REGNUM)
3954 return builtin_type (gdbarch)->builtin_func_ptr;
3955 else if (regnum >= ARRAY_SIZE (arm_register_names))
3956 /* These registers are only supported on targets which supply
3957 an XML description. */
3958 return builtin_type (gdbarch)->builtin_int0;
3959 else
3960 return builtin_type (gdbarch)->builtin_uint32;
3961 }
3962
3963 /* Map a DWARF register REGNUM onto the appropriate GDB register
3964 number. */
3965
3966 static int
3967 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3968 {
3969 /* Core integer regs. */
3970 if (reg >= 0 && reg <= 15)
3971 return reg;
3972
3973 /* Legacy FPA encoding. These were once used in a way which
3974 overlapped with VFP register numbering, so their use is
3975 discouraged, but GDB doesn't support the ARM toolchain
3976 which used them for VFP. */
3977 if (reg >= 16 && reg <= 23)
3978 return ARM_F0_REGNUM + reg - 16;
3979
3980 /* New assignments for the FPA registers. */
3981 if (reg >= 96 && reg <= 103)
3982 return ARM_F0_REGNUM + reg - 96;
3983
3984 /* WMMX register assignments. */
3985 if (reg >= 104 && reg <= 111)
3986 return ARM_WCGR0_REGNUM + reg - 104;
3987
3988 if (reg >= 112 && reg <= 127)
3989 return ARM_WR0_REGNUM + reg - 112;
3990
3991 if (reg >= 192 && reg <= 199)
3992 return ARM_WC0_REGNUM + reg - 192;
3993
3994 /* VFP v2 registers. A double precision value is actually
3995 in d1 rather than s2, but the ABI only defines numbering
3996 for the single precision registers. This will "just work"
3997 in GDB for little endian targets (we'll read eight bytes,
3998 starting in s0 and then progressing to s1), but will be
3999 reversed on big endian targets with VFP. This won't
4000 be a problem for the new Neon quad registers; you're supposed
4001 to use DW_OP_piece for those. */
4002 if (reg >= 64 && reg <= 95)
4003 {
4004 char name_buf[4];
4005
4006 sprintf (name_buf, "s%d", reg - 64);
4007 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4008 strlen (name_buf));
4009 }
4010
4011 /* VFP v3 / Neon registers. This range is also used for VFP v2
4012 registers, except that it now describes d0 instead of s0. */
4013 if (reg >= 256 && reg <= 287)
4014 {
4015 char name_buf[4];
4016
4017 sprintf (name_buf, "d%d", reg - 256);
4018 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4019 strlen (name_buf));
4020 }
4021
4022 return -1;
4023 }
4024
4025 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4026 static int
4027 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4028 {
4029 int reg = regnum;
4030 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4031
4032 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4033 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4034
4035 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4036 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4037
4038 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4039 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4040
4041 if (reg < NUM_GREGS)
4042 return SIM_ARM_R0_REGNUM + reg;
4043 reg -= NUM_GREGS;
4044
4045 if (reg < NUM_FREGS)
4046 return SIM_ARM_FP0_REGNUM + reg;
4047 reg -= NUM_FREGS;
4048
4049 if (reg < NUM_SREGS)
4050 return SIM_ARM_FPS_REGNUM + reg;
4051 reg -= NUM_SREGS;
4052
4053 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4054 }
4055
4056 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4057 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4058 It is thought that this is the floating-point register format on
4059 little-endian systems. */
4060
4061 static void
4062 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4063 void *dbl, int endianess)
4064 {
4065 DOUBLEST d;
4066
4067 if (endianess == BFD_ENDIAN_BIG)
4068 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4069 else
4070 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4071 ptr, &d);
4072 floatformat_from_doublest (fmt, &d, dbl);
4073 }
4074
4075 static void
4076 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4077 int endianess)
4078 {
4079 DOUBLEST d;
4080
4081 floatformat_to_doublest (fmt, ptr, &d);
4082 if (endianess == BFD_ENDIAN_BIG)
4083 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4084 else
4085 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4086 &d, dbl);
4087 }
4088
4089 static int
4090 condition_true (unsigned long cond, unsigned long status_reg)
4091 {
4092 if (cond == INST_AL || cond == INST_NV)
4093 return 1;
4094
4095 switch (cond)
4096 {
4097 case INST_EQ:
4098 return ((status_reg & FLAG_Z) != 0);
4099 case INST_NE:
4100 return ((status_reg & FLAG_Z) == 0);
4101 case INST_CS:
4102 return ((status_reg & FLAG_C) != 0);
4103 case INST_CC:
4104 return ((status_reg & FLAG_C) == 0);
4105 case INST_MI:
4106 return ((status_reg & FLAG_N) != 0);
4107 case INST_PL:
4108 return ((status_reg & FLAG_N) == 0);
4109 case INST_VS:
4110 return ((status_reg & FLAG_V) != 0);
4111 case INST_VC:
4112 return ((status_reg & FLAG_V) == 0);
4113 case INST_HI:
4114 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4115 case INST_LS:
4116 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4117 case INST_GE:
4118 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4119 case INST_LT:
4120 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4121 case INST_GT:
4122 return (((status_reg & FLAG_Z) == 0)
4123 && (((status_reg & FLAG_N) == 0)
4124 == ((status_reg & FLAG_V) == 0)));
4125 case INST_LE:
4126 return (((status_reg & FLAG_Z) != 0)
4127 || (((status_reg & FLAG_N) == 0)
4128 != ((status_reg & FLAG_V) == 0)));
4129 }
4130 return 1;
4131 }
4132
4133 static unsigned long
4134 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4135 unsigned long pc_val, unsigned long status_reg)
4136 {
4137 unsigned long res, shift;
4138 int rm = bits (inst, 0, 3);
4139 unsigned long shifttype = bits (inst, 5, 6);
4140
4141 if (bit (inst, 4))
4142 {
4143 int rs = bits (inst, 8, 11);
4144 shift = (rs == 15 ? pc_val + 8
4145 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4146 }
4147 else
4148 shift = bits (inst, 7, 11);
4149
4150 res = (rm == ARM_PC_REGNUM
4151 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4152 : get_frame_register_unsigned (frame, rm));
4153
4154 switch (shifttype)
4155 {
4156 case 0: /* LSL */
4157 res = shift >= 32 ? 0 : res << shift;
4158 break;
4159
4160 case 1: /* LSR */
4161 res = shift >= 32 ? 0 : res >> shift;
4162 break;
4163
4164 case 2: /* ASR */
4165 if (shift >= 32)
4166 shift = 31;
4167 res = ((res & 0x80000000L)
4168 ? ~((~res) >> shift) : res >> shift);
4169 break;
4170
4171 case 3: /* ROR/RRX */
4172 shift &= 31;
4173 if (shift == 0)
4174 res = (res >> 1) | (carry ? 0x80000000L : 0);
4175 else
4176 res = (res >> shift) | (res << (32 - shift));
4177 break;
4178 }
4179
4180 return res & 0xffffffff;
4181 }
4182
4183 /* Return number of 1-bits in VAL. */
4184
4185 static int
4186 bitcount (unsigned long val)
4187 {
4188 int nbits;
4189 for (nbits = 0; val != 0; nbits++)
4190 val &= val - 1; /* Delete rightmost 1-bit in val. */
4191 return nbits;
4192 }
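/* For example, bitcount (0x000b) == 3. The callers below use this to
size register lists, e.g. to find how far above the stack pointer the
saved PC sits in a Thumb "pop {rlist, pc}".  */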
4193
4194 /* Return the size in bytes of the complete Thumb instruction whose
4195 first halfword is INST1. */
4196
4197 static int
4198 thumb_insn_size (unsigned short inst1)
4199 {
4200 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4201 return 4;
4202 else
4203 return 2;
4204 }
4205
4206 static int
4207 thumb_advance_itstate (unsigned int itstate)
4208 {
4209 /* Preserve IT[7:5], the first three bits of the condition. Shift
4210 the upcoming condition flags left by one bit. */
4211 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
4212
4213 /* If we have finished the IT block, clear the state. */
4214 if ((itstate & 0x0f) == 0)
4215 itstate = 0;
4216
4217 return itstate;
4218 }
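/* For example (using an arbitrary value), an ITSTATE of 0xac advances
to 0xb8 and then to 0xb0; the low four bits of the latter are clear,
so it is reset to zero, marking the end of the IT block.  */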
4219
4220 /* Find the next PC after the current instruction executes. In some
4221 cases we can not statically determine the answer (see the IT state
4222 handling in this function); in that case, a breakpoint may be
4223 inserted in addition to the returned PC, which will be used to set
4224 another breakpoint by our caller. */
4225
4226 static CORE_ADDR
4227 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
4228 {
4229 struct gdbarch *gdbarch = get_frame_arch (frame);
4230 struct address_space *aspace = get_frame_address_space (frame);
4231 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4232 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4233 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4234 unsigned short inst1;
4235 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4236 unsigned long offset;
4237 ULONGEST status, itstate;
4238
4239 nextpc = MAKE_THUMB_ADDR (nextpc);
4240 pc_val = MAKE_THUMB_ADDR (pc_val);
4241
4242 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4243
4244 /* Thumb-2 conditional execution support. There are eight bits in
4245 the CPSR which describe conditional execution state. Once
4246 reconstructed (they're in a funny order), the low five bits
4247 describe the low bit of the condition for each instruction and
4248 how many instructions remain. The high three bits describe the
4249 base condition. One of the low four bits will be set if an IT
4250 block is active. These bits read as zero on earlier
4251 processors. */
4252 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4253 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
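/* The IT bits live in CPSR[15:10] and CPSR[26:25]; the expression
above gathers them into ITSTATE[7:2] and ITSTATE[1:0] respectively.  */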
4254
4255 /* If-Then handling. On GNU/Linux, where this routine is used, we
4256 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4257 can disable execution of the undefined instruction. So we might
4258 miss the breakpoint if we set it on a skipped conditional
4259 instruction. Because conditional instructions can change the
4260 flags, affecting the execution of further instructions, we may
4261 need to set two breakpoints. */
4262
4263 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4264 {
4265 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4266 {
4267 /* An IT instruction. Because this instruction does not
4268 modify the flags, we can accurately predict the next
4269 executed instruction. */
4270 itstate = inst1 & 0x00ff;
4271 pc += thumb_insn_size (inst1);
4272
4273 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4274 {
4275 inst1 = read_memory_unsigned_integer (pc, 2,
4276 byte_order_for_code);
4277 pc += thumb_insn_size (inst1);
4278 itstate = thumb_advance_itstate (itstate);
4279 }
4280
4281 return MAKE_THUMB_ADDR (pc);
4282 }
4283 else if (itstate != 0)
4284 {
4285 /* We are in a conditional block. Check the condition. */
4286 if (! condition_true (itstate >> 4, status))
4287 {
4288 /* Advance to the next executed instruction. */
4289 pc += thumb_insn_size (inst1);
4290 itstate = thumb_advance_itstate (itstate);
4291
4292 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4293 {
4294 inst1 = read_memory_unsigned_integer (pc, 2,
4295 byte_order_for_code);
4296 pc += thumb_insn_size (inst1);
4297 itstate = thumb_advance_itstate (itstate);
4298 }
4299
4300 return MAKE_THUMB_ADDR (pc);
4301 }
4302 else if ((itstate & 0x0f) == 0x08)
4303 {
4304 /* This is the last instruction of the conditional
4305 block, and it is executed. We can handle it normally
4306 because the following instruction is not conditional,
4307 and we must handle it normally because it is
4308 permitted to branch. Fall through. */
4309 }
4310 else
4311 {
4312 int cond_negated;
4313
4314 /* There are conditional instructions after this one.
4315 If this instruction modifies the flags, then we can
4316 not predict what the next executed instruction will
4317 be. Fortunately, this instruction is architecturally
4318 forbidden to branch; we know it will fall through.
4319 Start by skipping past it. */
4320 pc += thumb_insn_size (inst1);
4321 itstate = thumb_advance_itstate (itstate);
4322
4323 /* Set a breakpoint on the following instruction. */
4324 gdb_assert ((itstate & 0x0f) != 0);
4325 if (insert_bkpt)
4326 insert_single_step_breakpoint (gdbarch, aspace, pc);
4327 cond_negated = (itstate >> 4) & 1;
4328
4329 /* Skip all following instructions with the same
4330 condition. If there is a later instruction in the IT
4331 block with the opposite condition, set the other
4332 breakpoint there. If not, then set a breakpoint on
4333 the instruction after the IT block. */
4334 do
4335 {
4336 inst1 = read_memory_unsigned_integer (pc, 2,
4337 byte_order_for_code);
4338 pc += thumb_insn_size (inst1);
4339 itstate = thumb_advance_itstate (itstate);
4340 }
4341 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4342
4343 return MAKE_THUMB_ADDR (pc);
4344 }
4345 }
4346 }
4347 else if (itstate & 0x0f)
4348 {
4349 /* We are in a conditional block. Check the condition. */
4350 int cond = itstate >> 4;
4351
4352 if (! condition_true (cond, status))
4353 {
4354 /* Advance to the next instruction. All the 32-bit
4355 instructions share a common prefix. */
4356 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4357 return MAKE_THUMB_ADDR (pc + 4);
4358 else
4359 return MAKE_THUMB_ADDR (pc + 2);
4360 }
4361
4362 /* Otherwise, handle the instruction normally. */
4363 }
4364
4365 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4366 {
4367 CORE_ADDR sp;
4368
4369 /* Fetch the saved PC from the stack. It's stored above
4370 all of the other registers. */
4371 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
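/* For example, "pop {r4, r5, pc}" has a register list of 0x30, so
bitcount returns 2 and the saved PC is read from SP + 8.  */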
4372 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4373 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4374 }
4375 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4376 {
4377 unsigned long cond = bits (inst1, 8, 11);
4378 if (cond == 0x0f) /* 0x0f = SWI */
4379 {
4380 struct gdbarch_tdep *tdep;
4381 tdep = gdbarch_tdep (gdbarch);
4382
4383 if (tdep->syscall_next_pc != NULL)
4384 nextpc = tdep->syscall_next_pc (frame);
4385
4386 }
4387 else if (cond != 0x0f && condition_true (cond, status))
4388 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4389 }
4390 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4391 {
4392 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4393 }
4394 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
4395 {
4396 unsigned short inst2;
4397 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4398
4399 /* Default to the next instruction. */
4400 nextpc = pc + 4;
4401 nextpc = MAKE_THUMB_ADDR (nextpc);
4402
4403 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4404 {
4405 /* Branches and miscellaneous control instructions. */
4406
4407 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4408 {
4409 /* B, BL, BLX. */
4410 int j1, j2, imm1, imm2;
4411
4412 imm1 = sbits (inst1, 0, 10);
4413 imm2 = bits (inst2, 0, 10);
4414 j1 = bit (inst2, 13);
4415 j2 = bit (inst2, 11);
4416
4417 offset = ((imm1 << 12) + (imm2 << 1));
4418 offset ^= ((!j2) << 22) | ((!j1) << 23);
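/* IMM1 was sign-extended from bit 10 (the S bit), so bits 22 and 23
of OFFSET currently hold S; XORing them with the inverted J2 and J1
bits produces I2 = NOT(J2 EOR S) and I1 = NOT(J1 EOR S), while the
higher bits keep the sign, as the Thumb-2 branch encoding requires.  */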
4419
4420 nextpc = pc_val + offset;
4421 /* For BLX make sure to clear the low bits. */
4422 if (bit (inst2, 12) == 0)
4423 nextpc = nextpc & 0xfffffffc;
4424 }
4425 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4426 {
4427 /* SUBS PC, LR, #imm8. */
4428 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4429 nextpc -= inst2 & 0x00ff;
4430 }
4431 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4432 {
4433 /* Conditional branch. */
4434 if (condition_true (bits (inst1, 6, 9), status))
4435 {
4436 int sign, j1, j2, imm1, imm2;
4437
4438 sign = sbits (inst1, 10, 10);
4439 imm1 = bits (inst1, 0, 5);
4440 imm2 = bits (inst2, 0, 10);
4441 j1 = bit (inst2, 13);
4442 j2 = bit (inst2, 11);
4443
4444 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4445 offset += (imm1 << 12) + (imm2 << 1);
4446
4447 nextpc = pc_val + offset;
4448 }
4449 }
4450 }
4451 else if ((inst1 & 0xfe50) == 0xe810)
4452 {
4453 /* Load multiple or RFE. */
4454 int rn, offset, load_pc = 1;
4455
4456 rn = bits (inst1, 0, 3);
4457 if (bit (inst1, 7) && !bit (inst1, 8))
4458 {
4459 /* LDMIA or POP */
4460 if (!bit (inst2, 15))
4461 load_pc = 0;
4462 offset = bitcount (inst2) * 4 - 4;
4463 }
4464 else if (!bit (inst1, 7) && bit (inst1, 8))
4465 {
4466 /* LDMDB */
4467 if (!bit (inst2, 15))
4468 load_pc = 0;
4469 offset = -4;
4470 }
4471 else if (bit (inst1, 7) && bit (inst1, 8))
4472 {
4473 /* RFEIA */
4474 offset = 0;
4475 }
4476 else if (!bit (inst1, 7) && !bit (inst1, 8))
4477 {
4478 /* RFEDB */
4479 offset = -8;
4480 }
4481 else
4482 load_pc = 0;
4483
4484 if (load_pc)
4485 {
4486 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4487 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4488 }
4489 }
4490 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4491 {
4492 /* MOV PC or MOVS PC. */
4493 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4494 nextpc = MAKE_THUMB_ADDR (nextpc);
4495 }
4496 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
4497 {
4498 /* LDR PC. */
4499 CORE_ADDR base;
4500 int rn, load_pc = 1;
4501
4502 rn = bits (inst1, 0, 3);
4503 base = get_frame_register_unsigned (frame, rn);
4504 if (rn == ARM_PC_REGNUM)
4505 {
4506 base = (base + 4) & ~(CORE_ADDR) 0x3;
4507 if (bit (inst1, 7))
4508 base += bits (inst2, 0, 11);
4509 else
4510 base -= bits (inst2, 0, 11);
4511 }
4512 else if (bit (inst1, 7))
4513 base += bits (inst2, 0, 11);
4514 else if (bit (inst2, 11))
4515 {
4516 if (bit (inst2, 10))
4517 {
4518 if (bit (inst2, 9))
4519 base += bits (inst2, 0, 7);
4520 else
4521 base -= bits (inst2, 0, 7);
4522 }
4523 }
4524 else if ((inst2 & 0x0fc0) == 0x0000)
4525 {
4526 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4527 base += get_frame_register_unsigned (frame, rm) << shift;
4528 }
4529 else
4530 /* Reserved. */
4531 load_pc = 0;
4532
4533 if (load_pc)
4534 nextpc = get_frame_memory_unsigned (frame, base, 4);
4535 }
4536 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
4537 {
4538 /* TBB. */
4539 CORE_ADDR tbl_reg, table, offset, length;
4540
4541 tbl_reg = bits (inst1, 0, 3);
4542 if (tbl_reg == 0x0f)
4543 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4544 else
4545 table = get_frame_register_unsigned (frame, tbl_reg);
4546
4547 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4548 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4549 nextpc = pc_val + length;
4550 }
4551 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
4552 {
4553 /* TBH. */
4554 CORE_ADDR tbl_reg, table, offset, length;
4555
4556 tbl_reg = bits (inst1, 0, 3);
4557 if (tbl_reg == 0x0f)
4558 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4559 else
4560 table = get_frame_register_unsigned (frame, tbl_reg);
4561
4562 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4563 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4564 nextpc = pc_val + length;
4565 }
4566 }
4567 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4568 {
4569 if (bits (inst1, 3, 6) == 0x0f)
4570 nextpc = pc_val;
4571 else
4572 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4573 }
4574 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4575 {
4576 if (bits (inst1, 3, 6) == 0x0f)
4577 nextpc = pc_val;
4578 else
4579 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4580
4581 nextpc = MAKE_THUMB_ADDR (nextpc);
4582 }
4583 else if ((inst1 & 0xf500) == 0xb100)
4584 {
4585 /* CBNZ or CBZ. */
4586 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4587 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4588
4589 if (bit (inst1, 11) && reg != 0)
4590 nextpc = pc_val + imm;
4591 else if (!bit (inst1, 11) && reg == 0)
4592 nextpc = pc_val + imm;
4593 }
4594 return nextpc;
4595 }
4596
4597 /* Get the raw next address. PC is the current program counter, in
4598 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
4599 the alternative next instruction if there are two options.
4600
4601 The value returned has the execution state of the next instruction
4602 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4603 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4604 address. */
4605
4606 static CORE_ADDR
4607 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
4608 {
4609 struct gdbarch *gdbarch = get_frame_arch (frame);
4610 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4611 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4612 unsigned long pc_val;
4613 unsigned long this_instr;
4614 unsigned long status;
4615 CORE_ADDR nextpc;
4616
4617 if (arm_frame_is_thumb (frame))
4618 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
4619
4620 pc_val = (unsigned long) pc;
4621 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4622
4623 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4624 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4625
4626 if (bits (this_instr, 28, 31) == INST_NV)
4627 switch (bits (this_instr, 24, 27))
4628 {
4629 case 0xa:
4630 case 0xb:
4631 {
4632 /* Branch with Link and change to Thumb. */
4633 nextpc = BranchDest (pc, this_instr);
4634 nextpc |= bit (this_instr, 24) << 1;
4635 nextpc = MAKE_THUMB_ADDR (nextpc);
4636 break;
4637 }
4638 case 0xc:
4639 case 0xd:
4640 case 0xe:
4641 /* Coprocessor register transfer. */
4642 if (bits (this_instr, 12, 15) == 15)
4643 error (_("Invalid update to pc in instruction"));
4644 break;
4645 }
4646 else if (condition_true (bits (this_instr, 28, 31), status))
4647 {
4648 switch (bits (this_instr, 24, 27))
4649 {
4650 case 0x0:
4651 case 0x1: /* data processing */
4652 case 0x2:
4653 case 0x3:
4654 {
4655 unsigned long operand1, operand2, result = 0;
4656 unsigned long rn;
4657 int c;
4658
4659 if (bits (this_instr, 12, 15) != 15)
4660 break;
4661
4662 if (bits (this_instr, 22, 25) == 0
4663 && bits (this_instr, 4, 7) == 9) /* multiply */
4664 error (_("Invalid update to pc in instruction"));
4665
4666 /* BX <reg>, BLX <reg> */
4667 if (bits (this_instr, 4, 27) == 0x12fff1
4668 || bits (this_instr, 4, 27) == 0x12fff3)
4669 {
4670 rn = bits (this_instr, 0, 3);
4671 nextpc = ((rn == ARM_PC_REGNUM)
4672 ? (pc_val + 8)
4673 : get_frame_register_unsigned (frame, rn));
4674
4675 return nextpc;
4676 }
4677
4678 /* This is a data processing instruction with the PC as destination; gather its operands. */
4679 c = (status & FLAG_C) ? 1 : 0;
4680 rn = bits (this_instr, 16, 19);
4681 operand1 = ((rn == ARM_PC_REGNUM)
4682 ? (pc_val + 8)
4683 : get_frame_register_unsigned (frame, rn));
4684
4685 if (bit (this_instr, 25))
4686 {
4687 unsigned long immval = bits (this_instr, 0, 7);
4688 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4689 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4690 & 0xffffffff;
4691 }
4692 else /* operand 2 is a shifted register. */
4693 operand2 = shifted_reg_val (frame, this_instr, c,
4694 pc_val, status);
4695
4696 switch (bits (this_instr, 21, 24))
4697 {
4698 case 0x0: /*and */
4699 result = operand1 & operand2;
4700 break;
4701
4702 case 0x1: /*eor */
4703 result = operand1 ^ operand2;
4704 break;
4705
4706 case 0x2: /*sub */
4707 result = operand1 - operand2;
4708 break;
4709
4710 case 0x3: /*rsb */
4711 result = operand2 - operand1;
4712 break;
4713
4714 case 0x4: /*add */
4715 result = operand1 + operand2;
4716 break;
4717
4718 case 0x5: /*adc */
4719 result = operand1 + operand2 + c;
4720 break;
4721
4722 case 0x6: /*sbc */
4723 result = operand1 - operand2 + c - 1; /* Rn - Op2 - NOT(C). */
4724 break;
4725
4726 case 0x7: /*rsc */
4727 result = operand2 - operand1 + c - 1; /* Op2 - Rn - NOT(C). */
4728 break;
4729
4730 case 0x8:
4731 case 0x9:
4732 case 0xa:
4733 case 0xb: /* tst, teq, cmp, cmn */
4734 result = (unsigned long) nextpc;
4735 break;
4736
4737 case 0xc: /*orr */
4738 result = operand1 | operand2;
4739 break;
4740
4741 case 0xd: /*mov */
4742 /* Always step into a function. */
4743 result = operand2;
4744 break;
4745
4746 case 0xe: /*bic */
4747 result = operand1 & ~operand2;
4748 break;
4749
4750 case 0xf: /*mvn */
4751 result = ~operand2;
4752 break;
4753 }
4754
4755 /* In 26-bit APCS the bottom two bits of the result are
4756 ignored, and we always end up in ARM state. */
4757 if (!arm_apcs_32)
4758 nextpc = arm_addr_bits_remove (gdbarch, result);
4759 else
4760 nextpc = result;
4761
4762 break;
4763 }
4764
4765 case 0x4:
4766 case 0x5: /* data transfer */
4767 case 0x6:
4768 case 0x7:
4769 if (bit (this_instr, 20))
4770 {
4771 /* load */
4772 if (bits (this_instr, 12, 15) == 15)
4773 {
4774 /* rd == pc */
4775 unsigned long rn;
4776 unsigned long base;
4777
4778 if (bit (this_instr, 22))
4779 error (_("Invalid update to pc in instruction"));
4780
4781 /* Word load into the PC; work out the base address. */
4782 rn = bits (this_instr, 16, 19);
4783 base = ((rn == ARM_PC_REGNUM)
4784 ? (pc_val + 8)
4785 : get_frame_register_unsigned (frame, rn));
4786
4787 if (bit (this_instr, 24))
4788 {
4789 /* pre-indexed */
4790 int c = (status & FLAG_C) ? 1 : 0;
4791 unsigned long offset =
4792 (bit (this_instr, 25)
4793 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4794 : bits (this_instr, 0, 11));
4795
4796 if (bit (this_instr, 23))
4797 base += offset;
4798 else
4799 base -= offset;
4800 }
4801 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
4802 4, byte_order);
4803 }
4804 }
4805 break;
4806
4807 case 0x8:
4808 case 0x9: /* block transfer */
4809 if (bit (this_instr, 20))
4810 {
4811 /* LDM */
4812 if (bit (this_instr, 15))
4813 {
4814 /* loading pc */
4815 int offset = 0;
4816
4817 if (bit (this_instr, 23))
4818 {
4819 /* up */
4820 unsigned long reglist = bits (this_instr, 0, 14);
4821 offset = bitcount (reglist) * 4;
4822 if (bit (this_instr, 24)) /* pre */
4823 offset += 4;
4824 }
4825 else if (bit (this_instr, 24))
4826 offset = -4;
4827
4828 {
4829 unsigned long rn_val =
4830 get_frame_register_unsigned (frame,
4831 bits (this_instr, 16, 19));
4832 nextpc =
4833 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
4834 + offset),
4835 4, byte_order);
4836 }
4837 }
4838 }
4839 break;
4840
4841 case 0xb: /* branch & link */
4842 case 0xa: /* branch */
4843 {
4844 nextpc = BranchDest (pc, this_instr);
4845 break;
4846 }
4847
4848 case 0xc:
4849 case 0xd:
4850 case 0xe: /* coproc ops */
4851 break;
4852 case 0xf: /* SWI */
4853 {
4854 struct gdbarch_tdep *tdep;
4855 tdep = gdbarch_tdep (gdbarch);
4856
4857 if (tdep->syscall_next_pc != NULL)
4858 nextpc = tdep->syscall_next_pc (frame);
4859
4860 }
4861 break;
4862
4863 default:
4864 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4865 return (pc);
4866 }
4867 }
4868
4869 return nextpc;
4870 }
4871
4872 CORE_ADDR
4873 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4874 {
4875 struct gdbarch *gdbarch = get_frame_arch (frame);
4876 CORE_ADDR nextpc =
4877 gdbarch_addr_bits_remove (gdbarch,
4878 arm_get_next_pc_raw (frame, pc, TRUE));
4879 if (nextpc == pc)
4880 error (_("Infinite loop detected"));
4881 return nextpc;
4882 }
4883
4884 /* single_step() is called just before we want to resume the inferior,
4885 if we want to single-step it but there is no hardware or kernel
4886 single-step support. We find the target of the coming instruction
4887 and breakpoint it. */
4888
4889 int
4890 arm_software_single_step (struct frame_info *frame)
4891 {
4892 struct gdbarch *gdbarch = get_frame_arch (frame);
4893 struct address_space *aspace = get_frame_address_space (frame);
4894
4895 /* NOTE: This may insert the wrong breakpoint instruction when
4896 single-stepping over a mode-changing instruction, if the
4897 CPSR heuristics are used. */
4898
4899 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4900 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4901
4902 return 1;
4903 }
4904
4905 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4906 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4907 NULL if an error occurs. BUF is freed. */
4908
4909 static gdb_byte *
4910 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4911 int old_len, int new_len)
4912 {
4913 gdb_byte *new_buf, *middle;
4914 int bytes_to_read = new_len - old_len;
4915
4916 new_buf = xmalloc (new_len);
4917 memcpy (new_buf + bytes_to_read, buf, old_len);
4918 xfree (buf);
4919 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4920 {
4921 xfree (new_buf);
4922 return NULL;
4923 }
4924 return new_buf;
4925 }
4926
4927 /* An IT block is at most the 2-byte IT instruction followed by
4928 four 4-byte instructions. The furthest back we must search to
4929 find an IT block that affects the current instruction is thus
4930 2 + 3 * 4 == 14 bytes. */
4931 #define MAX_IT_BLOCK_PREFIX 14
4932
4933 /* Use a quick scan if there are more than this many bytes of
4934 code. */
4935 #define IT_SCAN_THRESHOLD 32
4936
4937 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4938 A breakpoint in an IT block may not be hit, depending on the
4939 condition flags. */
4940 static CORE_ADDR
4941 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4942 {
4943 gdb_byte *buf;
4944 char map_type;
4945 CORE_ADDR boundary, func_start;
4946 int buf_len, buf2_len;
4947 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4948 int i, any, last_it, last_it_count;
4949
4950 /* If we are using BKPT breakpoints, none of this is necessary. */
4951 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4952 return bpaddr;
4953
4954 /* ARM mode does not have this problem. */
4955 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4956 return bpaddr;
4957
4958 /* We are setting a breakpoint in Thumb code that could potentially
4959 contain an IT block. The first step is to find how much Thumb
4960 code there is; we do not need to read outside of known Thumb
4961 sequences. */
4962 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4963 if (map_type == 0)
4964 /* Thumb-2 code must have mapping symbols to have a chance. */
4965 return bpaddr;
4966
4967 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4968
4969 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4970 && func_start > boundary)
4971 boundary = func_start;
4972
4973 /* Search for a candidate IT instruction. We have to do some fancy
4974 footwork to distinguish a real IT instruction from the second
4975 half of a 32-bit instruction, but there is no need for that if
4976 there's no candidate. */
4977 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4978 if (buf_len == 0)
4979 /* No room for an IT instruction. */
4980 return bpaddr;
4981
4982 buf = xmalloc (buf_len);
4983 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4984 return bpaddr;
4985 any = 0;
4986 for (i = 0; i < buf_len; i += 2)
4987 {
4988 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4989 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4990 {
4991 any = 1;
4992 break;
4993 }
4994 }
4995 if (any == 0)
4996 {
4997 xfree (buf);
4998 return bpaddr;
4999 }
5000
5001 /* OK, the code bytes before this instruction contain at least one
5002 halfword which resembles an IT instruction. We know that it's
5003 Thumb code, but there are still two possibilities. Either the
5004 halfword really is an IT instruction, or it is the second half of
5005 a 32-bit Thumb instruction. The only way we can tell is to
5006 scan forwards from a known instruction boundary. */
5007 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5008 {
5009 int definite;
5010
5011 /* There's a lot of code before this instruction. Start with an
5012 optimistic search; it's easy to recognize halfwords that can
5013 not be the start of a 32-bit instruction, and use that to
5014 lock on to the instruction boundaries. */
5015 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5016 if (buf == NULL)
5017 return bpaddr;
5018 buf_len = IT_SCAN_THRESHOLD;
5019
5020 definite = 0;
5021 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5022 {
5023 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5024 if (thumb_insn_size (inst1) == 2)
5025 {
5026 definite = 1;
5027 break;
5028 }
5029 }
5030
5031 /* At this point, if DEFINITE, BUF[I] is the first place we
5032 are sure that we know the instruction boundaries, and it is far
5033 enough from BPADDR that we could not miss an IT instruction
5034 affecting BPADDR. If ! DEFINITE, give up - start from a
5035 known boundary. */
5036 if (! definite)
5037 {
5038 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5039 bpaddr - boundary);
5040 if (buf == NULL)
5041 return bpaddr;
5042 buf_len = bpaddr - boundary;
5043 i = 0;
5044 }
5045 }
5046 else
5047 {
5048 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5049 if (buf == NULL)
5050 return bpaddr;
5051 buf_len = bpaddr - boundary;
5052 i = 0;
5053 }
5054
5055 /* Scan forwards. Find the last IT instruction before BPADDR. */
5056 last_it = -1;
5057 last_it_count = 0;
5058 while (i < buf_len)
5059 {
5060 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5061 last_it_count--;
5062 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5063 {
5064 last_it = i;
5065 if (inst1 & 0x0001)
5066 last_it_count = 4;
5067 else if (inst1 & 0x0002)
5068 last_it_count = 3;
5069 else if (inst1 & 0x0004)
5070 last_it_count = 2;
5071 else
5072 last_it_count = 1;
5073 }
5074 i += thumb_insn_size (inst1);
5075 }
5076
5077 xfree (buf);
5078
5079 if (last_it == -1)
5080 /* There wasn't really an IT instruction after all. */
5081 return bpaddr;
5082
5083 if (last_it_count < 1)
5084 /* It was too far away. */
5085 return bpaddr;
5086
5087 /* This really is a trouble spot. Move the breakpoint to the IT
5088 instruction. */
5089 return bpaddr - buf_len + last_it;
5090 }
5091
5092 /* ARM displaced stepping support.
5093
5094 Generally ARM displaced stepping works as follows:
5095
5096 1. When an instruction is to be single-stepped, it is first decoded by
5097 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5098 Depending on the type of instruction, it is then copied to a scratch
5099 location, possibly in a modified form. The copy_* set of functions
5100 performs such modification, as necessary. A breakpoint is placed after
5101 the modified instruction in the scratch space to return control to GDB.
5102 Note in particular that instructions which modify the PC will no longer
5103 do so after modification.
5104
5105 2. The instruction is single-stepped, by setting the PC to the scratch
5106 location address, and resuming. Control returns to GDB when the
5107 breakpoint is hit.
5108
5109 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5110 function used for the current instruction. This function's job is to
5111 put the CPU/memory state back to what it would have been if the
5112 instruction had been executed unmodified in its original location. */
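
/* As a concrete illustration, see copy_b_bl_blx and cleanup_branch
below: a "b <label>" instruction is copied to the scratch area as a
NOP, and the branch itself is performed afterwards by cleanup_branch,
which writes the destination computed from the instruction's original
address into the PC.  */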
5113
5114 /* NOP instruction (mov r0, r0). */
5115 #define ARM_NOP 0xe1a00000
5116
5117 /* Helper for register reads for displaced stepping. In particular, this
5118 returns the PC as it would be seen by the instruction at its original
5119 location. */
5120
5121 ULONGEST
5122 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5123 int regno)
5124 {
5125 ULONGEST ret;
5126 CORE_ADDR from = dsc->insn_addr;
5127
5128 if (regno == ARM_PC_REGNUM)
5129 {
5130 /* Compute pipeline offset:
5131 - When executing an ARM instruction, PC reads as the address of the
5132 current instruction plus 8.
5133 - When executing a Thumb instruction, PC reads as the address of the
5134 current instruction plus 4. */
5135
5136 if (!dsc->is_thumb)
5137 from += 8;
5138 else
5139 from += 4;
5140
5141 if (debug_displaced)
5142 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5143 (unsigned long) from);
5144 return (ULONGEST) from;
5145 }
5146 else
5147 {
5148 regcache_cooked_read_unsigned (regs, regno, &ret);
5149 if (debug_displaced)
5150 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5151 regno, (unsigned long) ret);
5152 return ret;
5153 }
5154 }
5155
5156 static int
5157 displaced_in_arm_mode (struct regcache *regs)
5158 {
5159 ULONGEST ps;
5160 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5161
5162 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5163
5164 return (ps & t_bit) == 0;
5165 }
5166
5167 /* Write to the PC as from a branch instruction. */
5168
5169 static void
5170 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5171 ULONGEST val)
5172 {
5173 if (!dsc->is_thumb)
5174 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5175 architecture versions < 6. */
5176 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5177 val & ~(ULONGEST) 0x3);
5178 else
5179 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5180 val & ~(ULONGEST) 0x1);
5181 }
5182
5183 /* Write to the PC as from a branch-exchange instruction. */
5184
5185 static void
5186 bx_write_pc (struct regcache *regs, ULONGEST val)
5187 {
5188 ULONGEST ps;
5189 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5190
5191 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5192
5193 if ((val & 1) == 1)
5194 {
5195 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5196 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5197 }
5198 else if ((val & 2) == 0)
5199 {
5200 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5201 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5202 }
5203 else
5204 {
5205 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5206 mode, align dest to 4 bytes). */
5207 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5208 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5209 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5210 }
5211 }
5212
5213 /* Write to the PC as if from a load instruction. */
5214
5215 static void
5216 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5217 ULONGEST val)
5218 {
5219 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5220 bx_write_pc (regs, val);
5221 else
5222 branch_write_pc (regs, dsc, val);
5223 }
5224
5225 /* Write to the PC as if from an ALU instruction. */
5226
5227 static void
5228 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5229 ULONGEST val)
5230 {
5231 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5232 bx_write_pc (regs, val);
5233 else
5234 branch_write_pc (regs, dsc, val);
5235 }
5236
5237 /* Helper for writing to registers for displaced stepping. Writing to the PC
5238 has varying effects depending on the instruction which does the write:
5239 this is controlled by the WRITE_PC argument. */
5240
5241 void
5242 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5243 int regno, ULONGEST val, enum pc_write_style write_pc)
5244 {
5245 if (regno == ARM_PC_REGNUM)
5246 {
5247 if (debug_displaced)
5248 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5249 (unsigned long) val);
5250 switch (write_pc)
5251 {
5252 case BRANCH_WRITE_PC:
5253 branch_write_pc (regs, dsc, val);
5254 break;
5255
5256 case BX_WRITE_PC:
5257 bx_write_pc (regs, val);
5258 break;
5259
5260 case LOAD_WRITE_PC:
5261 load_write_pc (regs, dsc, val);
5262 break;
5263
5264 case ALU_WRITE_PC:
5265 alu_write_pc (regs, dsc, val);
5266 break;
5267
5268 case CANNOT_WRITE_PC:
5269 warning (_("Instruction wrote to PC in an unexpected way when "
5270 "single-stepping"));
5271 break;
5272
5273 default:
5274 internal_error (__FILE__, __LINE__,
5275 _("Invalid argument to displaced_write_reg"));
5276 }
5277
5278 dsc->wrote_to_pc = 1;
5279 }
5280 else
5281 {
5282 if (debug_displaced)
5283 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5284 regno, (unsigned long) val);
5285 regcache_cooked_write_unsigned (regs, regno, val);
5286 }
5287 }
5288
5289 /* This function is used to concisely determine if an instruction INSN
5290 references PC. Register fields of interest in INSN should have the
5291 corresponding fields of BITMASK set to 0b1111. The function
5292 returns 1 if any of these fields in INSN reference the PC
5293 (also 0b1111, r15), else it returns 0. */
5294
5295 static int
5296 insn_references_pc (uint32_t insn, uint32_t bitmask)
5297 {
5298 uint32_t lowbit = 1;
5299
5300 while (bitmask != 0)
5301 {
5302 uint32_t mask;
5303
5304 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5305 ;
5306
5307 if (!lowbit)
5308 break;
5309
5310 mask = lowbit * 0xf;
5311
5312 if ((insn & mask) == mask)
5313 return 1;
5314
5315 bitmask &= ~mask;
5316 }
5317
5318 return 0;
5319 }
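/* For example, copy_preload below calls
insn_references_pc (insn, 0x000f0000ul) to check whether the Rn field
in bits 16-19 of INSN is 0b1111, i.e. whether the preload address
depends on the PC.  */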
5320
5321 /* The simplest copy function. Many instructions have the same effect no
5322 matter what address they are executed at: in those cases, use this. */
5323
5324 static int
5325 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5326 const char *iname, struct displaced_step_closure *dsc)
5327 {
5328 if (debug_displaced)
5329 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5330 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5331 iname);
5332
5333 dsc->modinsn[0] = insn;
5334
5335 return 0;
5336 }
5337
5338 /* Preload instructions with immediate offset. */
5339
5340 static void
5341 cleanup_preload (struct gdbarch *gdbarch,
5342 struct regcache *regs, struct displaced_step_closure *dsc)
5343 {
5344 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5345 if (!dsc->u.preload.immed)
5346 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5347 }
5348
5349 static int
5350 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5351 struct displaced_step_closure *dsc)
5352 {
5353 unsigned int rn = bits (insn, 16, 19);
5354 ULONGEST rn_val;
5355
5356 if (!insn_references_pc (insn, 0x000f0000ul))
5357 return copy_unmodified (gdbarch, insn, "preload", dsc);
5358
5359 if (debug_displaced)
5360 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5361 (unsigned long) insn);
5362
5363 /* Preload instructions:
5364
5365 {pli/pld} [rn, #+/-imm]
5366 ->
5367 {pli/pld} [r0, #+/-imm]. */
5368
5369 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5370 rn_val = displaced_read_reg (regs, dsc, rn);
5371 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5372
5373 dsc->u.preload.immed = 1;
5374
5375 dsc->modinsn[0] = insn & 0xfff0ffff;
5376
5377 dsc->cleanup = &cleanup_preload;
5378
5379 return 0;
5380 }
5381
5382 /* Preload instructions with register offset. */
5383
5384 static int
5385 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5386 struct regcache *regs,
5387 struct displaced_step_closure *dsc)
5388 {
5389 unsigned int rn = bits (insn, 16, 19);
5390 unsigned int rm = bits (insn, 0, 3);
5391 ULONGEST rn_val, rm_val;
5392
5393 if (!insn_references_pc (insn, 0x000f000ful))
5394 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
5395
5396 if (debug_displaced)
5397 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5398 (unsigned long) insn);
5399
5400 /* Preload register-offset instructions:
5401
5402 {pli/pld} [rn, rm {, shift}]
5403 ->
5404 {pli/pld} [r0, r1 {, shift}]. */
5405
5406 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5407 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5408 rn_val = displaced_read_reg (regs, dsc, rn);
5409 rm_val = displaced_read_reg (regs, dsc, rm);
5410 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5411 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5412
5413 dsc->u.preload.immed = 0;
5414
5415 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5416
5417 dsc->cleanup = &cleanup_preload;
5418
5419 return 0;
5420 }
5421
5422 /* Copy/cleanup coprocessor load and store instructions. */
5423
5424 static void
5425 cleanup_copro_load_store (struct gdbarch *gdbarch,
5426 struct regcache *regs,
5427 struct displaced_step_closure *dsc)
5428 {
5429 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5430
5431 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5432
5433 if (dsc->u.ldst.writeback)
5434 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5435 }
5436
5437 static int
5438 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5439 struct regcache *regs,
5440 struct displaced_step_closure *dsc)
5441 {
5442 unsigned int rn = bits (insn, 16, 19);
5443 ULONGEST rn_val;
5444
5445 if (!insn_references_pc (insn, 0x000f0000ul))
5446 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5447
5448 if (debug_displaced)
5449 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5450 "load/store insn %.8lx\n", (unsigned long) insn);
5451
5452 /* Coprocessor load/store instructions:
5453
5454 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5455 ->
5456 {stc/stc2} [r0, #+/-imm].
5457
5458 ldc/ldc2 are handled identically. */
5459
5460 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5461 rn_val = displaced_read_reg (regs, dsc, rn);
5462 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5463
5464 dsc->u.ldst.writeback = bit (insn, 25);
5465 dsc->u.ldst.rn = rn;
5466
5467 dsc->modinsn[0] = insn & 0xfff0ffff;
5468
5469 dsc->cleanup = &cleanup_copro_load_store;
5470
5471 return 0;
5472 }
5473
5474 /* Clean up branch instructions (actually perform the branch, by setting
5475 PC). */
5476
5477 static void
5478 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5479 struct displaced_step_closure *dsc)
5480 {
5481 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5482 int branch_taken = condition_true (dsc->u.branch.cond, status);
5483 enum pc_write_style write_pc = dsc->u.branch.exchange
5484 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5485
5486 if (!branch_taken)
5487 return;
5488
5489 if (dsc->u.branch.link)
5490 {
5491 ULONGEST pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
5492 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
5493 }
5494
5495 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5496 }
5497
5498 /* Copy B/BL/BLX instructions with immediate destinations. */
5499
5500 static int
5501 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5502 struct regcache *regs, struct displaced_step_closure *dsc)
5503 {
5504 unsigned int cond = bits (insn, 28, 31);
5505 int exchange = (cond == 0xf);
5506 int link = exchange || bit (insn, 24);
5507 CORE_ADDR from = dsc->insn_addr;
5508 long offset;
5509
5510 if (debug_displaced)
5511 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5512 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5513 (unsigned long) insn);
5514
5515 /* Implement "BL<cond> <label>" as:
5516
5517 Preparation: cond <- instruction condition
5518 Insn: mov r0, r0 (nop)
5519 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5520
5521 B<cond> similar, but don't set r14 in cleanup. */
5522
5523 if (exchange)
5524 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5525 then arrange the switch into Thumb mode. */
5526 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5527 else
5528 offset = bits (insn, 0, 23) << 2;
5529
5530 if (bit (offset, 25))
5531 offset = offset | ~0x3ffffff;
5532
5533 dsc->u.branch.cond = cond;
5534 dsc->u.branch.link = link;
5535 dsc->u.branch.exchange = exchange;
5536 dsc->u.branch.dest = from + 8 + offset;
5537
5538 dsc->modinsn[0] = ARM_NOP;
5539
5540 dsc->cleanup = &cleanup_branch;
5541
5542 return 0;
5543 }
5544
5545 /* Copy BX/BLX with register-specified destinations. */
5546
5547 static int
5548 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5549 struct regcache *regs, struct displaced_step_closure *dsc)
5550 {
5551 unsigned int cond = bits (insn, 28, 31);
5552 /* BX: x12xxx1x
5553 BLX: x12xxx3x. */
5554 int link = bit (insn, 5);
5555 unsigned int rm = bits (insn, 0, 3);
5556
5557 if (debug_displaced)
5558 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
5559 "%.8lx\n", (link) ? "blx" : "bx",
5560 (unsigned long) insn);
5561
5562 /* Implement "{BX,BLX}<cond> <reg>" as:
5563
5564 Preparation: cond <- instruction condition
5565 Insn: mov r0, r0 (nop)
5566 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5567
5568 Don't set r14 in cleanup for BX. */
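/* For example (hypothetical instruction): "blx r3" with r3 = 0x8001
   records 0x8001 as the branch destination; if the condition passes,
   cleanup_branch writes r14 = from + 4 (because the link bit is set) and
   uses BX_WRITE_PC, so the set bit 0 switches the inferior into Thumb
   mode at 0x8000.  */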
5569
5570 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5571
5572 dsc->u.branch.cond = cond;
5573 dsc->u.branch.link = link;
5574 dsc->u.branch.exchange = 1;
5575
5576 dsc->modinsn[0] = ARM_NOP;
5577
5578 dsc->cleanup = &cleanup_branch;
5579
5580 return 0;
5581 }
5582
5583 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5584
5585 static void
5586 cleanup_alu_imm (struct gdbarch *gdbarch,
5587 struct regcache *regs, struct displaced_step_closure *dsc)
5588 {
5589 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5590 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5591 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5592 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5593 }
5594
5595 static int
5596 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5597 struct displaced_step_closure *dsc)
5598 {
5599 unsigned int rn = bits (insn, 16, 19);
5600 unsigned int rd = bits (insn, 12, 15);
5601 unsigned int op = bits (insn, 21, 24);
5602 int is_mov = (op == 0xd);
5603 ULONGEST rd_val, rn_val;
5604
5605 if (!insn_references_pc (insn, 0x000ff000ul))
5606 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5607
5608 if (debug_displaced)
5609 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5610 "%.8lx\n", is_mov ? "move" : "ALU",
5611 (unsigned long) insn);
5612
5613 /* Instruction is of form:
5614
5615 <op><cond> rd, [rn,] #imm
5616
5617 Rewrite as:
5618
5619 Preparation: tmp1, tmp2 <- r0, r1;
5620 r0, r1 <- rd, rn
5621 Insn: <op><cond> r0, r1, #imm
5622 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5623 */
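/* Worked example (hypothetical instruction): for "add r2, pc, #4"
   executed at address "from", the preparation copies the old r2 into r0
   and the PC value (from + 8) into r1; the copy run out of line is
   "add r0, r1, #4", leaving r0 = from + 12; the cleanup then writes that
   value into r2 and restores r0 and r1.  The ALU_WRITE_PC write style
   covers the case where rd is the PC.  */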
5624
5625 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5626 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5627 rn_val = displaced_read_reg (regs, dsc, rn);
5628 rd_val = displaced_read_reg (regs, dsc, rd);
5629 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5630 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5631 dsc->rd = rd;
5632
5633 if (is_mov)
5634 dsc->modinsn[0] = insn & 0xfff00fff;
5635 else
5636 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5637
5638 dsc->cleanup = &cleanup_alu_imm;
5639
5640 return 0;
5641 }
5642
5643 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5644
5645 static void
5646 cleanup_alu_reg (struct gdbarch *gdbarch,
5647 struct regcache *regs, struct displaced_step_closure *dsc)
5648 {
5649 ULONGEST rd_val;
5650 int i;
5651
5652 rd_val = displaced_read_reg (regs, dsc, 0);
5653
5654 for (i = 0; i < 3; i++)
5655 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5656
5657 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5658 }
5659
5660 static int
5661 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5662 struct displaced_step_closure *dsc)
5663 {
5664 unsigned int rn = bits (insn, 16, 19);
5665 unsigned int rm = bits (insn, 0, 3);
5666 unsigned int rd = bits (insn, 12, 15);
5667 unsigned int op = bits (insn, 21, 24);
5668 int is_mov = (op == 0xd);
5669 ULONGEST rd_val, rn_val, rm_val;
5670
5671 if (!insn_references_pc (insn, 0x000ff00ful))
5672 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5673
5674 if (debug_displaced)
5675 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
5676 is_mov ? "move" : "ALU", (unsigned long) insn);
5677
5678 /* Instruction is of form:
5679
5680 <op><cond> rd, [rn,] rm [, <shift>]
5681
5682 Rewrite as:
5683
5684 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5685 r0, r1, r2 <- rd, rn, rm
5686 Insn: <op><cond> r0, r1, r2 [, <shift>]
5687 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5688 */
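/* The masks below remap the register fields: "insn & 0xfff00ff0" clears
   Rn (bits [19:16]), Rd (bits [15:12]) and Rm (bits [3:0]), and the ORed
   constant re-points them at r1, r0 and r2 respectively (the mov form
   has no Rn operand, so only Rm is redirected to r2).  */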
5689
5690 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5691 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5692 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5693 rd_val = displaced_read_reg (regs, dsc, rd);
5694 rn_val = displaced_read_reg (regs, dsc, rn);
5695 rm_val = displaced_read_reg (regs, dsc, rm);
5696 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5697 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5698 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5699 dsc->rd = rd;
5700
5701 if (is_mov)
5702 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5703 else
5704 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5705
5706 dsc->cleanup = &cleanup_alu_reg;
5707
5708 return 0;
5709 }
5710
5711 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5712
5713 static void
5714 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5715 struct regcache *regs,
5716 struct displaced_step_closure *dsc)
5717 {
5718 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5719 int i;
5720
5721 for (i = 0; i < 4; i++)
5722 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5723
5724 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5725 }
5726
5727 static int
5728 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5729 struct regcache *regs,
5730 struct displaced_step_closure *dsc)
5731 {
5732 unsigned int rn = bits (insn, 16, 19);
5733 unsigned int rm = bits (insn, 0, 3);
5734 unsigned int rd = bits (insn, 12, 15);
5735 unsigned int rs = bits (insn, 8, 11);
5736 unsigned int op = bits (insn, 21, 24);
5737 int is_mov = (op == 0xd), i;
5738 ULONGEST rd_val, rn_val, rm_val, rs_val;
5739
5740 if (!insn_references_pc (insn, 0x000fff0ful))
5741 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5742
5743 if (debug_displaced)
5744 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
5745 "%.8lx\n", is_mov ? "move" : "ALU",
5746 (unsigned long) insn);
5747
5748 /* Instruction is of form:
5749
5750 <op><cond> rd, [rn,] rm, <shift> rs
5751
5752 Rewrite as:
5753
5754 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5755 r0, r1, r2, r3 <- rd, rn, rm, rs
5756 Insn: <op><cond> r0, r1, r2, <shift> r3
5757 Cleanup: tmp5 <- r0
5758 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5759 rd <- tmp5
5760 */
5761
5762 for (i = 0; i < 4; i++)
5763 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
5764
5765 rd_val = displaced_read_reg (regs, dsc, rd);
5766 rn_val = displaced_read_reg (regs, dsc, rn);
5767 rm_val = displaced_read_reg (regs, dsc, rm);
5768 rs_val = displaced_read_reg (regs, dsc, rs);
5769 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5770 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5771 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5772 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
5773 dsc->rd = rd;
5774
5775 if (is_mov)
5776 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5777 else
5778 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5779
5780 dsc->cleanup = &cleanup_alu_shifted_reg;
5781
5782 return 0;
5783 }
5784
5785 /* Clean up load instructions. */
5786
5787 static void
5788 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
5789 struct displaced_step_closure *dsc)
5790 {
5791 ULONGEST rt_val, rt_val2 = 0, rn_val;
5792
5793 rt_val = displaced_read_reg (regs, dsc, 0);
5794 if (dsc->u.ldst.xfersize == 8)
5795 rt_val2 = displaced_read_reg (regs, dsc, 1);
5796 rn_val = displaced_read_reg (regs, dsc, 2);
5797
5798 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5799 if (dsc->u.ldst.xfersize > 4)
5800 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5801 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5802 if (!dsc->u.ldst.immed)
5803 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5804
5805 /* Handle register writeback. */
5806 if (dsc->u.ldst.writeback)
5807 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5808 /* Put result in right place. */
5809 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
5810 if (dsc->u.ldst.xfersize == 8)
5811 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
5812 }
5813
5814 /* Clean up store instructions. */
5815
5816 static void
5817 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5818 struct displaced_step_closure *dsc)
5819 {
5820 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
5821
5822 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5823 if (dsc->u.ldst.xfersize > 4)
5824 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5825 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5826 if (!dsc->u.ldst.immed)
5827 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5828 if (!dsc->u.ldst.restore_r4)
5829 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5830
5831 /* Writeback. */
5832 if (dsc->u.ldst.writeback)
5833 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5834 }
5835
5836 /* Copy "extra" load/store instructions. These are halfword/doubleword
5837 transfers, which have a different encoding to byte/word transfers. */
5838
5839 static int
5840 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
5841 struct regcache *regs, struct displaced_step_closure *dsc)
5842 {
5843 unsigned int op1 = bits (insn, 20, 24);
5844 unsigned int op2 = bits (insn, 5, 6);
5845 unsigned int rt = bits (insn, 12, 15);
5846 unsigned int rn = bits (insn, 16, 19);
5847 unsigned int rm = bits (insn, 0, 3);
5848 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5849 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
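/* The load[] and bytesize[] arrays above are indexed by the opcode value
   computed below.  For example (hypothetical instruction),
   "ldrh r1, [r2, r3]" has op2 = 0x1, the load bit (op1 bit 0) set and
   uses the register form, so opcode = ((0x1 << 2) | 0x1) - 4 = 1:
   load[1] = 1 and bytesize[1] = 2, i.e. a two-byte load.  */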
5850 int immed = (op1 & 0x4) != 0;
5851 int opcode;
5852 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5853
5854 if (!insn_references_pc (insn, 0x000ff00ful))
5855 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5856
5857 if (debug_displaced)
5858 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
5859 "insn %.8lx\n", unprivileged ? "unprivileged " : "",
5860 (unsigned long) insn);
5861
5862 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5863
5864 if (opcode < 0)
5865 internal_error (__FILE__, __LINE__,
5866 _("copy_extra_ld_st: instruction decode error"));
5867
5868 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5869 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5870 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5871 if (!immed)
5872 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
5873
5874 rt_val = displaced_read_reg (regs, dsc, rt);
5875 if (bytesize[opcode] == 8)
5876 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
5877 rn_val = displaced_read_reg (regs, dsc, rn);
5878 if (!immed)
5879 rm_val = displaced_read_reg (regs, dsc, rm);
5880
5881 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5882 if (bytesize[opcode] == 8)
5883 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5884 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5885 if (!immed)
5886 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5887
5888 dsc->rd = rt;
5889 dsc->u.ldst.xfersize = bytesize[opcode];
5890 dsc->u.ldst.rn = rn;
5891 dsc->u.ldst.immed = immed;
5892 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5893 dsc->u.ldst.restore_r4 = 0;
5894
5895 if (immed)
5896 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5897 ->
5898 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5899 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5900 else
5901 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5902 ->
5903 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5904 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5905
5906 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5907
5908 return 0;
5909 }
5910
5911 /* Copy byte/word loads and stores. */
5912
5913 static int
5914 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
5915 struct regcache *regs,
5916 struct displaced_step_closure *dsc, int load, int byte,
5917 int usermode)
5918 {
5919 int immed = !bit (insn, 25);
5920 unsigned int rt = bits (insn, 12, 15);
5921 unsigned int rn = bits (insn, 16, 19);
5922 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
5923 ULONGEST rt_val, rn_val, rm_val = 0;
5924
5925 if (!insn_references_pc (insn, 0x000ff00ful))
5926 return copy_unmodified (gdbarch, insn, "load/store", dsc);
5927
5928 if (debug_displaced)
5929 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
5930 load ? (byte ? "ldrb" : "ldr")
5931 : (byte ? "strb" : "str"), usermode ? "t" : "",
5932 (unsigned long) insn);
5933
5934 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5935 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5936 if (!immed)
5937 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
5938 if (!load)
5939 dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
5940
5941 rt_val = displaced_read_reg (regs, dsc, rt);
5942 rn_val = displaced_read_reg (regs, dsc, rn);
5943 if (!immed)
5944 rm_val = displaced_read_reg (regs, dsc, rm);
5945
5946 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5947 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5948 if (!immed)
5949 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5950
5951 dsc->rd = rt;
5952 dsc->u.ldst.xfersize = byte ? 1 : 4;
5953 dsc->u.ldst.rn = rn;
5954 dsc->u.ldst.immed = immed;
5955 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5956
5957 /* To write PC we can do:
5958
5959 Before this sequence of instructions:
5960 r0 is the PC value obtained from displaced_read_reg, so r0 = from + 8;
5961 r2 is the Rn value obtained from displaced_read_reg.
5962
5963 Insn1: push {pc} Write address of STR instruction + offset on stack
5964 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
5965 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
5966 = addr(Insn1) + offset - addr(Insn3) - 8
5967 = offset - 16
5968 Insn4: add r4, r4, #8 r4 = offset - 8
5969 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
5970 = from + offset
5971 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
5972
5973 Otherwise we don't know what value to write for PC, since the offset is
5974 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
5975 of this can be found in Section "Saving from r15" in
5976 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
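/* Worked example, assuming the scratch copy starts at to = 0x20000 on a
   core that stores PC+8: Insn1 at 0x20000 pushes 0x20008; Insn2 pops it
   into r4; Insn3 at 0x20008 leaves r4 = 0x20008 - (0x20008 + 8) = -8;
   Insn4 gives r4 = 0; Insn5 gives r0 = from + 8, which is exactly what
   the original "str pc" would have stored on such a core.  */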
5977
5978 if (load || rt != ARM_PC_REGNUM)
5979 {
5980 dsc->u.ldst.restore_r4 = 0;
5981
5982 if (immed)
5983 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
5984 ->
5985 {ldr,str}[b]<cond> r0, [r2, #imm]. */
5986 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5987 else
5988 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
5989 ->
5990 {ldr,str}[b]<cond> r0, [r2, r3]. */
5991 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5992 }
5993 else
5994 {
5995 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
5996 dsc->u.ldst.restore_r4 = 1;
5997 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
5998 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
5999 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
6000 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
6001 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
6002
6003 /* As above. */
6004 if (immed)
6005 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
6006 else
6007 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
6008
6009 dsc->numinsns = 6;
6010 }
6011
6012 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6013
6014 return 0;
6015 }
6016
6017 /* Cleanup LDM instructions with fully-populated register list. This is an
6018 unfortunate corner case: it's impossible to implement correctly by modifying
6019 the instruction. The issue is as follows: we have an instruction,
6020
6021 ldm rN, {r0-r15}
6022
6023 which we must rewrite to avoid loading PC. A possible solution would be to
6024 do the load in two halves, something like (with suitable cleanup
6025 afterwards):
6026
6027 mov r8, rN
6028 ldm[id][ab] r8!, {r0-r7}
6029 str r7, <temp>
6030 ldm[id][ab] r8, {r7-r14}
6031 <bkpt>
6032
6033 but at present there's no suitable place for <temp>, since the scratch space
6034 is overwritten before the cleanup routine is called. For now, we simply
6035 emulate the instruction. */
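/* For instance, the emulation below handles "ldmia rN, {r0-r15}" with
   bump_before = 0 and bump_after = 4, filling r0 upwards from successive
   words at the transfer address, while "ldmdb rN, {r0-r15}" uses
   bump_before = -4 and bump_after = 0 and walks downwards from r15.  */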
6036
6037 static void
6038 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6039 struct displaced_step_closure *dsc)
6040 {
6041 int inc = dsc->u.block.increment;
6042 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6043 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6044 uint32_t regmask = dsc->u.block.regmask;
6045 int regno = inc ? 0 : 15;
6046 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6047 int exception_return = dsc->u.block.load && dsc->u.block.user
6048 && (regmask & 0x8000) != 0;
6049 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6050 int do_transfer = condition_true (dsc->u.block.cond, status);
6051 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6052
6053 if (!do_transfer)
6054 return;
6055
6056 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6057 sensible we can do here. Complain loudly. */
6058 if (exception_return)
6059 error (_("Cannot single-step exception return"));
6060
6061 /* We don't handle any stores here for now. */
6062 gdb_assert (dsc->u.block.load != 0);
6063
6064 if (debug_displaced)
6065 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6066 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6067 dsc->u.block.increment ? "inc" : "dec",
6068 dsc->u.block.before ? "before" : "after");
6069
6070 while (regmask)
6071 {
6072 uint32_t memword;
6073
6074 if (inc)
6075 while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
6076 regno++;
6077 else
6078 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6079 regno--;
6080
6081 xfer_addr += bump_before;
6082
6083 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6084 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6085
6086 xfer_addr += bump_after;
6087
6088 regmask &= ~(1 << regno);
6089 }
6090
6091 if (dsc->u.block.writeback)
6092 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6093 CANNOT_WRITE_PC);
6094 }
6095
6096 /* Clean up an STM which included the PC in the register list. */
6097
6098 static void
6099 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6100 struct displaced_step_closure *dsc)
6101 {
6102 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6103 int store_executed = condition_true (dsc->u.block.cond, status);
6104 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6105 CORE_ADDR stm_insn_addr;
6106 uint32_t pc_val;
6107 long offset;
6108 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6109
6110 /* If condition code fails, there's nothing else to do. */
6111 if (!store_executed)
6112 return;
6113
6114 if (dsc->u.block.increment)
6115 {
6116 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6117
6118 if (dsc->u.block.before)
6119 pc_stored_at += 4;
6120 }
6121 else
6122 {
6123 pc_stored_at = dsc->u.block.xfer_addr;
6124
6125 if (dsc->u.block.before)
6126 pc_stored_at -= 4;
6127 }
6128
6129 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6130 stm_insn_addr = dsc->scratch_base;
6131 offset = pc_val - stm_insn_addr;
6132
6133 if (debug_displaced)
6134 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6135 "STM instruction\n", offset);
6136
6137 /* Rewrite the stored PC to the proper value for the non-displaced original
6138 instruction. */
6139 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6140 dsc->insn_addr + offset);
6141 }
6142
6143 /* Clean up an LDM which includes the PC in the register list. We clumped all
6144 the registers in the transferred list into a contiguous range r0...rX (to
6145 avoid loading PC directly and losing control of the debugged program), so we
6146 must undo that here. */
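/* For example (hypothetical instruction), "ldm r0, {r1, r4, pc}" is
   stepped as "ldm r0, {r0, r1, r2}" (see copy_block_xfer below), so the
   memory words destined for r1, r4 and the PC land in r0, r1 and r2.
   Working down from r15, this routine moves r2 into the PC and r1 into
   r4, moves r0 into r1, and finally restores the clobbered registers r0
   and r2 from dsc->tmp[].  */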
6147
6148 static void
6149 cleanup_block_load_pc (struct gdbarch *gdbarch,
6150 struct regcache *regs,
6151 struct displaced_step_closure *dsc)
6152 {
6153 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6154 int load_executed = condition_true (dsc->u.block.cond, status), i;
6155 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6156 unsigned int regs_loaded = bitcount (mask);
6157 unsigned int num_to_shuffle = regs_loaded, clobbered;
6158
6159 /* The method employed here will fail if the register list is fully populated
6160 (we need to avoid loading PC directly). */
6161 gdb_assert (num_to_shuffle < 16);
6162
6163 if (!load_executed)
6164 return;
6165
6166 clobbered = (1 << num_to_shuffle) - 1;
6167
6168 while (num_to_shuffle > 0)
6169 {
6170 if ((mask & (1 << write_reg)) != 0)
6171 {
6172 unsigned int read_reg = num_to_shuffle - 1;
6173
6174 if (read_reg != write_reg)
6175 {
6176 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
6177 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6178 if (debug_displaced)
6179 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6180 "loaded register r%d to r%d\n"), read_reg,
6181 write_reg);
6182 }
6183 else if (debug_displaced)
6184 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6185 "r%d already in the right place\n"),
6186 write_reg);
6187
6188 clobbered &= ~(1 << write_reg);
6189
6190 num_to_shuffle--;
6191 }
6192
6193 write_reg--;
6194 }
6195
6196 /* Restore any registers we scribbled over. */
6197 for (write_reg = 0; clobbered != 0; write_reg++)
6198 {
6199 if ((clobbered & (1 << write_reg)) != 0)
6200 {
6201 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6202 CANNOT_WRITE_PC);
6203 if (debug_displaced)
6204 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6205 "clobbered register r%d\n"), write_reg);
6206 clobbered &= ~(1 << write_reg);
6207 }
6208 }
6209
6210 /* Perform register writeback manually. */
6211 if (dsc->u.block.writeback)
6212 {
6213 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6214
6215 if (dsc->u.block.increment)
6216 new_rn_val += regs_loaded * 4;
6217 else
6218 new_rn_val -= regs_loaded * 4;
6219
6220 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6221 CANNOT_WRITE_PC);
6222 }
6223 }
6224
6225 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6226 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6227
6228 static int
6229 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6230 struct displaced_step_closure *dsc)
6231 {
6232 int load = bit (insn, 20);
6233 int user = bit (insn, 22);
6234 int increment = bit (insn, 23);
6235 int before = bit (insn, 24);
6236 int writeback = bit (insn, 21);
6237 int rn = bits (insn, 16, 19);
6238
6239 /* Block transfers which don't mention PC can be run directly
6240 out-of-line. */
6241 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6242 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6243
6244 if (rn == ARM_PC_REGNUM)
6245 {
6246 warning (_("displaced: Unpredictable LDM or STM with "
6247 "base register r15"));
6248 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6249 }
6250
6251 if (debug_displaced)
6252 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6253 "%.8lx\n", (unsigned long) insn);
6254
6255 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6256 dsc->u.block.rn = rn;
6257
6258 dsc->u.block.load = load;
6259 dsc->u.block.user = user;
6260 dsc->u.block.increment = increment;
6261 dsc->u.block.before = before;
6262 dsc->u.block.writeback = writeback;
6263 dsc->u.block.cond = bits (insn, 28, 31);
6264
6265 dsc->u.block.regmask = insn & 0xffff;
6266
6267 if (load)
6268 {
6269 if ((insn & 0xffff) == 0xffff)
6270 {
6271 /* LDM with a fully-populated register list. This case is
6272 particularly tricky. Implement for now by fully emulating the
6273 instruction (which might not behave perfectly in all cases, but
6274 these instructions should be rare enough for that not to matter
6275 too much). */
6276 dsc->modinsn[0] = ARM_NOP;
6277
6278 dsc->cleanup = &cleanup_block_load_all;
6279 }
6280 else
6281 {
6282 /* LDM of a list of registers which includes PC. Implement by
6283 rewriting the list of registers to be transferred into a
6284 contiguous chunk r0...rX before doing the transfer, then shuffling
6285 registers into the correct places in the cleanup routine. */
6286 unsigned int regmask = insn & 0xffff;
6287 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6288 unsigned int to = 0, from = 0, i, new_rn;
6289
6290 for (i = 0; i < num_in_list; i++)
6291 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6292
6293 /* Writeback makes things complicated. We need to avoid clobbering
6294 the base register with one of the registers in our modified
6295 register list, but just using a different register can't work in
6296 all cases, e.g.:
6297
6298 ldm r14!, {r0-r13,pc}
6299
6300 which would need to be rewritten as:
6301
6302 ldm rN!, {r0-r14}
6303
6304 but that can't work, because there's no free register for N.
6305
6306 Solve this by turning off the writeback bit, and emulating
6307 writeback manually in the cleanup routine. */
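/* For example (hypothetical instruction), "ldmia r11!, {r4, r5, pc}"
   (register mask 0x8030, three registers) is rewritten with register
   mask 0x0007 and the writeback bit cleared; cleanup_block_load_pc
   later shuffles r0/r1/r2 into r4/r5/PC and writes
   r11 = xfer_addr + 12 by hand.  */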
6308
6309 if (writeback)
6310 insn &= ~(1 << 21);
6311
6312 new_regmask = (1 << num_in_list) - 1;
6313
6314 if (debug_displaced)
6315 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6316 "{..., pc}: original reg list %.4x, modified "
6317 "list %.4x\n"), rn, writeback ? "!" : "",
6318 (int) insn & 0xffff, new_regmask);
6319
6320 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6321
6322 dsc->cleanup = &cleanup_block_load_pc;
6323 }
6324 }
6325 else
6326 {
6327 /* STM of a list of registers which includes PC. Run the instruction
6328 as-is, but out of line: this will store the wrong value for the PC,
6329 so we must manually fix up the memory in the cleanup routine.
6330 Doing things this way has the advantage that we can auto-detect
6331 the offset of the PC write (which is architecture-dependent) in
6332 the cleanup routine. */
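/* For example, if the copied STM runs at scratch address 0x20000 and the
   word it stores for the PC reads back as 0x20008, the cleanup computes
   offset = 8 and patches that memory word to insn_addr + 8, the value
   the original instruction would have stored.  */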
6333 dsc->modinsn[0] = insn;
6334
6335 dsc->cleanup = &cleanup_block_store_pc;
6336 }
6337
6338 return 0;
6339 }
6340
6341 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6342 for Linux, where some SVC instructions must be treated specially. */
6343
6344 static void
6345 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6346 struct displaced_step_closure *dsc)
6347 {
6348 CORE_ADDR resume_addr = dsc->insn_addr + 4;
6349
6350 if (debug_displaced)
6351 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6352 "%.8lx\n", (unsigned long) resume_addr);
6353
6354 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6355 }
6356
6357 static int
6358 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6359 struct regcache *regs, struct displaced_step_closure *dsc)
6360 {
6361 /* Allow OS-specific code to override SVC handling. */
6362 if (dsc->u.svc.copy_svc_os)
6363 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
6364
6365 if (debug_displaced)
6366 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6367 (unsigned long) insn);
6368
6369 /* Preparation: none.
6370 Insn: unmodified svc.
6371 Cleanup: pc <- insn_addr + 4. */
6372
6373 dsc->modinsn[0] = insn;
6374
6375 dsc->cleanup = &cleanup_svc;
6376 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6377 instruction. */
6378 dsc->wrote_to_pc = 1;
6379
6380 return 0;
6381 }
6382
6383 /* Copy undefined instructions. */
6384
6385 static int
6386 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6387 struct displaced_step_closure *dsc)
6388 {
6389 if (debug_displaced)
6390 fprintf_unfiltered (gdb_stdlog,
6391 "displaced: copying undefined insn %.8lx\n",
6392 (unsigned long) insn);
6393
6394 dsc->modinsn[0] = insn;
6395
6396 return 0;
6397 }
6398
6399 /* Copy unpredictable instructions. */
6400
6401 static int
6402 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6403 struct displaced_step_closure *dsc)
6404 {
6405 if (debug_displaced)
6406 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6407 "%.8lx\n", (unsigned long) insn);
6408
6409 dsc->modinsn[0] = insn;
6410
6411 return 0;
6412 }
6413
6414 /* The decode_* functions are instruction decoding helpers. They mostly follow
6415 the presentation in the ARM ARM. */
6416
6417 static int
6418 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
6419 struct regcache *regs,
6420 struct displaced_step_closure *dsc)
6421 {
6422 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
6423 unsigned int rn = bits (insn, 16, 19);
6424
6425 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
6426 return copy_unmodified (gdbarch, insn, "cps", dsc);
6427 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
6428 return copy_unmodified (gdbarch, insn, "setend", dsc);
6429 else if ((op1 & 0x60) == 0x20)
6430 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
6431 else if ((op1 & 0x71) == 0x40)
6432 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
6433 else if ((op1 & 0x77) == 0x41)
6434 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6435 else if ((op1 & 0x77) == 0x45)
6436 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
6437 else if ((op1 & 0x77) == 0x51)
6438 {
6439 if (rn != 0xf)
6440 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6441 else
6442 return copy_unpred (gdbarch, insn, dsc);
6443 }
6444 else if ((op1 & 0x77) == 0x55)
6445 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6446 else if (op1 == 0x57)
6447 switch (op2)
6448 {
6449 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
6450 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
6451 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
6452 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
6453 default: return copy_unpred (gdbarch, insn, dsc);
6454 }
6455 else if ((op1 & 0x63) == 0x43)
6456 return copy_unpred (gdbarch, insn, dsc);
6457 else if ((op2 & 0x1) == 0x0)
6458 switch (op1 & ~0x80)
6459 {
6460 case 0x61:
6461 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6462 case 0x65:
6463 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
6464 case 0x71: case 0x75:
6465 /* pld/pldw reg. */
6466 return copy_preload_reg (gdbarch, insn, regs, dsc);
6467 case 0x63: case 0x67: case 0x73: case 0x77:
6468 return copy_unpred (gdbarch, insn, dsc);
6469 default:
6470 return copy_undef (gdbarch, insn, dsc);
6471 }
6472 else
6473 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
6474 }
6475
6476 static int
6477 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
6478 struct regcache *regs,
6479 struct displaced_step_closure *dsc)
6480 {
6481 if (bit (insn, 27) == 0)
6482 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
6483 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
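/* That is, the switch value packs insn bits [26:24] into bits [3:1] and
   insn bit 20 into bit 0.  A BLX immediate (bits [27:25] = 0b101), for
   instance, yields a value in the 0x4-0x7 range and so reaches
   copy_b_bl_blx.  */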
6484 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
6485 {
6486 case 0x0: case 0x2:
6487 return copy_unmodified (gdbarch, insn, "srs", dsc);
6488
6489 case 0x1: case 0x3:
6490 return copy_unmodified (gdbarch, insn, "rfe", dsc);
6491
6492 case 0x4: case 0x5: case 0x6: case 0x7:
6493 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
6494
6495 case 0x8:
6496 switch ((insn & 0xe00000) >> 21)
6497 {
6498 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
6499 /* stc/stc2. */
6500 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6501
6502 case 0x2:
6503 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6504
6505 default:
6506 return copy_undef (gdbarch, insn, dsc);
6507 }
6508
6509 case 0x9:
6510 {
6511 int rn_f = (bits (insn, 16, 19) == 0xf);
6512 switch ((insn & 0xe00000) >> 21)
6513 {
6514 case 0x1: case 0x3:
6515 /* ldc/ldc2 imm (undefined for rn == pc). */
6516 return rn_f ? copy_undef (gdbarch, insn, dsc)
6517 : copy_copro_load_store (gdbarch, insn, regs, dsc);
6518
6519 case 0x2:
6520 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6521
6522 case 0x4: case 0x5: case 0x6: case 0x7:
6523 /* ldc/ldc2 lit (undefined for rn != pc). */
6524 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
6525 : copy_undef (gdbarch, insn, dsc);
6526
6527 default:
6528 return copy_undef (gdbarch, insn, dsc);
6529 }
6530 }
6531
6532 case 0xa:
6533 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
6534
6535 case 0xb:
6536 if (bits (insn, 16, 19) == 0xf)
6537 /* ldc/ldc2 lit. */
6538 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6539 else
6540 return copy_undef (gdbarch, insn, dsc);
6541
6542 case 0xc:
6543 if (bit (insn, 4))
6544 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6545 else
6546 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6547
6548 case 0xd:
6549 if (bit (insn, 4))
6550 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6551 else
6552 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6553
6554 default:
6555 return copy_undef (gdbarch, insn, dsc);
6556 }
6557 }
6558
6559 /* Decode miscellaneous instructions in dp/misc encoding space. */
6560
6561 static int
6562 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
6563 struct regcache *regs,
6564 struct displaced_step_closure *dsc)
6565 {
6566 unsigned int op2 = bits (insn, 4, 6);
6567 unsigned int op = bits (insn, 21, 22);
6568 unsigned int op1 = bits (insn, 16, 19);
6569
6570 switch (op2)
6571 {
6572 case 0x0:
6573 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
6574
6575 case 0x1:
6576 if (op == 0x1) /* bx. */
6577 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
6578 else if (op == 0x3)
6579 return copy_unmodified (gdbarch, insn, "clz", dsc);
6580 else
6581 return copy_undef (gdbarch, insn, dsc);
6582
6583 case 0x2:
6584 if (op == 0x1)
6585 /* Not really supported. */
6586 return copy_unmodified (gdbarch, insn, "bxj", dsc);
6587 else
6588 return copy_undef (gdbarch, insn, dsc);
6589
6590 case 0x3:
6591 if (op == 0x1)
6592 return copy_bx_blx_reg (gdbarch, insn,
6593 regs, dsc); /* blx register. */
6594 else
6595 return copy_undef (gdbarch, insn, dsc);
6596
6597 case 0x5:
6598 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
6599
6600 case 0x7:
6601 if (op == 0x1)
6602 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
6603 else if (op == 0x3)
6604 /* Not really supported. */
6605 return copy_unmodified (gdbarch, insn, "smc", dsc);
6606
6607 default:
6608 return copy_undef (gdbarch, insn, dsc);
6609 }
6610 }
6611
6612 static int
6613 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6614 struct displaced_step_closure *dsc)
6615 {
6616 if (bit (insn, 25))
6617 switch (bits (insn, 20, 24))
6618 {
6619 case 0x10:
6620 return copy_unmodified (gdbarch, insn, "movw", dsc);
6621
6622 case 0x14:
6623 return copy_unmodified (gdbarch, insn, "movt", dsc);
6624
6625 case 0x12: case 0x16:
6626 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
6627
6628 default:
6629 return copy_alu_imm (gdbarch, insn, regs, dsc);
6630 }
6631 else
6632 {
6633 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
6634
6635 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
6636 return copy_alu_reg (gdbarch, insn, regs, dsc);
6637 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
6638 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
6639 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
6640 return decode_miscellaneous (gdbarch, insn, regs, dsc);
6641 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
6642 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
6643 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
6644 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
6645 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
6646 return copy_unmodified (gdbarch, insn, "synch", dsc);
6647 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
6648 /* 2nd arg means "unprivileged". */
6649 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
6650 dsc);
6651 }
6652
6653 /* Should be unreachable. */
6654 return 1;
6655 }
6656
6657 static int
6658 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
6659 struct regcache *regs,
6660 struct displaced_step_closure *dsc)
6661 {
6662 int a = bit (insn, 25), b = bit (insn, 4);
6663 uint32_t op1 = bits (insn, 20, 24);
6664 int rn_f = bits (insn, 16, 19) == 0xf;
6665
6666 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
6667 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
6668 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
6669 else if ((!a && (op1 & 0x17) == 0x02)
6670 || (a && (op1 & 0x17) == 0x02 && !b))
6671 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
6672 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
6673 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
6674 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
6675 else if ((!a && (op1 & 0x17) == 0x03)
6676 || (a && (op1 & 0x17) == 0x03 && !b))
6677 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
6678 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
6679 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
6680 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
6681 else if ((!a && (op1 & 0x17) == 0x06)
6682 || (a && (op1 & 0x17) == 0x06 && !b))
6683 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
6684 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
6685 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
6686 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
6687 else if ((!a && (op1 & 0x17) == 0x07)
6688 || (a && (op1 & 0x17) == 0x07 && !b))
6689 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
6690
6691 /* Should be unreachable. */
6692 return 1;
6693 }
6694
6695 static int
6696 decode_media (struct gdbarch *gdbarch, uint32_t insn,
6697 struct displaced_step_closure *dsc)
6698 {
6699 switch (bits (insn, 20, 24))
6700 {
6701 case 0x00: case 0x01: case 0x02: case 0x03:
6702 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
6703
6704 case 0x04: case 0x05: case 0x06: case 0x07:
6705 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
6706
6707 case 0x08: case 0x09: case 0x0a: case 0x0b:
6708 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
6709 return copy_unmodified (gdbarch, insn,
6710 "decode/pack/unpack/saturate/reverse", dsc);
6711
6712 case 0x18:
6713 if (bits (insn, 5, 7) == 0) /* op2. */
6714 {
6715 if (bits (insn, 12, 15) == 0xf)
6716 return copy_unmodified (gdbarch, insn, "usad8", dsc);
6717 else
6718 return copy_unmodified (gdbarch, insn, "usada8", dsc);
6719 }
6720 else
6721 return copy_undef (gdbarch, insn, dsc);
6722
6723 case 0x1a: case 0x1b:
6724 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6725 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
6726 else
6727 return copy_undef (gdbarch, insn, dsc);
6728
6729 case 0x1c: case 0x1d:
6730 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
6731 {
6732 if (bits (insn, 0, 3) == 0xf)
6733 return copy_unmodified (gdbarch, insn, "bfc", dsc);
6734 else
6735 return copy_unmodified (gdbarch, insn, "bfi", dsc);
6736 }
6737 else
6738 return copy_undef (gdbarch, insn, dsc);
6739
6740 case 0x1e: case 0x1f:
6741 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6742 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
6743 else
6744 return copy_undef (gdbarch, insn, dsc);
6745 }
6746
6747 /* Should be unreachable. */
6748 return 1;
6749 }
6750
6751 static int
6752 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
6753 struct regcache *regs, struct displaced_step_closure *dsc)
6754 {
6755 if (bit (insn, 25))
6756 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
6757 else
6758 return copy_block_xfer (gdbarch, insn, regs, dsc);
6759 }
6760
6761 static int
6762 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
6763 struct regcache *regs,
6764 struct displaced_step_closure *dsc)
6765 {
6766 unsigned int opcode = bits (insn, 20, 24);
6767
6768 switch (opcode)
6769 {
6770 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
6771 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
6772
6773 case 0x08: case 0x0a: case 0x0c: case 0x0e:
6774 case 0x12: case 0x16:
6775 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
6776
6777 case 0x09: case 0x0b: case 0x0d: case 0x0f:
6778 case 0x13: case 0x17:
6779 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
6780
6781 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
6782 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
6783 /* Note: no writeback for these instructions. Bit 25 will always be
6784 zero though (via caller), so the following works OK. */
6785 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6786 }
6787
6788 /* Should be unreachable. */
6789 return 1;
6790 }
6791
6792 static int
6793 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6794 struct regcache *regs, struct displaced_step_closure *dsc)
6795 {
6796 unsigned int op1 = bits (insn, 20, 25);
6797 int op = bit (insn, 4);
6798 unsigned int coproc = bits (insn, 8, 11);
6799 unsigned int rn = bits (insn, 16, 19);
6800
6801 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6802 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6803 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6804 && (coproc & 0xe) != 0xa)
6805 /* stc/stc2. */
6806 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6807 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6808 && (coproc & 0xe) != 0xa)
6809 /* ldc/ldc2 imm/lit. */
6810 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6811 else if ((op1 & 0x3e) == 0x00)
6812 return copy_undef (gdbarch, insn, dsc);
6813 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6814 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6815 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6816 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6817 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6818 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6819 else if ((op1 & 0x30) == 0x20 && !op)
6820 {
6821 if ((coproc & 0xe) == 0xa)
6822 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6823 else
6824 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6825 }
6826 else if ((op1 & 0x30) == 0x20 && op)
6827 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6828 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6829 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6830 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6831 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6832 else if ((op1 & 0x30) == 0x30)
6833 return copy_svc (gdbarch, insn, to, regs, dsc);
6834 else
6835 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6836 }
6837
6838 static void
6839 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6840 CORE_ADDR to, struct regcache *regs,
6841 struct displaced_step_closure *dsc)
6842 {
6843 error (_("Displaced stepping is only supported in ARM mode"));
6844 }
6845
6846 void
6847 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6848 CORE_ADDR to, struct regcache *regs,
6849 struct displaced_step_closure *dsc)
6850 {
6851 int err = 0;
6852 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6853 uint32_t insn;
6854
6855 /* Most displaced instructions use a 1-instruction scratch space, so set this
6856 here and override below if/when necessary. */
6857 dsc->numinsns = 1;
6858 dsc->insn_addr = from;
6859 dsc->scratch_base = to;
6860 dsc->cleanup = NULL;
6861 dsc->wrote_to_pc = 0;
6862
6863 if (!displaced_in_arm_mode (regs))
6864 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
6865
6866 dsc->is_thumb = 0;
6867 dsc->insn_size = 4;
6868 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
6869 if (debug_displaced)
6870 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
6871 "at %.8lx\n", (unsigned long) insn,
6872 (unsigned long) from);
6873
6874 if ((insn & 0xf0000000) == 0xf0000000)
6875 err = decode_unconditional (gdbarch, insn, regs, dsc);
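/* Otherwise dispatch on insn bits [27:25] (packed into bits [3:1] of the
   switch value) together with bit 4 in bit 0: 0x0-0x3 is data-processing,
   0x4-0x6 word/byte load/store, 0x7 media, 0x8-0xb block transfer and
   branch, and 0xc-0xf coprocessor and SVC.  */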
6876 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
6877 {
6878 case 0x0: case 0x1: case 0x2: case 0x3:
6879 err = decode_dp_misc (gdbarch, insn, regs, dsc);
6880 break;
6881
6882 case 0x4: case 0x5: case 0x6:
6883 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
6884 break;
6885
6886 case 0x7:
6887 err = decode_media (gdbarch, insn, dsc);
6888 break;
6889
6890 case 0x8: case 0x9: case 0xa: case 0xb:
6891 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
6892 break;
6893
6894 case 0xc: case 0xd: case 0xe: case 0xf:
6895 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
6896 break;
6897 }
6898
6899 if (err)
6900 internal_error (__FILE__, __LINE__,
6901 _("arm_process_displaced_insn: Instruction decode error"));
6902 }
6903
6904 /* Actually set up the scratch space for a displaced instruction. */
6905
6906 void
6907 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
6908 CORE_ADDR to, struct displaced_step_closure *dsc)
6909 {
6910 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6911 unsigned int i, len, offset;
6912 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6913 int size = dsc->is_thumb? 2 : 4;
6914 const unsigned char *bkp_insn;
6915
6916 offset = 0;
6917 /* Poke modified instruction(s). */
6918 for (i = 0; i < dsc->numinsns; i++)
6919 {
6920 if (debug_displaced)
6921 {
6922 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
6923 if (size == 4)
6924 fprintf_unfiltered (gdb_stdlog, "%.8lx",
6925 dsc->modinsn[i]);
6926 else if (size == 2)
6927 fprintf_unfiltered (gdb_stdlog, "%.4x",
6928 (unsigned short)dsc->modinsn[i]);
6929
6930 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
6931 (unsigned long) to + offset);
6932
6933 }
6934 write_memory_unsigned_integer (to + offset, size,
6935 byte_order_for_code,
6936 dsc->modinsn[i]);
6937 offset += size;
6938 }
6939
6940 /* Choose the correct breakpoint instruction. */
6941 if (dsc->is_thumb)
6942 {
6943 bkp_insn = tdep->thumb_breakpoint;
6944 len = tdep->thumb_breakpoint_size;
6945 }
6946 else
6947 {
6948 bkp_insn = tdep->arm_breakpoint;
6949 len = tdep->arm_breakpoint_size;
6950 }
6951
6952 /* Put breakpoint afterwards. */
6953 write_memory (to + offset, bkp_insn, len);
6954
6955 if (debug_displaced)
6956 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
6957 paddress (gdbarch, from), paddress (gdbarch, to));
6958 }
6959
6960 /* Entry point for copying an instruction into scratch space for displaced
6961 stepping. */
6962
6963 struct displaced_step_closure *
6964 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
6965 CORE_ADDR from, CORE_ADDR to,
6966 struct regcache *regs)
6967 {
6968 struct displaced_step_closure *dsc
6969 = xmalloc (sizeof (struct displaced_step_closure));
6970 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
6971 arm_displaced_init_closure (gdbarch, from, to, dsc);
6972
6973 return dsc;
6974 }
6975
6976 /* Entry point for cleaning things up after a displaced instruction has been
6977 single-stepped. */
6978
6979 void
6980 arm_displaced_step_fixup (struct gdbarch *gdbarch,
6981 struct displaced_step_closure *dsc,
6982 CORE_ADDR from, CORE_ADDR to,
6983 struct regcache *regs)
6984 {
6985 if (dsc->cleanup)
6986 dsc->cleanup (gdbarch, regs, dsc);
6987
6988 if (!dsc->wrote_to_pc)
6989 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
6990 dsc->insn_addr + dsc->insn_size);
6991
6992 }
6993
6994 #include "bfd-in2.h"
6995 #include "libcoff.h"
6996
6997 static int
6998 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
6999 {
7000 struct gdbarch *gdbarch = info->application_data;
7001
7002 if (arm_pc_is_thumb (gdbarch, memaddr))
7003 {
7004 static asymbol *asym;
7005 static combined_entry_type ce;
7006 static struct coff_symbol_struct csym;
7007 static struct bfd fake_bfd;
7008 static bfd_target fake_target;
7009
7010 if (csym.native == NULL)
7011 {
7012 /* Create a fake symbol vector containing a Thumb symbol.
7013 This is solely so that the code in print_insn_little_arm()
7014 and print_insn_big_arm() in opcodes/arm-dis.c will detect
7015 the presence of a Thumb symbol and switch to decoding
7016 Thumb instructions. */
7017
7018 fake_target.flavour = bfd_target_coff_flavour;
7019 fake_bfd.xvec = &fake_target;
7020 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
7021 csym.native = &ce;
7022 csym.symbol.the_bfd = &fake_bfd;
7023 csym.symbol.name = "fake";
7024 asym = (asymbol *) & csym;
7025 }
7026
7027 memaddr = UNMAKE_THUMB_ADDR (memaddr);
7028 info->symbols = &asym;
7029 }
7030 else
7031 info->symbols = NULL;
7032
7033 if (info->endian == BFD_ENDIAN_BIG)
7034 return print_insn_big_arm (memaddr, info);
7035 else
7036 return print_insn_little_arm (memaddr, info);
7037 }
7038
7039 /* The following define instruction sequences that will cause ARM
7040 CPUs to take an undefined instruction trap. These are used to
7041 signal a breakpoint to GDB.
7042
7043 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
7044 modes. A different instruction is required for each mode. The ARM
7045 CPUs can also be big or little endian. Thus four different
7046 instructions are needed to support all cases.
7047
7048 Note: ARMv4 defines several new instructions that will take the
7049 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7050 not in fact add the new instructions. The new undefined
7051 instructions in ARMv4 are all instructions that had no defined
7052 behaviour in earlier chips. There is no guarantee that they will
7053 raise an exception; they may be treated as NOPs. In practice, it
7054 may only be safe to rely on instructions matching:
7055
7056 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7057 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7058 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7059
7060 Even this may only be true if the condition predicate is true. The
7061 following use a condition predicate of ALWAYS, so it is always TRUE.
7062
7063 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7064 and NetBSD all use a software interrupt rather than an undefined
7065 instruction to force a trap. This can be handled by the
7066 abi-specific code during establishment of the gdbarch vector. */
7067
7068 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7069 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7070 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7071 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
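/* For instance, ARM_LE_BREAKPOINT reads as the little-endian word
   0xe7ffdefe: condition field 0xe (ALWAYS), bits [27:25] = 011 and
   bit 4 = 1, so it matches the always-undefined pattern described
   above.  */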
7072
7073 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
7074 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
7075 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
7076 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7077
7078 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
7079 the program counter value to determine whether a 16-bit or 32-bit
7080 breakpoint should be used. It returns a pointer to a string of
7081 bytes that encode a breakpoint instruction, stores the length of
7082 the string to *lenptr, and adjusts the program counter (if
7083 necessary) to point to the actual memory location where the
7084 breakpoint should be inserted. */
7085
7086 static const unsigned char *
7087 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
7088 {
7089 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7090 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7091
7092 if (arm_pc_is_thumb (gdbarch, *pcptr))
7093 {
7094 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
7095
7096 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7097 check whether we are replacing a 32-bit instruction. */
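/* A 32-bit Thumb-2 instruction starts with a halfword whose top five
   bits are 0b11101, 0b11110 or 0b11111, which is what the test on
   inst1 below checks: e.g. 0xf000 (the first half of a 32-bit BL)
   matches, while a 16-bit opcode such as 0x4770 (bx lr) does not.  */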
7098 if (tdep->thumb2_breakpoint != NULL)
7099 {
7100 gdb_byte buf[2];
7101 if (target_read_memory (*pcptr, buf, 2) == 0)
7102 {
7103 unsigned short inst1;
7104 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
7105 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
7106 {
7107 *lenptr = tdep->thumb2_breakpoint_size;
7108 return tdep->thumb2_breakpoint;
7109 }
7110 }
7111 }
7112
7113 *lenptr = tdep->thumb_breakpoint_size;
7114 return tdep->thumb_breakpoint;
7115 }
7116 else
7117 {
7118 *lenptr = tdep->arm_breakpoint_size;
7119 return tdep->arm_breakpoint;
7120 }
7121 }
7122
7123 static void
7124 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
7125 int *kindptr)
7126 {
7127 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7128
7129 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
7130
7131 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
7132 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7133 that this is not confused with a 32-bit ARM breakpoint. */
7134 *kindptr = 3;
7135 }
7136
7137 /* Extract from an array REGBUF containing the (raw) register state a
7138 function return value of type TYPE, and copy that, in virtual
7139 format, into VALBUF. */
7140
7141 static void
7142 arm_extract_return_value (struct type *type, struct regcache *regs,
7143 gdb_byte *valbuf)
7144 {
7145 struct gdbarch *gdbarch = get_regcache_arch (regs);
7146 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7147
7148 if (TYPE_CODE_FLT == TYPE_CODE (type))
7149 {
7150 switch (gdbarch_tdep (gdbarch)->fp_model)
7151 {
7152 case ARM_FLOAT_FPA:
7153 {
7154 /* The value is in register F0 in internal format. We need to
7155 extract the raw value and then convert it to the desired
7156 internal type. */
7157 bfd_byte tmpbuf[FP_REGISTER_SIZE];
7158
7159 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
7160 convert_from_extended (floatformat_from_type (type), tmpbuf,
7161 valbuf, gdbarch_byte_order (gdbarch));
7162 }
7163 break;
7164
7165 case ARM_FLOAT_SOFT_FPA:
7166 case ARM_FLOAT_SOFT_VFP:
7167 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7168 not using the VFP ABI code. */
7169 case ARM_FLOAT_VFP:
7170 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
7171 if (TYPE_LENGTH (type) > 4)
7172 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
7173 valbuf + INT_REGISTER_SIZE);
7174 break;
7175
7176 default:
7177 internal_error (__FILE__, __LINE__,
7178 _("arm_extract_return_value: "
7179 "Floating point model not supported"));
7180 break;
7181 }
7182 }
7183 else if (TYPE_CODE (type) == TYPE_CODE_INT
7184 || TYPE_CODE (type) == TYPE_CODE_CHAR
7185 || TYPE_CODE (type) == TYPE_CODE_BOOL
7186 || TYPE_CODE (type) == TYPE_CODE_PTR
7187 || TYPE_CODE (type) == TYPE_CODE_REF
7188 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7189 {
7190 /* If the type is a plain integer, then the access is
7191 straightforward. Otherwise we have to play around a bit
7192 more. */
7193 int len = TYPE_LENGTH (type);
7194 int regno = ARM_A1_REGNUM;
7195 ULONGEST tmp;
7196
7197 while (len > 0)
7198 {
7199 /* By using store_unsigned_integer we avoid having to do
7200 anything special for small big-endian values. */
7201 regcache_cooked_read_unsigned (regs, regno++, &tmp);
7202 store_unsigned_integer (valbuf,
7203 (len > INT_REGISTER_SIZE
7204 ? INT_REGISTER_SIZE : len),
7205 byte_order, tmp);
7206 len -= INT_REGISTER_SIZE;
7207 valbuf += INT_REGISTER_SIZE;
7208 }
7209 }
7210 else
7211 {
7212 /* For a structure or union the behaviour is as if the value had
7213 been stored to word-aligned memory and then loaded into
7214 registers with 32-bit load instruction(s). */
7215 int len = TYPE_LENGTH (type);
7216 int regno = ARM_A1_REGNUM;
7217 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7218
7219 while (len > 0)
7220 {
7221 regcache_cooked_read (regs, regno++, tmpbuf);
7222 memcpy (valbuf, tmpbuf,
7223 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7224 len -= INT_REGISTER_SIZE;
7225 valbuf += INT_REGISTER_SIZE;
7226 }
7227 }
7228 }
7229
7230
7231 /* Will a function return an aggregate type in memory or in a
7232 register? Return 0 if an aggregate type can be returned in a
7233 register, 1 if it must be returned in memory. */
7234
7235 static int
7236 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7237 {
7238 int nRc;
7239 enum type_code code;
7240
7241 CHECK_TYPEDEF (type);
7242
7243 /* In the ARM ABI, "integer" like aggregate types are returned in
7244 registers. For an aggregate type to be integer like, its size
7245 must be less than or equal to INT_REGISTER_SIZE and the
7246 offset of each addressable subfield must be zero. Note that bit
7247 fields are not addressable, and all addressable subfields of
7248 unions always start at offset zero.
7249
7250 This function is based on the behaviour of GCC 2.95.1.
7251 See: gcc/arm.c: arm_return_in_memory() for details.
7252
7253 Note: Versions of GCC before 2.95.2 do not set up the
7254 parameters correctly for a function returning the following
7255 structure: struct { float f;}; This should be returned in memory,
7256 not a register. Richard Earnshaw sent me a patch, but I do not
7257 know of any way to detect if a function like the above has been
7258 compiled with the correct calling convention. */
7259
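/* A quick illustration of the heuristic implemented below (the type
   names are made up for this comment only):

     struct a { int i; };            /- 4 bytes, field at offset 0
                                        -> returned in r0
     union  b { int i; char c; };    /- all members at offset 0
                                        -> returned in r0
     struct c { char c; short s; };  /- 's' lives at a nonzero offset
                                        -> returned in memory
     struct d { float f; };          /- contains a float
                                        -> returned in memory  */
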
7260 /* All aggregate types that won't fit in a register must be returned
7261 in memory. */
7262 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7263 {
7264 return 1;
7265 }
7266
7267 /* The AAPCS says all aggregates not larger than a word are returned
7268 in a register. */
7269 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7270 return 0;
7271
7272 /* The only aggregate types that can be returned in a register are
7273 structs and unions. Arrays must be returned in memory. */
7274 code = TYPE_CODE (type);
7275 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
7276 {
7277 return 1;
7278 }
7279
7280 /* Assume all other aggregate types can be returned in a register.
7281 Run a check for structures, unions and arrays. */
7282 nRc = 0;
7283
7284 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7285 {
7286 int i;
7287 /* Need to check if this struct/union is "integer" like. For
7288 this to be true, its size must be less than or equal to
7289 INT_REGISTER_SIZE and the offset of each addressable
7290 subfield must be zero. Note that bit fields are not
7291 addressable, and unions always start at offset zero. If any
7292 of the subfields is a floating point type, the struct/union
7293 cannot be an integer type. */
7294
7295 /* For each field in the object, check:
7296 1) Is it FP? --> yes, nRc = 1;
7297 2) Is it addressable (bitpos != 0) and
7298 not packed (bitsize == 0)?
7299 --> yes, nRc = 1
7300 */
7301
7302 for (i = 0; i < TYPE_NFIELDS (type); i++)
7303 {
7304 enum type_code field_type_code;
7305 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7306 i)));
7307
7308 /* Is it a floating point type field? */
7309 if (field_type_code == TYPE_CODE_FLT)
7310 {
7311 nRc = 1;
7312 break;
7313 }
7314
7315 /* If bitpos != 0, then we have to care about it. */
7316 if (TYPE_FIELD_BITPOS (type, i) != 0)
7317 {
7318 /* Bitfields are not addressable. If the field bitsize is
7319 zero, then the field is not packed. Hence it cannot be
7320 a bitfield or any other packed type. */
7321 if (TYPE_FIELD_BITSIZE (type, i) == 0)
7322 {
7323 nRc = 1;
7324 break;
7325 }
7326 }
7327 }
7328 }
7329
7330 return nRc;
7331 }
7332
7333 /* Write into appropriate registers a function return value of type
7334 TYPE, given in virtual format. */
7335
7336 static void
7337 arm_store_return_value (struct type *type, struct regcache *regs,
7338 const gdb_byte *valbuf)
7339 {
7340 struct gdbarch *gdbarch = get_regcache_arch (regs);
7341 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7342
7343 if (TYPE_CODE (type) == TYPE_CODE_FLT)
7344 {
7345 char buf[MAX_REGISTER_SIZE];
7346
7347 switch (gdbarch_tdep (gdbarch)->fp_model)
7348 {
7349 case ARM_FLOAT_FPA:
7350
7351 convert_to_extended (floatformat_from_type (type), buf, valbuf,
7352 gdbarch_byte_order (gdbarch));
7353 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
7354 break;
7355
7356 case ARM_FLOAT_SOFT_FPA:
7357 case ARM_FLOAT_SOFT_VFP:
7358 /* ARM_FLOAT_VFP can arise if this is a variadic function, in which
7359 case the VFP ABI code is not used. */
7360 case ARM_FLOAT_VFP:
7361 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
7362 if (TYPE_LENGTH (type) > 4)
7363 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
7364 valbuf + INT_REGISTER_SIZE);
7365 break;
7366
7367 default:
7368 internal_error (__FILE__, __LINE__,
7369 _("arm_store_return_value: Floating "
7370 "point model not supported"));
7371 break;
7372 }
7373 }
7374 else if (TYPE_CODE (type) == TYPE_CODE_INT
7375 || TYPE_CODE (type) == TYPE_CODE_CHAR
7376 || TYPE_CODE (type) == TYPE_CODE_BOOL
7377 || TYPE_CODE (type) == TYPE_CODE_PTR
7378 || TYPE_CODE (type) == TYPE_CODE_REF
7379 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7380 {
7381 if (TYPE_LENGTH (type) <= 4)
7382 {
7383 /* Values of one word or less are zero/sign-extended and
7384 returned in r0. */
7385 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7386 LONGEST val = unpack_long (type, valbuf);
7387
7388 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
7389 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
7390 }
7391 else
7392 {
7393 /* Integral values greater than one word are stored in consecutive
7394 registers starting with r0. This will always be a multiple of
7395 the register size. */
7396 int len = TYPE_LENGTH (type);
7397 int regno = ARM_A1_REGNUM;
7398
7399 while (len > 0)
7400 {
7401 regcache_cooked_write (regs, regno++, valbuf);
7402 len -= INT_REGISTER_SIZE;
7403 valbuf += INT_REGISTER_SIZE;
7404 }
7405 }
7406 }
7407 else
7408 {
7409 /* For a structure or union the behaviour is as if the value had
7410 been stored to word-aligned memory and then loaded into
7411 registers with 32-bit load instruction(s). */
7412 int len = TYPE_LENGTH (type);
7413 int regno = ARM_A1_REGNUM;
7414 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7415
7416 while (len > 0)
7417 {
7418 memcpy (tmpbuf, valbuf,
7419 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7420 regcache_cooked_write (regs, regno++, tmpbuf);
7421 len -= INT_REGISTER_SIZE;
7422 valbuf += INT_REGISTER_SIZE;
7423 }
7424 }
7425 }
7426
7427
7428 /* Handle function return values. */
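/* Under the AAPCS VFP variant, "homogeneous" floating-point and vector
   aggregates of up to four members come back in the VFP registers
   themselves; for instance (illustrative only) a
   `struct { float x, y, z, w; }' would be returned in s0-s3 and a
   `struct { double d[2]; }' in d0-d1, which is what the
   arm_vfp_call_candidate path below handles.  Everything else falls
   back to the core-register conventions implemented above.  */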
7429
7430 static enum return_value_convention
7431 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
7432 struct type *valtype, struct regcache *regcache,
7433 gdb_byte *readbuf, const gdb_byte *writebuf)
7434 {
7435 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7436 enum arm_vfp_cprc_base_type vfp_base_type;
7437 int vfp_base_count;
7438
7439 if (arm_vfp_abi_for_function (gdbarch, func_type)
7440 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
7441 {
7442 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
7443 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
7444 int i;
7445 for (i = 0; i < vfp_base_count; i++)
7446 {
7447 if (reg_char == 'q')
7448 {
7449 if (writebuf)
7450 arm_neon_quad_write (gdbarch, regcache, i,
7451 writebuf + i * unit_length);
7452
7453 if (readbuf)
7454 arm_neon_quad_read (gdbarch, regcache, i,
7455 readbuf + i * unit_length);
7456 }
7457 else
7458 {
7459 char name_buf[4];
7460 int regnum;
7461
7462 sprintf (name_buf, "%c%d", reg_char, i);
7463 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7464 strlen (name_buf));
7465 if (writebuf)
7466 regcache_cooked_write (regcache, regnum,
7467 writebuf + i * unit_length);
7468 if (readbuf)
7469 regcache_cooked_read (regcache, regnum,
7470 readbuf + i * unit_length);
7471 }
7472 }
7473 return RETURN_VALUE_REGISTER_CONVENTION;
7474 }
7475
7476 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
7477 || TYPE_CODE (valtype) == TYPE_CODE_UNION
7478 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
7479 {
7480 if (tdep->struct_return == pcc_struct_return
7481 || arm_return_in_memory (gdbarch, valtype))
7482 return RETURN_VALUE_STRUCT_CONVENTION;
7483 }
7484
7485 if (writebuf)
7486 arm_store_return_value (valtype, regcache, writebuf);
7487
7488 if (readbuf)
7489 arm_extract_return_value (valtype, regcache, readbuf);
7490
7491 return RETURN_VALUE_REGISTER_CONVENTION;
7492 }
7493
7494
7495 static int
7496 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
7497 {
7498 struct gdbarch *gdbarch = get_frame_arch (frame);
7499 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7500 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7501 CORE_ADDR jb_addr;
7502 char buf[INT_REGISTER_SIZE];
7503
7504 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
7505
7506 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
7507 INT_REGISTER_SIZE))
7508 return 0;
7509
7510 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
7511 return 1;
7512 }
7513
7514 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7515 return the target PC. Otherwise return 0. */
7516
7517 CORE_ADDR
7518 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
7519 {
7520 char *name;
7521 int namelen;
7522 CORE_ADDR start_addr;
7523
7524 /* Find the starting address and name of the function containing the PC. */
7525 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
7526 return 0;
7527
7528 /* If PC is in a Thumb call or return stub, return the address of the
7529 target PC, which is in a register. The thunk functions are called
7530 _call_via_xx, where x is the register name. The possible names
7531 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
7532 functions, named __ARM_call_via_r[0-7]. */
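/* Such a stub is essentially just a register-indirect branch; as a
   sketch, GNU ld emits something along the lines of:

        _call_via_r3:
                bx      r3

   so the real call target is simply whatever the named register
   currently holds, which is what we return below.  */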
7533 if (strncmp (name, "_call_via_", 10) == 0
7534 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
7535 {
7536 /* Use the name suffix to determine which register contains the
7537 target PC. */
7538 static char *table[15] =
7539 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
7540 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
7541 };
7542 int regno;
7543 int offset = strlen (name) - 2;
7544
7545 for (regno = 0; regno <= 14; regno++)
7546 if (strcmp (&name[offset], table[regno]) == 0)
7547 return get_frame_register_unsigned (frame, regno);
7548 }
7549
7550 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
7551 non-interworking calls to foo. We could decode the stubs
7552 to find the target but it's easier to use the symbol table. */
7553 namelen = strlen (name);
7554 if (name[0] == '_' && name[1] == '_'
7555 && ((namelen > 2 + strlen ("_from_thumb")
7556 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
7557 strlen ("_from_thumb")) == 0)
7558 || (namelen > 2 + strlen ("_from_arm")
7559 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
7560 strlen ("_from_arm")) == 0)))
7561 {
7562 char *target_name;
7563 int target_len = namelen - 2;
7564 struct minimal_symbol *minsym;
7565 struct objfile *objfile;
7566 struct obj_section *sec;
7567
7568 if (name[namelen - 1] == 'b')
7569 target_len -= strlen ("_from_thumb");
7570 else
7571 target_len -= strlen ("_from_arm");
7572
7573 target_name = alloca (target_len + 1);
7574 memcpy (target_name, name + 2, target_len);
7575 target_name[target_len] = '\0';
7576
7577 sec = find_pc_section (pc);
7578 objfile = (sec == NULL) ? NULL : sec->objfile;
7579 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
7580 if (minsym != NULL)
7581 return SYMBOL_VALUE_ADDRESS (minsym);
7582 else
7583 return 0;
7584 }
7585
7586 return 0; /* not a stub */
7587 }
7588
7589 static void
7590 set_arm_command (char *args, int from_tty)
7591 {
7592 printf_unfiltered (_("\
7593 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7594 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
7595 }
7596
7597 static void
7598 show_arm_command (char *args, int from_tty)
7599 {
7600 cmd_show_list (showarmcmdlist, from_tty, "");
7601 }
7602
7603 static void
7604 arm_update_current_architecture (void)
7605 {
7606 struct gdbarch_info info;
7607
7608 /* If the current architecture is not ARM, we have nothing to do. */
7609 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
7610 return;
7611
7612 /* Update the architecture. */
7613 gdbarch_info_init (&info);
7614
7615 if (!gdbarch_update_p (info))
7616 internal_error (__FILE__, __LINE__, _("could not update architecture"));
7617 }
7618
7619 static void
7620 set_fp_model_sfunc (char *args, int from_tty,
7621 struct cmd_list_element *c)
7622 {
7623 enum arm_float_model fp_model;
7624
7625 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
7626 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
7627 {
7628 arm_fp_model = fp_model;
7629 break;
7630 }
7631
7632 if (fp_model == ARM_FLOAT_LAST)
7633 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
7634 current_fp_model);
7635
7636 arm_update_current_architecture ();
7637 }
7638
7639 static void
7640 show_fp_model (struct ui_file *file, int from_tty,
7641 struct cmd_list_element *c, const char *value)
7642 {
7643 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7644
7645 if (arm_fp_model == ARM_FLOAT_AUTO
7646 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7647 fprintf_filtered (file, _("\
7648 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
7649 fp_model_strings[tdep->fp_model]);
7650 else
7651 fprintf_filtered (file, _("\
7652 The current ARM floating point model is \"%s\".\n"),
7653 fp_model_strings[arm_fp_model]);
7654 }
7655
7656 static void
7657 arm_set_abi (char *args, int from_tty,
7658 struct cmd_list_element *c)
7659 {
7660 enum arm_abi_kind arm_abi;
7661
7662 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
7663 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
7664 {
7665 arm_abi_global = arm_abi;
7666 break;
7667 }
7668
7669 if (arm_abi == ARM_ABI_LAST)
7670 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
7671 arm_abi_string);
7672
7673 arm_update_current_architecture ();
7674 }
7675
7676 static void
7677 arm_show_abi (struct ui_file *file, int from_tty,
7678 struct cmd_list_element *c, const char *value)
7679 {
7680 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7681
7682 if (arm_abi_global == ARM_ABI_AUTO
7683 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7684 fprintf_filtered (file, _("\
7685 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
7686 arm_abi_strings[tdep->arm_abi]);
7687 else
7688 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
7689 arm_abi_string);
7690 }
7691
7692 static void
7693 arm_show_fallback_mode (struct ui_file *file, int from_tty,
7694 struct cmd_list_element *c, const char *value)
7695 {
7696 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7697
7698 fprintf_filtered (file,
7699 _("The current execution mode assumed "
7700 "(when symbols are unavailable) is \"%s\".\n"),
7701 arm_fallback_mode_string);
7702 }
7703
7704 static void
7705 arm_show_force_mode (struct ui_file *file, int from_tty,
7706 struct cmd_list_element *c, const char *value)
7707 {
7708 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7709
7710 fprintf_filtered (file,
7711 _("The current execution mode assumed "
7712 "(even when symbols are available) is \"%s\".\n"),
7713 arm_force_mode_string);
7714 }
7715
7716 /* If the user changes the register disassembly style used for info
7717 register and other commands, we have to also switch the style used
7718 in opcodes for disassembly output. This function is run in the "set
7719 arm disassembler" command, and does that. */
7720
7721 static void
7722 set_disassembly_style_sfunc (char *args, int from_tty,
7723 struct cmd_list_element *c)
7724 {
7725 set_disassembly_style ();
7726 }
7727 \f
7728 /* Return the ARM register name corresponding to register I. */
7729 static const char *
7730 arm_register_name (struct gdbarch *gdbarch, int i)
7731 {
7732 const int num_regs = gdbarch_num_regs (gdbarch);
7733
7734 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
7735 && i >= num_regs && i < num_regs + 32)
7736 {
7737 static const char *const vfp_pseudo_names[] = {
7738 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7739 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7740 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7741 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7742 };
7743
7744 return vfp_pseudo_names[i - num_regs];
7745 }
7746
7747 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
7748 && i >= num_regs + 32 && i < num_regs + 32 + 16)
7749 {
7750 static const char *const neon_pseudo_names[] = {
7751 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7752 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7753 };
7754
7755 return neon_pseudo_names[i - num_regs - 32];
7756 }
7757
7758 if (i >= ARRAY_SIZE (arm_register_names))
7759 /* These registers are only supported on targets which supply
7760 an XML description. */
7761 return "";
7762
7763 return arm_register_names[i];
7764 }
7765
7766 static void
7767 set_disassembly_style (void)
7768 {
7769 int current;
7770
7771 /* Find the style that the user wants. */
7772 for (current = 0; current < num_disassembly_options; current++)
7773 if (disassembly_style == valid_disassembly_styles[current])
7774 break;
7775 gdb_assert (current < num_disassembly_options);
7776
7777 /* Synchronize the disassembler. */
7778 set_arm_regname_option (current);
7779 }
7780
7781 /* Test whether the coff symbol specific value corresponds to a Thumb
7782 function. */
7783
7784 static int
7785 coff_sym_is_thumb (int val)
7786 {
7787 return (val == C_THUMBEXT
7788 || val == C_THUMBSTAT
7789 || val == C_THUMBEXTFUNC
7790 || val == C_THUMBSTATFUNC
7791 || val == C_THUMBLABEL);
7792 }
7793
7794 /* arm_coff_make_msymbol_special()
7795 arm_elf_make_msymbol_special()
7796
7797 These functions test whether the COFF or ELF symbol corresponds to
7798 an address in thumb code, and set a "special" bit in a minimal
7799 symbol to indicate that it does. */
7800
7801 static void
7802 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
7803 {
7804 /* Thumb symbols are of type STT_LOPROC, (synonymous with
7805 STT_ARM_TFUNC). */
7806 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
7807 == STT_LOPROC)
7808 MSYMBOL_SET_SPECIAL (msym);
7809 }
7810
7811 static void
7812 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
7813 {
7814 if (coff_sym_is_thumb (val))
7815 MSYMBOL_SET_SPECIAL (msym);
7816 }
7817
7818 static void
7819 arm_objfile_data_free (struct objfile *objfile, void *arg)
7820 {
7821 struct arm_per_objfile *data = arg;
7822 unsigned int i;
7823
7824 for (i = 0; i < objfile->obfd->section_count; i++)
7825 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
7826 }
7827
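/* Record an ARM ELF "mapping symbol" for later mode lookups.  These are
   the special $a / $t / $d symbols the assembler emits to mark the start
   of ARM code, Thumb code and literal data respectively; we keep them
   sorted per section so the mode at any given address can be found
   quickly. */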
7828 static void
7829 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
7830 asymbol *sym)
7831 {
7832 const char *name = bfd_asymbol_name (sym);
7833 struct arm_per_objfile *data;
7834 VEC(arm_mapping_symbol_s) **map_p;
7835 struct arm_mapping_symbol new_map_sym;
7836
7837 gdb_assert (name[0] == '$');
7838 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
7839 return;
7840
7841 data = objfile_data (objfile, arm_objfile_data_key);
7842 if (data == NULL)
7843 {
7844 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
7845 struct arm_per_objfile);
7846 set_objfile_data (objfile, arm_objfile_data_key, data);
7847 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
7848 objfile->obfd->section_count,
7849 VEC(arm_mapping_symbol_s) *);
7850 }
7851 map_p = &data->section_maps[bfd_get_section (sym)->index];
7852
7853 new_map_sym.value = sym->value;
7854 new_map_sym.type = name[1];
7855
7856 /* Assume that most mapping symbols appear in order of increasing
7857 value. If they were randomly distributed, it would be faster to
7858 always push here and then sort at first use. */
7859 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
7860 {
7861 struct arm_mapping_symbol *prev_map_sym;
7862
7863 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
7864 if (prev_map_sym->value >= sym->value)
7865 {
7866 unsigned int idx;
7867 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
7868 arm_compare_mapping_symbols);
7869 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
7870 return;
7871 }
7872 }
7873
7874 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
7875 }
7876
7877 static void
7878 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7879 {
7880 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7881 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7882
7883 /* If necessary, set the T bit. */
7884 if (arm_apcs_32)
7885 {
7886 ULONGEST val, t_bit;
7887 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7888 t_bit = arm_psr_thumb_bit (gdbarch);
7889 if (arm_pc_is_thumb (gdbarch, pc))
7890 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7891 val | t_bit);
7892 else
7893 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7894 val & ~t_bit);
7895 }
7896 }
7897
7898 /* Read the contents of a NEON quad register, by reading from two
7899 double registers. This is used to implement the quad pseudo
7900 registers, and for argument passing in case the quad registers are
7901 missing; vectors are passed in quad registers when using the VFP
7902 ABI, even if a NEON unit is not present. REGNUM is the index of
7903 the quad register, in [0, 15]. */
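/* For example, q1 is the concatenation of d2 (low half) and d3 (high
   half); on a little-endian target the 16 returned bytes are d2's eight
   bytes followed by d3's, while on a big-endian target the two halves
   are swapped, which is what the OFFSET handling below implements. */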
7904
7905 static enum register_status
7906 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
7907 int regnum, gdb_byte *buf)
7908 {
7909 char name_buf[4];
7910 gdb_byte reg_buf[8];
7911 int offset, double_regnum;
7912 enum register_status status;
7913
7914 sprintf (name_buf, "d%d", regnum << 1);
7915 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7916 strlen (name_buf));
7917
7918 /* d0 is always the least significant half of q0. */
7919 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7920 offset = 8;
7921 else
7922 offset = 0;
7923
7924 status = regcache_raw_read (regcache, double_regnum, reg_buf);
7925 if (status != REG_VALID)
7926 return status;
7927 memcpy (buf + offset, reg_buf, 8);
7928
7929 offset = 8 - offset;
7930 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
7931 if (status != REG_VALID)
7932 return status;
7933 memcpy (buf + offset, reg_buf, 8);
7934
7935 return REG_VALID;
7936 }
7937
7938 static enum register_status
7939 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
7940 int regnum, gdb_byte *buf)
7941 {
7942 const int num_regs = gdbarch_num_regs (gdbarch);
7943 char name_buf[4];
7944 gdb_byte reg_buf[8];
7945 int offset, double_regnum;
7946
7947 gdb_assert (regnum >= num_regs);
7948 regnum -= num_regs;
7949
7950 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7951 /* Quad-precision register. */
7952 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
7953 else
7954 {
7955 enum register_status status;
7956
7957 /* Single-precision register. */
7958 gdb_assert (regnum < 32);
7959
7960 /* s0 is always the least significant half of d0. */
7961 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7962 offset = (regnum & 1) ? 0 : 4;
7963 else
7964 offset = (regnum & 1) ? 4 : 0;
7965
7966 sprintf (name_buf, "d%d", regnum >> 1);
7967 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7968 strlen (name_buf));
7969
7970 status = regcache_raw_read (regcache, double_regnum, reg_buf);
7971 if (status == REG_VALID)
7972 memcpy (buf, reg_buf + offset, 4);
7973 return status;
7974 }
7975 }
7976
7977 /* Store the contents of BUF to a NEON quad register, by writing to
7978 two double registers. This is used to implement the quad pseudo
7979 registers, and for argument passing in case the quad registers are
7980 missing; vectors are passed in quad registers when using the VFP
7981 ABI, even if a NEON unit is not present. REGNUM is the index
7982 of the quad register, in [0, 15]. */
7983
7984 static void
7985 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
7986 int regnum, const gdb_byte *buf)
7987 {
7988 char name_buf[4];
7989 gdb_byte reg_buf[8];
7990 int offset, double_regnum;
7991
7992 sprintf (name_buf, "d%d", regnum << 1);
7993 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7994 strlen (name_buf));
7995
7996 /* d0 is always the least significant half of q0. */
7997 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7998 offset = 8;
7999 else
8000 offset = 0;
8001
8002 regcache_raw_write (regcache, double_regnum, buf + offset);
8003 offset = 8 - offset;
8004 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
8005 }
8006
8007 static void
8008 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
8009 int regnum, const gdb_byte *buf)
8010 {
8011 const int num_regs = gdbarch_num_regs (gdbarch);
8012 char name_buf[4];
8013 gdb_byte reg_buf[8];
8014 int offset, double_regnum;
8015
8016 gdb_assert (regnum >= num_regs);
8017 regnum -= num_regs;
8018
8019 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8020 /* Quad-precision register. */
8021 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
8022 else
8023 {
8024 /* Single-precision register. */
8025 gdb_assert (regnum < 32);
8026
8027 /* s0 is always the least significant half of d0. */
8028 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8029 offset = (regnum & 1) ? 0 : 4;
8030 else
8031 offset = (regnum & 1) ? 4 : 0;
8032
8033 sprintf (name_buf, "d%d", regnum >> 1);
8034 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8035 strlen (name_buf));
8036
8037 regcache_raw_read (regcache, double_regnum, reg_buf);
8038 memcpy (reg_buf + offset, buf, 4);
8039 regcache_raw_write (regcache, double_regnum, reg_buf);
8040 }
8041 }
8042
8043 static struct value *
8044 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8045 {
8046 const int *reg_p = baton;
8047 return value_of_register (*reg_p, frame);
8048 }
8049 \f
8050 static enum gdb_osabi
8051 arm_elf_osabi_sniffer (bfd *abfd)
8052 {
8053 unsigned int elfosabi;
8054 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8055
8056 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8057
8058 if (elfosabi == ELFOSABI_ARM)
8059 /* GNU tools use this value. Check note sections in this case,
8060 as well. */
8061 bfd_map_over_sections (abfd,
8062 generic_elf_osabi_sniff_abi_tag_sections,
8063 &osabi);
8064
8065 /* Anything else will be handled by the generic ELF sniffer. */
8066 return osabi;
8067 }
8068
8069 static int
8070 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8071 struct reggroup *group)
8072 {
8073 /* FPS register's type is INT, but it belongs to float_reggroup.  Besides
8074 this, the FPS register belongs to save_reggroup, restore_reggroup, and
8075 all_reggroup, of course. */
8076 if (regnum == ARM_FPS_REGNUM)
8077 return (group == float_reggroup
8078 || group == save_reggroup
8079 || group == restore_reggroup
8080 || group == all_reggroup);
8081 else
8082 return default_register_reggroup_p (gdbarch, regnum, group);
8083 }
8084
8085 \f
8086 /* Initialize the current architecture based on INFO. If possible,
8087 re-use an architecture from ARCHES, which is a list of
8088 architectures already created during this debugging session.
8089
8090 Called e.g. at program startup, when reading a core file, and when
8091 reading a binary file. */
8092
8093 static struct gdbarch *
8094 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8095 {
8096 struct gdbarch_tdep *tdep;
8097 struct gdbarch *gdbarch;
8098 struct gdbarch_list *best_arch;
8099 enum arm_abi_kind arm_abi = arm_abi_global;
8100 enum arm_float_model fp_model = arm_fp_model;
8101 struct tdesc_arch_data *tdesc_data = NULL;
8102 int i, is_m = 0;
8103 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
8104 int have_neon = 0;
8105 int have_fpa_registers = 1;
8106 const struct target_desc *tdesc = info.target_desc;
8107
8108 /* If we have an object to base this architecture on, try to determine
8109 its ABI. */
8110
8111 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8112 {
8113 int ei_osabi, e_flags;
8114
8115 switch (bfd_get_flavour (info.abfd))
8116 {
8117 case bfd_target_aout_flavour:
8118 /* Assume it's an old APCS-style ABI. */
8119 arm_abi = ARM_ABI_APCS;
8120 break;
8121
8122 case bfd_target_coff_flavour:
8123 /* Assume it's an old APCS-style ABI. */
8124 /* XXX WinCE? */
8125 arm_abi = ARM_ABI_APCS;
8126 break;
8127
8128 case bfd_target_elf_flavour:
8129 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8130 e_flags = elf_elfheader (info.abfd)->e_flags;
8131
8132 if (ei_osabi == ELFOSABI_ARM)
8133 {
8134 /* GNU tools used to use this value, but do not for EABI
8135 objects. There's nowhere to tag an EABI version
8136 anyway, so assume APCS. */
8137 arm_abi = ARM_ABI_APCS;
8138 }
8139 else if (ei_osabi == ELFOSABI_NONE)
8140 {
8141 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8142 int attr_arch, attr_profile;
8143
8144 switch (eabi_ver)
8145 {
8146 case EF_ARM_EABI_UNKNOWN:
8147 /* Assume GNU tools. */
8148 arm_abi = ARM_ABI_APCS;
8149 break;
8150
8151 case EF_ARM_EABI_VER4:
8152 case EF_ARM_EABI_VER5:
8153 arm_abi = ARM_ABI_AAPCS;
8154 /* EABI binaries default to VFP float ordering.
8155 They may also contain build attributes that can
8156 be used to identify if the VFP argument-passing
8157 ABI is in use. */
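/* E.g. "readelf -A" on a hard-float EABI object will typically
   report "Tag_ABI_VFP_args: VFP registers" (illustrative; the
   numeric tag values are handled below). */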
8158 if (fp_model == ARM_FLOAT_AUTO)
8159 {
8160 #ifdef HAVE_ELF
8161 switch (bfd_elf_get_obj_attr_int (info.abfd,
8162 OBJ_ATTR_PROC,
8163 Tag_ABI_VFP_args))
8164 {
8165 case 0:
8166 /* "The user intended FP parameter/result
8167 passing to conform to AAPCS, base
8168 variant". */
8169 fp_model = ARM_FLOAT_SOFT_VFP;
8170 break;
8171 case 1:
8172 /* "The user intended FP parameter/result
8173 passing to conform to AAPCS, VFP
8174 variant". */
8175 fp_model = ARM_FLOAT_VFP;
8176 break;
8177 case 2:
8178 /* "The user intended FP parameter/result
8179 passing to conform to tool chain-specific
8180 conventions" - we don't know any such
8181 conventions, so leave it as "auto". */
8182 break;
8183 default:
8184 /* Attribute value not mentioned in the
8185 October 2008 ABI, so leave it as
8186 "auto". */
8187 break;
8188 }
8189 #else
8190 fp_model = ARM_FLOAT_SOFT_VFP;
8191 #endif
8192 }
8193 break;
8194
8195 default:
8196 /* Leave it as "auto". */
8197 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
8198 break;
8199 }
8200
8201 #ifdef HAVE_ELF
8202 /* Detect M-profile programs. This only works if the
8203 executable file includes build attributes; GCC does
8204 copy them to the executable, but e.g. RealView does
8205 not. */
8206 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
8207 Tag_CPU_arch);
8208 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
8209 OBJ_ATTR_PROC,
8210 Tag_CPU_arch_profile);
8211 /* GCC specifies the profile for v6-M; RealView only
8212 specifies the profile for architectures starting with
8213 V7 (as opposed to architectures with a tag
8214 numerically greater than TAG_CPU_ARCH_V7). */
8215 if (!tdesc_has_registers (tdesc)
8216 && (attr_arch == TAG_CPU_ARCH_V6_M
8217 || attr_arch == TAG_CPU_ARCH_V6S_M
8218 || attr_profile == 'M'))
8219 tdesc = tdesc_arm_with_m;
8220 #endif
8221 }
8222
8223 if (fp_model == ARM_FLOAT_AUTO)
8224 {
8225 int e_flags = elf_elfheader (info.abfd)->e_flags;
8226
8227 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
8228 {
8229 case 0:
8230 /* Leave it as "auto". Strictly speaking this case
8231 means FPA, but almost nobody uses that now, and
8232 many toolchains fail to set the appropriate bits
8233 for the floating-point model they use. */
8234 break;
8235 case EF_ARM_SOFT_FLOAT:
8236 fp_model = ARM_FLOAT_SOFT_FPA;
8237 break;
8238 case EF_ARM_VFP_FLOAT:
8239 fp_model = ARM_FLOAT_VFP;
8240 break;
8241 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
8242 fp_model = ARM_FLOAT_SOFT_VFP;
8243 break;
8244 }
8245 }
8246
8247 if (e_flags & EF_ARM_BE8)
8248 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
8249
8250 break;
8251
8252 default:
8253 /* Leave it as "auto". */
8254 break;
8255 }
8256 }
8257
8258 /* Check any target description for validity. */
8259 if (tdesc_has_registers (tdesc))
8260 {
8261 /* For most registers we require GDB's default names; but also allow
8262 the numeric names for sp / lr / pc, as a convenience. */
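/* As a sketch, the core feature in such a target description looks
   roughly like:

        <feature name="org.gnu.gdb.arm.core">
          <reg name="r0" bitsize="32"/>
          ...
          <reg name="sp" bitsize="32" type="data_ptr"/>
          <reg name="lr" bitsize="32"/>
          <reg name="pc" bitsize="32" type="code_ptr"/>
          <reg name="cpsr" bitsize="32"/>
        </feature>

   (register list and attributes abbreviated here). */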
8263 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
8264 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
8265 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
8266
8267 const struct tdesc_feature *feature;
8268 int valid_p;
8269
8270 feature = tdesc_find_feature (tdesc,
8271 "org.gnu.gdb.arm.core");
8272 if (feature == NULL)
8273 {
8274 feature = tdesc_find_feature (tdesc,
8275 "org.gnu.gdb.arm.m-profile");
8276 if (feature == NULL)
8277 return NULL;
8278 else
8279 is_m = 1;
8280 }
8281
8282 tdesc_data = tdesc_data_alloc ();
8283
8284 valid_p = 1;
8285 for (i = 0; i < ARM_SP_REGNUM; i++)
8286 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8287 arm_register_names[i]);
8288 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8289 ARM_SP_REGNUM,
8290 arm_sp_names);
8291 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8292 ARM_LR_REGNUM,
8293 arm_lr_names);
8294 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8295 ARM_PC_REGNUM,
8296 arm_pc_names);
8297 if (is_m)
8298 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8299 ARM_PS_REGNUM, "xpsr");
8300 else
8301 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8302 ARM_PS_REGNUM, "cpsr");
8303
8304 if (!valid_p)
8305 {
8306 tdesc_data_cleanup (tdesc_data);
8307 return NULL;
8308 }
8309
8310 feature = tdesc_find_feature (tdesc,
8311 "org.gnu.gdb.arm.fpa");
8312 if (feature != NULL)
8313 {
8314 valid_p = 1;
8315 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
8316 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8317 arm_register_names[i]);
8318 if (!valid_p)
8319 {
8320 tdesc_data_cleanup (tdesc_data);
8321 return NULL;
8322 }
8323 }
8324 else
8325 have_fpa_registers = 0;
8326
8327 feature = tdesc_find_feature (tdesc,
8328 "org.gnu.gdb.xscale.iwmmxt");
8329 if (feature != NULL)
8330 {
8331 static const char *const iwmmxt_names[] = {
8332 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
8333 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
8334 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
8335 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
8336 };
8337
8338 valid_p = 1;
8339 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
8340 valid_p
8341 &= tdesc_numbered_register (feature, tdesc_data, i,
8342 iwmmxt_names[i - ARM_WR0_REGNUM]);
8343
8344 /* Check for the control registers, but do not fail if they
8345 are missing. */
8346 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
8347 tdesc_numbered_register (feature, tdesc_data, i,
8348 iwmmxt_names[i - ARM_WR0_REGNUM]);
8349
8350 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
8351 valid_p
8352 &= tdesc_numbered_register (feature, tdesc_data, i,
8353 iwmmxt_names[i - ARM_WR0_REGNUM]);
8354
8355 if (!valid_p)
8356 {
8357 tdesc_data_cleanup (tdesc_data);
8358 return NULL;
8359 }
8360 }
8361
8362 /* If we have a VFP unit, check whether the single precision registers
8363 are present. If not, then we will synthesize them as pseudo
8364 registers. */
8365 feature = tdesc_find_feature (tdesc,
8366 "org.gnu.gdb.arm.vfp");
8367 if (feature != NULL)
8368 {
8369 static const char *const vfp_double_names[] = {
8370 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
8371 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
8372 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
8373 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
8374 };
8375
8376 /* Require the double precision registers. There must be either
8377 16 or 32. */
8378 valid_p = 1;
8379 for (i = 0; i < 32; i++)
8380 {
8381 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8382 ARM_D0_REGNUM + i,
8383 vfp_double_names[i]);
8384 if (!valid_p)
8385 break;
8386 }
8387
8388 if (!valid_p && i != 16)
8389 {
8390 tdesc_data_cleanup (tdesc_data);
8391 return NULL;
8392 }
8393
8394 if (tdesc_unnumbered_register (feature, "s0") == 0)
8395 have_vfp_pseudos = 1;
8396
8397 have_vfp_registers = 1;
8398
8399 /* If we have VFP, also check for NEON. The architecture allows
8400 NEON without VFP (integer vector operations only), but GDB
8401 does not support that. */
8402 feature = tdesc_find_feature (tdesc,
8403 "org.gnu.gdb.arm.neon");
8404 if (feature != NULL)
8405 {
8406 /* NEON requires 32 double-precision registers. */
8407 if (i != 32)
8408 {
8409 tdesc_data_cleanup (tdesc_data);
8410 return NULL;
8411 }
8412
8413 /* If there are quad registers defined by the stub, use
8414 their type; otherwise (normally) provide them with
8415 the default type. */
8416 if (tdesc_unnumbered_register (feature, "q0") == 0)
8417 have_neon_pseudos = 1;
8418
8419 have_neon = 1;
8420 }
8421 }
8422 }
8423
8424 /* If there is already a candidate, use it. */
8425 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
8426 best_arch != NULL;
8427 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
8428 {
8429 if (arm_abi != ARM_ABI_AUTO
8430 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
8431 continue;
8432
8433 if (fp_model != ARM_FLOAT_AUTO
8434 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
8435 continue;
8436
8437 /* There are various other properties in tdep that we do not
8438 need to check here: those derived from a target description,
8439 since gdbarches with a different target description are
8440 automatically disqualified. */
8441
8442 /* Do check is_m, though, since it might come from the binary. */
8443 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
8444 continue;
8445
8446 /* Found a match. */
8447 break;
8448 }
8449
8450 if (best_arch != NULL)
8451 {
8452 if (tdesc_data != NULL)
8453 tdesc_data_cleanup (tdesc_data);
8454 return best_arch->gdbarch;
8455 }
8456
8457 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
8458 gdbarch = gdbarch_alloc (&info, tdep);
8459
8460 /* Record additional information about the architecture we are defining.
8461 These are gdbarch discriminators, like the OSABI. */
8462 tdep->arm_abi = arm_abi;
8463 tdep->fp_model = fp_model;
8464 tdep->is_m = is_m;
8465 tdep->have_fpa_registers = have_fpa_registers;
8466 tdep->have_vfp_registers = have_vfp_registers;
8467 tdep->have_vfp_pseudos = have_vfp_pseudos;
8468 tdep->have_neon_pseudos = have_neon_pseudos;
8469 tdep->have_neon = have_neon;
8470
8471 /* Breakpoints. */
8472 switch (info.byte_order_for_code)
8473 {
8474 case BFD_ENDIAN_BIG:
8475 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
8476 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
8477 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
8478 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
8479
8480 break;
8481
8482 case BFD_ENDIAN_LITTLE:
8483 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
8484 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
8485 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
8486 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
8487
8488 break;
8489
8490 default:
8491 internal_error (__FILE__, __LINE__,
8492 _("arm_gdbarch_init: bad byte order for float format"));
8493 }
8494
8495 /* On ARM targets char defaults to unsigned. */
8496 set_gdbarch_char_signed (gdbarch, 0);
8497
8498 /* Note: for displaced stepping, this includes the breakpoint, and one word
8499 of additional scratch space. This setting isn't used for anything besides
8500 displaced stepping at present. */
8501 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
8502
8503 /* This should be low enough for everything. */
8504 tdep->lowest_pc = 0x20;
8505 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
8506
8507 /* The default, for both APCS and AAPCS, is to return small
8508 structures in registers. */
8509 tdep->struct_return = reg_struct_return;
8510
8511 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
8512 set_gdbarch_frame_align (gdbarch, arm_frame_align);
8513
8514 set_gdbarch_write_pc (gdbarch, arm_write_pc);
8515
8516 /* Frame handling. */
8517 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
8518 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
8519 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
8520
8521 frame_base_set_default (gdbarch, &arm_normal_base);
8522
8523 /* Address manipulation. */
8524 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
8525 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
8526
8527 /* Advance PC across function entry code. */
8528 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
8529
8530 /* Detect whether PC is in function epilogue. */
8531 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
8532
8533 /* Skip trampolines. */
8534 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
8535
8536 /* The stack grows downward. */
8537 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
8538
8539 /* Breakpoint manipulation. */
8540 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
8541 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
8542 arm_remote_breakpoint_from_pc);
8543
8544 /* Information about registers, etc. */
8545 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
8546 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
8547 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
8548 set_gdbarch_register_type (gdbarch, arm_register_type);
8549 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
8550
8551 /* This "info float" is FPA-specific. Use the generic version if we
8552 do not have FPA. */
8553 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
8554 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
8555
8556 /* Internal <-> external register number maps. */
8557 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
8558 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
8559
8560 set_gdbarch_register_name (gdbarch, arm_register_name);
8561
8562 /* Returning results. */
8563 set_gdbarch_return_value (gdbarch, arm_return_value);
8564
8565 /* Disassembly. */
8566 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
8567
8568 /* Minsymbol frobbing. */
8569 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
8570 set_gdbarch_coff_make_msymbol_special (gdbarch,
8571 arm_coff_make_msymbol_special);
8572 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
8573
8574 /* Thumb-2 IT block support. */
8575 set_gdbarch_adjust_breakpoint_address (gdbarch,
8576 arm_adjust_breakpoint_address);
8577
8578 /* Virtual tables. */
8579 set_gdbarch_vbit_in_delta (gdbarch, 1);
8580
8581 /* Hook in the ABI-specific overrides, if they have been registered. */
8582 gdbarch_init_osabi (info, gdbarch);
8583
8584 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
8585
8586 /* Add some default predicates. */
8587 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
8588 dwarf2_append_unwinders (gdbarch);
8589 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
8590 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
8591
8592 /* Now we have tuned the configuration, set a few final things,
8593 based on what the OS ABI has told us. */
8594
8595 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
8596 binaries are always marked. */
8597 if (tdep->arm_abi == ARM_ABI_AUTO)
8598 tdep->arm_abi = ARM_ABI_APCS;
8599
8600 /* Watchpoints are not steppable. */
8601 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
8602
8603 /* We used to default to FPA for generic ARM, but almost nobody
8604 uses that now, and we now provide a way for the user to force
8605 the model. So default to the most useful variant. */
8606 if (tdep->fp_model == ARM_FLOAT_AUTO)
8607 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
8608
8609 if (tdep->jb_pc >= 0)
8610 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
8611
8612 /* Floating point sizes and format. */
8613 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
8614 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
8615 {
8616 set_gdbarch_double_format
8617 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8618 set_gdbarch_long_double_format
8619 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8620 }
8621 else
8622 {
8623 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
8624 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
8625 }
8626
8627 if (have_vfp_pseudos)
8628 {
8629 /* NOTE: These are the only pseudo registers used by
8630 the ARM target at the moment. If more are added, a
8631 little more care in numbering will be needed. */
8632
8633 int num_pseudos = 32;
8634 if (have_neon_pseudos)
8635 num_pseudos += 16;
8636 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
8637 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
8638 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
8639 }
8640
8641 if (tdesc_data)
8642 {
8643 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
8644
8645 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
8646
8647 /* Override tdesc_register_type to adjust the types of VFP
8648 registers for NEON. */
8649 set_gdbarch_register_type (gdbarch, arm_register_type);
8650 }
8651
8652 /* Add standard register aliases. We add aliases even for those
8653 names which are used by the current architecture - it's simpler,
8654 and does no harm, since nothing ever lists user registers. */
8655 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
8656 user_reg_add (gdbarch, arm_register_aliases[i].name,
8657 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
8658
8659 return gdbarch;
8660 }
8661
8662 static void
8663 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
8664 {
8665 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8666
8667 if (tdep == NULL)
8668 return;
8669
8670 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
8671 (unsigned long) tdep->lowest_pc);
8672 }
8673
8674 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
8675
8676 void
8677 _initialize_arm_tdep (void)
8678 {
8679 struct ui_file *stb;
8680 long length;
8681 struct cmd_list_element *new_set, *new_show;
8682 const char *setname;
8683 const char *setdesc;
8684 const char *const *regnames;
8685 int numregs, i, j;
8686 static char *helptext;
8687 char regdesc[1024], *rdptr = regdesc;
8688 size_t rest = sizeof (regdesc);
8689
8690 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
8691
8692 arm_objfile_data_key
8693 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
8694
8695 /* Add ourselves to objfile event chain. */
8696 observer_attach_new_objfile (arm_exidx_new_objfile);
8697 arm_exidx_data_key
8698 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
8699
8700 /* Register an ELF OS ABI sniffer for ARM binaries. */
8701 gdbarch_register_osabi_sniffer (bfd_arch_arm,
8702 bfd_target_elf_flavour,
8703 arm_elf_osabi_sniffer);
8704
8705 /* Initialize the standard target descriptions. */
8706 initialize_tdesc_arm_with_m ();
8707
8708 /* Get the number of possible sets of register names defined in opcodes. */
8709 num_disassembly_options = get_arm_regname_num_options ();
8710
8711 /* Add root prefix command for all "set arm"/"show arm" commands. */
8712 add_prefix_cmd ("arm", no_class, set_arm_command,
8713 _("Various ARM-specific commands."),
8714 &setarmcmdlist, "set arm ", 0, &setlist);
8715
8716 add_prefix_cmd ("arm", no_class, show_arm_command,
8717 _("Various ARM-specific commands."),
8718 &showarmcmdlist, "show arm ", 0, &showlist);
8719
8720 /* Sync the opcode insn printer with our register viewer. */
8721 parse_arm_disassembler_option ("reg-names-std");
8722
8723 /* Initialize the array that will be passed to
8724 add_setshow_enum_cmd(). */
8725 valid_disassembly_styles
8726 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
8727 for (i = 0; i < num_disassembly_options; i++)
8728 {
8729 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
8730 valid_disassembly_styles[i] = setname;
8731 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
8732 rdptr += length;
8733 rest -= length;
8734 /* When we find the default names, tell the disassembler to use
8735 them. */
8736 if (!strcmp (setname, "std"))
8737 {
8738 disassembly_style = setname;
8739 set_arm_regname_option (i);
8740 }
8741 }
8742 /* Mark the end of valid options. */
8743 valid_disassembly_styles[num_disassembly_options] = NULL;
8744
8745 /* Create the help text. */
8746 stb = mem_fileopen ();
8747 fprintf_unfiltered (stb, "%s%s%s",
8748 _("The valid values are:\n"),
8749 regdesc,
8750 _("The default is \"std\"."));
8751 helptext = ui_file_xstrdup (stb, NULL);
8752 ui_file_delete (stb);
8753
8754 add_setshow_enum_cmd("disassembler", no_class,
8755 valid_disassembly_styles, &disassembly_style,
8756 _("Set the disassembly style."),
8757 _("Show the disassembly style."),
8758 helptext,
8759 set_disassembly_style_sfunc,
8760 NULL, /* FIXME: i18n: The disassembly style is
8761 \"%s\". */
8762 &setarmcmdlist, &showarmcmdlist);
8763
8764 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
8765 _("Set usage of ARM 32-bit mode."),
8766 _("Show usage of ARM 32-bit mode."),
8767 _("When off, a 26-bit PC will be used."),
8768 NULL,
8769 NULL, /* FIXME: i18n: Usage of ARM 32-bit
8770 mode is %s. */
8771 &setarmcmdlist, &showarmcmdlist);
8772
8773 /* Add a command to allow the user to force the FPU model. */
8774 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
8775 _("Set the floating point type."),
8776 _("Show the floating point type."),
8777 _("auto - Determine the FP typefrom the OS-ABI.\n\
8778 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
8779 fpa - FPA co-processor (GCC compiled).\n\
8780 softvfp - Software FP with pure-endian doubles.\n\
8781 vfp - VFP co-processor."),
8782 set_fp_model_sfunc, show_fp_model,
8783 &setarmcmdlist, &showarmcmdlist);
8784
8785 /* Add a command to allow the user to force the ABI. */
8786 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
8787 _("Set the ABI."),
8788 _("Show the ABI."),
8789 NULL, arm_set_abi, arm_show_abi,
8790 &setarmcmdlist, &showarmcmdlist);
8791
8792 /* Add two commands to allow the user to force the assumed
8793 execution mode. */
8794 add_setshow_enum_cmd ("fallback-mode", class_support,
8795 arm_mode_strings, &arm_fallback_mode_string,
8796 _("Set the mode assumed when symbols are unavailable."),
8797 _("Show the mode assumed when symbols are unavailable."),
8798 NULL, NULL, arm_show_fallback_mode,
8799 &setarmcmdlist, &showarmcmdlist);
8800 add_setshow_enum_cmd ("force-mode", class_support,
8801 arm_mode_strings, &arm_force_mode_string,
8802 _("Set the mode assumed even when symbols are available."),
8803 _("Show the mode assumed even when symbols are available."),
8804 NULL, NULL, arm_show_force_mode,
8805 &setarmcmdlist, &showarmcmdlist);
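/* Typical interactive usage of the commands registered above
   (illustrative):

     (gdb) set arm fpu vfp
     (gdb) set arm abi AAPCS
     (gdb) set arm fallback-mode thumb
     (gdb) show arm force-mode

   These are handled by the set/show routines defined earlier in this
   file. */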
8806
8807 /* Debugging flag. */
8808 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
8809 _("Set ARM debugging."),
8810 _("Show ARM debugging."),
8811 _("When on, arm-specific debugging is enabled."),
8812 NULL,
8813 NULL, /* FIXME: i18n: "ARM debugging is %s." */
8814 &setdebuglist, &showdebuglist);
8815 }