ChangeLog:
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "features/arm-with-m.c"
59
60 static int arm_debug;
61
62 /* Macros for setting and testing a bit in a minimal symbol that marks
63 it as Thumb function. The MSB of the minimal symbol's "info" field
64 is used for this purpose.
65
66 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
67 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
68
69 #define MSYMBOL_SET_SPECIAL(msym) \
70 MSYMBOL_TARGET_FLAG_1 (msym) = 1
71
72 #define MSYMBOL_IS_SPECIAL(msym) \
73 MSYMBOL_TARGET_FLAG_1 (msym)
74
75 /* Per-objfile data used for mapping symbols. */
76 static const struct objfile_data *arm_objfile_data_key;
77
78 struct arm_mapping_symbol
79 {
80 bfd_vma value;
81 char type;
82 };
83 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
84 DEF_VEC_O(arm_mapping_symbol_s);
85
86 struct arm_per_objfile
87 {
88 VEC(arm_mapping_symbol_s) **section_maps;
89 };
90
91 /* The list of available "set arm ..." and "show arm ..." commands. */
92 static struct cmd_list_element *setarmcmdlist = NULL;
93 static struct cmd_list_element *showarmcmdlist = NULL;
94
95 /* The type of floating-point to use. Keep this in sync with enum
96 arm_float_model, and the help string in _initialize_arm_tdep. */
97 static const char *fp_model_strings[] =
98 {
99 "auto",
100 "softfpa",
101 "fpa",
102 "softvfp",
103 "vfp",
104 NULL
105 };
106
107 /* A variable that can be configured by the user. */
108 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
109 static const char *current_fp_model = "auto";
110
111 /* The ABI to use. Keep this in sync with arm_abi_kind. */
112 static const char *arm_abi_strings[] =
113 {
114 "auto",
115 "APCS",
116 "AAPCS",
117 NULL
118 };
119
120 /* A variable that can be configured by the user. */
121 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
122 static const char *arm_abi_string = "auto";
123
124 /* The execution mode to assume. */
125 static const char *arm_mode_strings[] =
126 {
127 "auto",
128 "arm",
129 "thumb",
130 NULL
131 };
132
133 static const char *arm_fallback_mode_string = "auto";
134 static const char *arm_force_mode_string = "auto";
135
136 /* Number of different reg name sets (options). */
137 static int num_disassembly_options;
138
139 /* The standard register names, and all the valid aliases for them. Note
140 that `fp', `sp' and `pc' are not added in this alias list, because they
141 have been added as builtin user registers in
142 std-regs.c:_initialize_frame_reg. */
143 static const struct
144 {
145 const char *name;
146 int regnum;
147 } arm_register_aliases[] = {
148 /* Basic register numbers. */
149 { "r0", 0 },
150 { "r1", 1 },
151 { "r2", 2 },
152 { "r3", 3 },
153 { "r4", 4 },
154 { "r5", 5 },
155 { "r6", 6 },
156 { "r7", 7 },
157 { "r8", 8 },
158 { "r9", 9 },
159 { "r10", 10 },
160 { "r11", 11 },
161 { "r12", 12 },
162 { "r13", 13 },
163 { "r14", 14 },
164 { "r15", 15 },
165 /* Synonyms (argument and variable registers). */
166 { "a1", 0 },
167 { "a2", 1 },
168 { "a3", 2 },
169 { "a4", 3 },
170 { "v1", 4 },
171 { "v2", 5 },
172 { "v3", 6 },
173 { "v4", 7 },
174 { "v5", 8 },
175 { "v6", 9 },
176 { "v7", 10 },
177 { "v8", 11 },
178 /* Other platform-specific names for r9. */
179 { "sb", 9 },
180 { "tr", 9 },
181 /* Special names. */
182 { "ip", 12 },
183 { "lr", 14 },
184 /* Names used by GCC (not listed in the ARM EABI). */
185 { "sl", 10 },
186 /* A special name from the older ATPCS. */
187 { "wr", 7 },
188 };
189
190 static const char *const arm_register_names[] =
191 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
192 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
193 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
194 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
195 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
196 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
197 "fps", "cpsr" }; /* 24 25 */
198
199 /* Valid register name styles. */
200 static const char **valid_disassembly_styles;
201
202 /* Disassembly style to use. Default to "std" register names. */
203 static const char *disassembly_style;
204
205 /* This is used to keep the bfd arch_info in sync with the disassembly
206 style. */
207 static void set_disassembly_style_sfunc(char *, int,
208 struct cmd_list_element *);
209 static void set_disassembly_style (void);
210
211 static void convert_from_extended (const struct floatformat *, const void *,
212 void *, int);
213 static void convert_to_extended (const struct floatformat *, void *,
214 const void *, int);
215
216 static void arm_neon_quad_read (struct gdbarch *gdbarch,
217 struct regcache *regcache,
218 int regnum, gdb_byte *buf);
219 static void arm_neon_quad_write (struct gdbarch *gdbarch,
220 struct regcache *regcache,
221 int regnum, const gdb_byte *buf);
222
223 struct arm_prologue_cache
224 {
225 /* The stack pointer at the time this frame was created; i.e. the
226 caller's stack pointer when this function was called. It is used
227 to identify this frame. */
228 CORE_ADDR prev_sp;
229
230 /* The frame base for this frame is just prev_sp - frame size.
231 FRAMESIZE is the distance from the frame pointer to the
232 initial stack pointer. */
233
234 int framesize;
235
236 /* The register used to hold the frame pointer for this frame. */
237 int framereg;
238
239 /* Saved register offsets. */
240 struct trad_frame_saved_reg *saved_regs;
241 };
242
243 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
244 CORE_ADDR prologue_start,
245 CORE_ADDR prologue_end,
246 struct arm_prologue_cache *cache);
247
248 /* Architecture version for displaced stepping. This effects the behaviour of
249 certain instructions, and really should not be hard-wired. */
250
251 #define DISPLACED_STEPPING_ARCH_VERSION 5
252
253 /* Addresses for calling Thumb functions have the bit 0 set.
254 Here are some macros to test, set, or clear bit 0 of addresses. */
255 #define IS_THUMB_ADDR(addr) ((addr) & 1)
256 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
257 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
258
259 /* Set to true if the 32-bit mode is in use. */
260
261 int arm_apcs_32 = 1;
262
263 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
264
265 static int
266 arm_psr_thumb_bit (struct gdbarch *gdbarch)
267 {
268 if (gdbarch_tdep (gdbarch)->is_m)
269 return XPSR_T;
270 else
271 return CPSR_T;
272 }
273
274 /* Determine if FRAME is executing in Thumb mode. */
275
276 int
277 arm_frame_is_thumb (struct frame_info *frame)
278 {
279 CORE_ADDR cpsr;
280 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
281
282 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
283 directly (from a signal frame or dummy frame) or by interpreting
284 the saved LR (from a prologue or DWARF frame). So consult it and
285 trust the unwinders. */
286 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
287
288 return (cpsr & t_bit) != 0;
289 }
290
291 /* Callback for VEC_lower_bound. */
292
293 static inline int
294 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
295 const struct arm_mapping_symbol *rhs)
296 {
297 return lhs->value < rhs->value;
298 }
299
300 /* Search for the mapping symbol covering MEMADDR. If one is found,
301 return its type. Otherwise, return 0. If START is non-NULL,
302 set *START to the location of the mapping symbol. */
303
304 static char
305 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
306 {
307 struct obj_section *sec;
308
309 /* If there are mapping symbols, consult them. */
310 sec = find_pc_section (memaddr);
311 if (sec != NULL)
312 {
313 struct arm_per_objfile *data;
314 VEC(arm_mapping_symbol_s) *map;
315 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
316 0 };
317 unsigned int idx;
318
319 data = objfile_data (sec->objfile, arm_objfile_data_key);
320 if (data != NULL)
321 {
322 map = data->section_maps[sec->the_bfd_section->index];
323 if (!VEC_empty (arm_mapping_symbol_s, map))
324 {
325 struct arm_mapping_symbol *map_sym;
326
327 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
328 arm_compare_mapping_symbols);
329
330 /* VEC_lower_bound finds the earliest ordered insertion
331 point. If the following symbol starts at this exact
332 address, we use that; otherwise, the preceding
333 mapping symbol covers this address. */
334 if (idx < VEC_length (arm_mapping_symbol_s, map))
335 {
336 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
337 if (map_sym->value == map_key.value)
338 {
339 if (start)
340 *start = map_sym->value + obj_section_addr (sec);
341 return map_sym->type;
342 }
343 }
344
345 if (idx > 0)
346 {
347 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
348 if (start)
349 *start = map_sym->value + obj_section_addr (sec);
350 return map_sym->type;
351 }
352 }
353 }
354 }
355
356 return 0;
357 }
358
359 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
360 CORE_ADDR pc, int insert_bkpt);
361
362 /* Determine if the program counter specified in MEMADDR is in a Thumb
363 function. This function should be called for addresses unrelated to
364 any executing frame; otherwise, prefer arm_frame_is_thumb. */
365
366 int
367 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
368 {
369 struct obj_section *sec;
370 struct minimal_symbol *sym;
371 char type;
372 struct displaced_step_closure* dsc
373 = get_displaced_step_closure_by_addr(memaddr);
374
375 /* If checking the mode of displaced instruction in copy area, the mode
376 should be determined by instruction on the original address. */
377 if (dsc)
378 {
379 if (debug_displaced)
380 fprintf_unfiltered (gdb_stdlog,
381 "displaced: check mode of %.8lx instead of %.8lx\n",
382 (unsigned long) dsc->insn_addr,
383 (unsigned long) memaddr);
384 memaddr = dsc->insn_addr;
385 }
386
387 /* If bit 0 of the address is set, assume this is a Thumb address. */
388 if (IS_THUMB_ADDR (memaddr))
389 return 1;
390
391 /* If the user wants to override the symbol table, let him. */
392 if (strcmp (arm_force_mode_string, "arm") == 0)
393 return 0;
394 if (strcmp (arm_force_mode_string, "thumb") == 0)
395 return 1;
396
397 /* ARM v6-M and v7-M are always in Thumb mode. */
398 if (gdbarch_tdep (gdbarch)->is_m)
399 return 1;
400
401 /* If there are mapping symbols, consult them. */
402 type = arm_find_mapping_symbol (memaddr, NULL);
403 if (type)
404 return type == 't';
405
406 /* Thumb functions have a "special" bit set in minimal symbols. */
407 sym = lookup_minimal_symbol_by_pc (memaddr);
408 if (sym)
409 return (MSYMBOL_IS_SPECIAL (sym));
410
411 /* If the user wants to override the fallback mode, let them. */
412 if (strcmp (arm_fallback_mode_string, "arm") == 0)
413 return 0;
414 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
415 return 1;
416
417 /* If we couldn't find any symbol, but we're talking to a running
418 target, then trust the current value of $cpsr. This lets
419 "display/i $pc" always show the correct mode (though if there is
420 a symbol table we will not reach here, so it still may not be
421 displayed in the mode it will be executed).
422
423 As a further heuristic if we detect that we are doing a single-step we
424 see what state executing the current instruction ends up with us being
425 in. */
426 if (target_has_registers)
427 {
428 struct frame_info *current_frame = get_current_frame ();
429 CORE_ADDR current_pc = get_frame_pc (current_frame);
430 int is_thumb = arm_frame_is_thumb (current_frame);
431 CORE_ADDR next_pc;
432 if (memaddr == current_pc)
433 return is_thumb;
434 else
435 {
436 struct gdbarch *gdbarch = get_frame_arch (current_frame);
437 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
438 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
439 return IS_THUMB_ADDR (next_pc);
440 else
441 return is_thumb;
442 }
443 }
444
445 /* Otherwise we're out of luck; we assume ARM. */
446 return 0;
447 }
448
449 /* Remove useless bits from addresses in a running program. */
450 static CORE_ADDR
451 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
452 {
453 if (arm_apcs_32)
454 return UNMAKE_THUMB_ADDR (val);
455 else
456 return (val & 0x03fffffc);
457 }
458
459 /* When reading symbols, we need to zap the low bit of the address,
460 which may be set to 1 for Thumb functions. */
461 static CORE_ADDR
462 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
463 {
464 return val & ~1;
465 }
466
467 /* Return 1 if PC is the start of a compiler helper function which
468 can be safely ignored during prologue skipping. IS_THUMB is true
469 if the function is known to be a Thumb function due to the way it
470 is being called. */
471 static int
472 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
473 {
474 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
475 struct minimal_symbol *msym;
476
477 msym = lookup_minimal_symbol_by_pc (pc);
478 if (msym != NULL
479 && SYMBOL_VALUE_ADDRESS (msym) == pc
480 && SYMBOL_LINKAGE_NAME (msym) != NULL)
481 {
482 const char *name = SYMBOL_LINKAGE_NAME (msym);
483
484 /* The GNU linker's Thumb call stub to foo is named
485 __foo_from_thumb. */
486 if (strstr (name, "_from_thumb") != NULL)
487 name += 2;
488
489 /* On soft-float targets, __truncdfsf2 is called to convert promoted
490 arguments to their argument types in non-prototyped
491 functions. */
492 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
493 return 1;
494 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
495 return 1;
496
497 /* Internal functions related to thread-local storage. */
498 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
499 return 1;
500 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
501 return 1;
502 }
503 else
504 {
505 /* If we run against a stripped glibc, we may be unable to identify
506 special functions by name. Check for one important case,
507 __aeabi_read_tp, by comparing the *code* against the default
508 implementation (this is hand-written ARM assembler in glibc). */
509
510 if (!is_thumb
511 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
512 == 0xe3e00a0f /* mov r0, #0xffff0fff */
513 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
514 == 0xe240f01f) /* sub pc, r0, #31 */
515 return 1;
516 }
517
518 return 0;
519 }
520
521 /* Support routines for instruction parsing. */
522 #define submask(x) ((1L << ((x) + 1)) - 1)
523 #define bit(obj,st) (((obj) >> (st)) & 1)
524 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
525 #define sbits(obj,st,fn) \
526 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
527 #define BranchDest(addr,instr) \
528 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
529
530 /* Extract the immediate from instruction movw/movt of encoding T. INSN1 is
531 the first 16-bit of instruction, and INSN2 is the second 16-bit of
532 instruction. */
533 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
534 ((bits ((insn1), 0, 3) << 12) \
535 | (bits ((insn1), 10, 10) << 11) \
536 | (bits ((insn2), 12, 14) << 8) \
537 | bits ((insn2), 0, 7))
538
539 /* Extract the immediate from instruction movw/movt of encoding A. INSN is
540 the 32-bit instruction. */
541 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
542 ((bits ((insn), 16, 19) << 12) \
543 | bits ((insn), 0, 11))
544
545 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
546
547 static unsigned int
548 thumb_expand_immediate (unsigned int imm)
549 {
550 unsigned int count = imm >> 7;
551
552 if (count < 8)
553 switch (count / 2)
554 {
555 case 0:
556 return imm & 0xff;
557 case 1:
558 return (imm & 0xff) | ((imm & 0xff) << 16);
559 case 2:
560 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
561 case 3:
562 return (imm & 0xff) | ((imm & 0xff) << 8)
563 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
564 }
565
566 return (0x80 | (imm & 0x7f)) << (32 - count);
567 }
568
569 /* Return 1 if the 16-bit Thumb instruction INST might change
570 control flow, 0 otherwise. */
571
572 static int
573 thumb_instruction_changes_pc (unsigned short inst)
574 {
575 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
576 return 1;
577
578 if ((inst & 0xf000) == 0xd000) /* conditional branch */
579 return 1;
580
581 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
582 return 1;
583
584 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
585 return 1;
586
587 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
588 return 1;
589
590 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
591 return 1;
592
593 return 0;
594 }
595
596 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
597 might change control flow, 0 otherwise. */
598
599 static int
600 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
601 {
602 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
603 {
604 /* Branches and miscellaneous control instructions. */
605
606 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
607 {
608 /* B, BL, BLX. */
609 return 1;
610 }
611 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
612 {
613 /* SUBS PC, LR, #imm8. */
614 return 1;
615 }
616 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
617 {
618 /* Conditional branch. */
619 return 1;
620 }
621
622 return 0;
623 }
624
625 if ((inst1 & 0xfe50) == 0xe810)
626 {
627 /* Load multiple or RFE. */
628
629 if (bit (inst1, 7) && !bit (inst1, 8))
630 {
631 /* LDMIA or POP */
632 if (bit (inst2, 15))
633 return 1;
634 }
635 else if (!bit (inst1, 7) && bit (inst1, 8))
636 {
637 /* LDMDB */
638 if (bit (inst2, 15))
639 return 1;
640 }
641 else if (bit (inst1, 7) && bit (inst1, 8))
642 {
643 /* RFEIA */
644 return 1;
645 }
646 else if (!bit (inst1, 7) && !bit (inst1, 8))
647 {
648 /* RFEDB */
649 return 1;
650 }
651
652 return 0;
653 }
654
655 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
656 {
657 /* MOV PC or MOVS PC. */
658 return 1;
659 }
660
661 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
662 {
663 /* LDR PC. */
664 if (bits (inst1, 0, 3) == 15)
665 return 1;
666 if (bit (inst1, 7))
667 return 1;
668 if (bit (inst2, 11))
669 return 1;
670 if ((inst2 & 0x0fc0) == 0x0000)
671 return 1;
672
673 return 0;
674 }
675
676 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
677 {
678 /* TBB. */
679 return 1;
680 }
681
682 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
683 {
684 /* TBH. */
685 return 1;
686 }
687
688 return 0;
689 }
690
691 /* Analyze a Thumb prologue, looking for a recognizable stack frame
692 and frame pointer. Scan until we encounter a store that could
693 clobber the stack frame unexpectedly, or an unknown instruction.
694 Return the last address which is definitely safe to skip for an
695 initial breakpoint. */
696
697 static CORE_ADDR
698 thumb_analyze_prologue (struct gdbarch *gdbarch,
699 CORE_ADDR start, CORE_ADDR limit,
700 struct arm_prologue_cache *cache)
701 {
702 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
703 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
704 int i;
705 pv_t regs[16];
706 struct pv_area *stack;
707 struct cleanup *back_to;
708 CORE_ADDR offset;
709 CORE_ADDR unrecognized_pc = 0;
710
711 for (i = 0; i < 16; i++)
712 regs[i] = pv_register (i, 0);
713 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
714 back_to = make_cleanup_free_pv_area (stack);
715
716 while (start < limit)
717 {
718 unsigned short insn;
719
720 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
721
722 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
723 {
724 int regno;
725 int mask;
726
727 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
728 break;
729
730 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
731 whether to save LR (R14). */
732 mask = (insn & 0xff) | ((insn & 0x100) << 6);
733
734 /* Calculate offsets of saved R0-R7 and LR. */
735 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
736 if (mask & (1 << regno))
737 {
738 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
739 -4);
740 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
741 }
742 }
743 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
744 sub sp, #simm */
745 {
746 offset = (insn & 0x7f) << 2; /* get scaled offset */
747 if (insn & 0x80) /* Check for SUB. */
748 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
749 -offset);
750 else
751 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
752 offset);
753 }
754 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
755 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
756 (insn & 0xff) << 2);
757 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
758 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
759 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
760 bits (insn, 6, 8));
761 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
762 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
763 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
764 bits (insn, 0, 7));
765 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
766 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
767 && pv_is_constant (regs[bits (insn, 3, 5)]))
768 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
769 regs[bits (insn, 6, 8)]);
770 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
771 && pv_is_constant (regs[bits (insn, 3, 6)]))
772 {
773 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
774 int rm = bits (insn, 3, 6);
775 regs[rd] = pv_add (regs[rd], regs[rm]);
776 }
777 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
778 {
779 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
780 int src_reg = (insn & 0x78) >> 3;
781 regs[dst_reg] = regs[src_reg];
782 }
783 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
784 {
785 /* Handle stores to the stack. Normally pushes are used,
786 but with GCC -mtpcs-frame, there may be other stores
787 in the prologue to create the frame. */
788 int regno = (insn >> 8) & 0x7;
789 pv_t addr;
790
791 offset = (insn & 0xff) << 2;
792 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
793
794 if (pv_area_store_would_trash (stack, addr))
795 break;
796
797 pv_area_store (stack, addr, 4, regs[regno]);
798 }
799 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
800 {
801 int rd = bits (insn, 0, 2);
802 int rn = bits (insn, 3, 5);
803 pv_t addr;
804
805 offset = bits (insn, 6, 10) << 2;
806 addr = pv_add_constant (regs[rn], offset);
807
808 if (pv_area_store_would_trash (stack, addr))
809 break;
810
811 pv_area_store (stack, addr, 4, regs[rd]);
812 }
813 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
814 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
815 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
816 /* Ignore stores of argument registers to the stack. */
817 ;
818 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
819 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
820 /* Ignore block loads from the stack, potentially copying
821 parameters from memory. */
822 ;
823 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
824 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
825 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
826 /* Similarly ignore single loads from the stack. */
827 ;
828 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
829 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
830 /* Skip register copies, i.e. saves to another register
831 instead of the stack. */
832 ;
833 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
834 /* Recognize constant loads; even with small stacks these are necessary
835 on Thumb. */
836 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
837 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
838 {
839 /* Constant pool loads, for the same reason. */
840 unsigned int constant;
841 CORE_ADDR loc;
842
843 loc = start + 4 + bits (insn, 0, 7) * 4;
844 constant = read_memory_unsigned_integer (loc, 4, byte_order);
845 regs[bits (insn, 8, 10)] = pv_constant (constant);
846 }
847 else if ((insn & 0xe000) == 0xe000)
848 {
849 unsigned short inst2;
850
851 inst2 = read_memory_unsigned_integer (start + 2, 2,
852 byte_order_for_code);
853
854 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
855 {
856 /* BL, BLX. Allow some special function calls when
857 skipping the prologue; GCC generates these before
858 storing arguments to the stack. */
859 CORE_ADDR nextpc;
860 int j1, j2, imm1, imm2;
861
862 imm1 = sbits (insn, 0, 10);
863 imm2 = bits (inst2, 0, 10);
864 j1 = bit (inst2, 13);
865 j2 = bit (inst2, 11);
866
867 offset = ((imm1 << 12) + (imm2 << 1));
868 offset ^= ((!j2) << 22) | ((!j1) << 23);
869
870 nextpc = start + 4 + offset;
871 /* For BLX make sure to clear the low bits. */
872 if (bit (inst2, 12) == 0)
873 nextpc = nextpc & 0xfffffffc;
874
875 if (!skip_prologue_function (gdbarch, nextpc,
876 bit (inst2, 12) != 0))
877 break;
878 }
879
880 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
881 { registers } */
882 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
883 {
884 pv_t addr = regs[bits (insn, 0, 3)];
885 int regno;
886
887 if (pv_area_store_would_trash (stack, addr))
888 break;
889
890 /* Calculate offsets of saved registers. */
891 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
892 if (inst2 & (1 << regno))
893 {
894 addr = pv_add_constant (addr, -4);
895 pv_area_store (stack, addr, 4, regs[regno]);
896 }
897
898 if (insn & 0x0020)
899 regs[bits (insn, 0, 3)] = addr;
900 }
901
902 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
903 [Rn, #+/-imm]{!} */
904 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
905 {
906 int regno1 = bits (inst2, 12, 15);
907 int regno2 = bits (inst2, 8, 11);
908 pv_t addr = regs[bits (insn, 0, 3)];
909
910 offset = inst2 & 0xff;
911 if (insn & 0x0080)
912 addr = pv_add_constant (addr, offset);
913 else
914 addr = pv_add_constant (addr, -offset);
915
916 if (pv_area_store_would_trash (stack, addr))
917 break;
918
919 pv_area_store (stack, addr, 4, regs[regno1]);
920 pv_area_store (stack, pv_add_constant (addr, 4),
921 4, regs[regno2]);
922
923 if (insn & 0x0020)
924 regs[bits (insn, 0, 3)] = addr;
925 }
926
927 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
928 && (inst2 & 0x0c00) == 0x0c00
929 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
930 {
931 int regno = bits (inst2, 12, 15);
932 pv_t addr = regs[bits (insn, 0, 3)];
933
934 offset = inst2 & 0xff;
935 if (inst2 & 0x0200)
936 addr = pv_add_constant (addr, offset);
937 else
938 addr = pv_add_constant (addr, -offset);
939
940 if (pv_area_store_would_trash (stack, addr))
941 break;
942
943 pv_area_store (stack, addr, 4, regs[regno]);
944
945 if (inst2 & 0x0100)
946 regs[bits (insn, 0, 3)] = addr;
947 }
948
949 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
950 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
951 {
952 int regno = bits (inst2, 12, 15);
953 pv_t addr;
954
955 offset = inst2 & 0xfff;
956 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
957
958 if (pv_area_store_would_trash (stack, addr))
959 break;
960
961 pv_area_store (stack, addr, 4, regs[regno]);
962 }
963
964 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
965 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
966 /* Ignore stores of argument registers to the stack. */
967 ;
968
969 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
970 && (inst2 & 0x0d00) == 0x0c00
971 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
972 /* Ignore stores of argument registers to the stack. */
973 ;
974
975 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
976 { registers } */
977 && (inst2 & 0x8000) == 0x0000
978 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
979 /* Ignore block loads from the stack, potentially copying
980 parameters from memory. */
981 ;
982
983 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
984 [Rn, #+/-imm] */
985 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
986 /* Similarly ignore dual loads from the stack. */
987 ;
988
989 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
990 && (inst2 & 0x0d00) == 0x0c00
991 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
992 /* Similarly ignore single loads from the stack. */
993 ;
994
995 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
996 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
997 /* Similarly ignore single loads from the stack. */
998 ;
999
1000 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
1001 && (inst2 & 0x8000) == 0x0000)
1002 {
1003 unsigned int imm = ((bits (insn, 10, 10) << 11)
1004 | (bits (inst2, 12, 14) << 8)
1005 | bits (inst2, 0, 7));
1006
1007 regs[bits (inst2, 8, 11)]
1008 = pv_add_constant (regs[bits (insn, 0, 3)],
1009 thumb_expand_immediate (imm));
1010 }
1011
1012 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1013 && (inst2 & 0x8000) == 0x0000)
1014 {
1015 unsigned int imm = ((bits (insn, 10, 10) << 11)
1016 | (bits (inst2, 12, 14) << 8)
1017 | bits (inst2, 0, 7));
1018
1019 regs[bits (inst2, 8, 11)]
1020 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1021 }
1022
1023 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1024 && (inst2 & 0x8000) == 0x0000)
1025 {
1026 unsigned int imm = ((bits (insn, 10, 10) << 11)
1027 | (bits (inst2, 12, 14) << 8)
1028 | bits (inst2, 0, 7));
1029
1030 regs[bits (inst2, 8, 11)]
1031 = pv_add_constant (regs[bits (insn, 0, 3)],
1032 - (CORE_ADDR) thumb_expand_immediate (imm));
1033 }
1034
1035 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1036 && (inst2 & 0x8000) == 0x0000)
1037 {
1038 unsigned int imm = ((bits (insn, 10, 10) << 11)
1039 | (bits (inst2, 12, 14) << 8)
1040 | bits (inst2, 0, 7));
1041
1042 regs[bits (inst2, 8, 11)]
1043 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1044 }
1045
1046 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1047 {
1048 unsigned int imm = ((bits (insn, 10, 10) << 11)
1049 | (bits (inst2, 12, 14) << 8)
1050 | bits (inst2, 0, 7));
1051
1052 regs[bits (inst2, 8, 11)]
1053 = pv_constant (thumb_expand_immediate (imm));
1054 }
1055
1056 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1057 {
1058 unsigned int imm
1059 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1060
1061 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1062 }
1063
1064 else if (insn == 0xea5f /* mov.w Rd,Rm */
1065 && (inst2 & 0xf0f0) == 0)
1066 {
1067 int dst_reg = (inst2 & 0x0f00) >> 8;
1068 int src_reg = inst2 & 0xf;
1069 regs[dst_reg] = regs[src_reg];
1070 }
1071
1072 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1073 {
1074 /* Constant pool loads. */
1075 unsigned int constant;
1076 CORE_ADDR loc;
1077
1078 offset = bits (insn, 0, 11);
1079 if (insn & 0x0080)
1080 loc = start + 4 + offset;
1081 else
1082 loc = start + 4 - offset;
1083
1084 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1085 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1086 }
1087
1088 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1089 {
1090 /* Constant pool loads. */
1091 unsigned int constant;
1092 CORE_ADDR loc;
1093
1094 offset = bits (insn, 0, 7) << 2;
1095 if (insn & 0x0080)
1096 loc = start + 4 + offset;
1097 else
1098 loc = start + 4 - offset;
1099
1100 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1101 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1102
1103 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1104 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1105 }
1106
1107 else if (thumb2_instruction_changes_pc (insn, inst2))
1108 {
1109 /* Don't scan past anything that might change control flow. */
1110 break;
1111 }
1112 else
1113 {
1114 /* The optimizer might shove anything into the prologue,
1115 so we just skip what we don't recognize. */
1116 unrecognized_pc = start;
1117 }
1118
1119 start += 2;
1120 }
1121 else if (thumb_instruction_changes_pc (insn))
1122 {
1123 /* Don't scan past anything that might change control flow. */
1124 break;
1125 }
1126 else
1127 {
1128 /* The optimizer might shove anything into the prologue,
1129 so we just skip what we don't recognize. */
1130 unrecognized_pc = start;
1131 }
1132
1133 start += 2;
1134 }
1135
1136 if (arm_debug)
1137 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1138 paddress (gdbarch, start));
1139
1140 if (unrecognized_pc == 0)
1141 unrecognized_pc = start;
1142
1143 if (cache == NULL)
1144 {
1145 do_cleanups (back_to);
1146 return unrecognized_pc;
1147 }
1148
1149 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1150 {
1151 /* Frame pointer is fp. Frame size is constant. */
1152 cache->framereg = ARM_FP_REGNUM;
1153 cache->framesize = -regs[ARM_FP_REGNUM].k;
1154 }
1155 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1156 {
1157 /* Frame pointer is r7. Frame size is constant. */
1158 cache->framereg = THUMB_FP_REGNUM;
1159 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1160 }
1161 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1162 {
1163 /* Try the stack pointer... this is a bit desperate. */
1164 cache->framereg = ARM_SP_REGNUM;
1165 cache->framesize = -regs[ARM_SP_REGNUM].k;
1166 }
1167 else
1168 {
1169 /* We're just out of luck. We don't know where the frame is. */
1170 cache->framereg = -1;
1171 cache->framesize = 0;
1172 }
1173
1174 for (i = 0; i < 16; i++)
1175 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1176 cache->saved_regs[i].addr = offset;
1177
1178 do_cleanups (back_to);
1179 return unrecognized_pc;
1180 }
1181
1182
1183 /* Try to analyze the instructions starting from PC, which load symbol
1184 __stack_chk_guard. Return the address of instruction after loading this
1185 symbol, set the dest register number to *BASEREG, and set the size of
1186 instructions for loading symbol in OFFSET. Return 0 if instructions are
1187 not recognized. */
1188
1189 static CORE_ADDR
1190 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1191 unsigned int *destreg, int *offset)
1192 {
1193 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1194 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1195 unsigned int low, high, address;
1196
1197 address = 0;
1198 if (is_thumb)
1199 {
1200 unsigned short insn1
1201 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1202
1203 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1204 {
1205 *destreg = bits (insn1, 8, 10);
1206 *offset = 2;
1207 address = bits (insn1, 0, 7);
1208 }
1209 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1210 {
1211 unsigned short insn2
1212 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1213
1214 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1215
1216 insn1
1217 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1218 insn2
1219 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1220
1221 /* movt Rd, #const */
1222 if ((insn1 & 0xfbc0) == 0xf2c0)
1223 {
1224 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1225 *destreg = bits (insn2, 8, 11);
1226 *offset = 8;
1227 address = (high << 16 | low);
1228 }
1229 }
1230 }
1231 else
1232 {
1233 unsigned int insn
1234 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1235
1236 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1237 {
1238 address = bits (insn, 0, 11);
1239 *destreg = bits (insn, 12, 15);
1240 *offset = 4;
1241 }
1242 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1243 {
1244 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1245
1246 insn
1247 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1248
1249 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1250 {
1251 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1252 *destreg = bits (insn, 12, 15);
1253 *offset = 8;
1254 address = (high << 16 | low);
1255 }
1256 }
1257 }
1258
1259 return address;
1260 }
1261
1262 /* Try to skip a sequence of instructions used for stack protector. If PC
1263 points to the first instruction of this sequence, return the address of
1264 first instruction after this sequence, otherwise, return original PC.
1265
1266 On arm, this sequence of instructions is composed of mainly three steps,
1267 Step 1: load symbol __stack_chk_guard,
1268 Step 2: load from address of __stack_chk_guard,
1269 Step 3: store it to somewhere else.
1270
1271 Usually, instructions on step 2 and step 3 are the same on various ARM
1272 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1273 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1274 instructions in step 1 vary from different ARM architectures. On ARMv7,
1275 they are,
1276
1277 movw Rn, #:lower16:__stack_chk_guard
1278 movt Rn, #:upper16:__stack_chk_guard
1279
1280 On ARMv5t, it is,
1281
1282 ldr Rn, .Label
1283 ....
1284 .Lable:
1285 .word __stack_chk_guard
1286
1287 Since ldr/str is a very popular instruction, we can't use them as
1288 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1289 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1290 stripped, as the 'fingerprint' of a stack protector cdoe sequence. */
1291
1292 static CORE_ADDR
1293 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1294 {
1295 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1296 unsigned int address, basereg;
1297 struct minimal_symbol *stack_chk_guard;
1298 int offset;
1299 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1300 CORE_ADDR addr;
1301
1302 /* Try to parse the instructions in Step 1. */
1303 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1304 &basereg, &offset);
1305 if (!addr)
1306 return pc;
1307
1308 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1309 /* If name of symbol doesn't start with '__stack_chk_guard', this
1310 instruction sequence is not for stack protector. If symbol is
1311 removed, we conservatively think this sequence is for stack protector. */
1312 if (stack_chk_guard
1313 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1314 strlen ("__stack_chk_guard")) != 0)
1315 return pc;
1316
1317 if (is_thumb)
1318 {
1319 unsigned int destreg;
1320 unsigned short insn
1321 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1322
1323 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1324 if ((insn & 0xf800) != 0x6800)
1325 return pc;
1326 if (bits (insn, 3, 5) != basereg)
1327 return pc;
1328 destreg = bits (insn, 0, 2);
1329
1330 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1331 byte_order_for_code);
1332 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1333 if ((insn & 0xf800) != 0x6000)
1334 return pc;
1335 if (destreg != bits (insn, 0, 2))
1336 return pc;
1337 }
1338 else
1339 {
1340 unsigned int destreg;
1341 unsigned int insn
1342 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1343
1344 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1345 if ((insn & 0x0e500000) != 0x04100000)
1346 return pc;
1347 if (bits (insn, 16, 19) != basereg)
1348 return pc;
1349 destreg = bits (insn, 12, 15);
1350 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1351 insn = read_memory_unsigned_integer (pc + offset + 4,
1352 4, byte_order_for_code);
1353 if ((insn & 0x0e500000) != 0x04000000)
1354 return pc;
1355 if (bits (insn, 12, 15) != destreg)
1356 return pc;
1357 }
1358 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1359 on arm. */
1360 if (is_thumb)
1361 return pc + offset + 4;
1362 else
1363 return pc + offset + 8;
1364 }
1365
1366 /* Advance the PC across any function entry prologue instructions to
1367 reach some "real" code.
1368
1369 The APCS (ARM Procedure Call Standard) defines the following
1370 prologue:
1371
1372 mov ip, sp
1373 [stmfd sp!, {a1,a2,a3,a4}]
1374 stmfd sp!, {...,fp,ip,lr,pc}
1375 [stfe f7, [sp, #-12]!]
1376 [stfe f6, [sp, #-12]!]
1377 [stfe f5, [sp, #-12]!]
1378 [stfe f4, [sp, #-12]!]
1379 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1380
1381 static CORE_ADDR
1382 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1383 {
1384 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1385 unsigned long inst;
1386 CORE_ADDR skip_pc;
1387 CORE_ADDR func_addr, limit_pc;
1388 struct symtab_and_line sal;
1389
1390 /* See if we can determine the end of the prologue via the symbol table.
1391 If so, then return either PC, or the PC after the prologue, whichever
1392 is greater. */
1393 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1394 {
1395 CORE_ADDR post_prologue_pc
1396 = skip_prologue_using_sal (gdbarch, func_addr);
1397 struct symtab *s = find_pc_symtab (func_addr);
1398
1399 if (post_prologue_pc)
1400 post_prologue_pc
1401 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1402
1403
1404 /* GCC always emits a line note before the prologue and another
1405 one after, even if the two are at the same address or on the
1406 same line. Take advantage of this so that we do not need to
1407 know every instruction that might appear in the prologue. We
1408 will have producer information for most binaries; if it is
1409 missing (e.g. for -gstabs), assuming the GNU tools. */
1410 if (post_prologue_pc
1411 && (s == NULL
1412 || s->producer == NULL
1413 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1414 return post_prologue_pc;
1415
1416 if (post_prologue_pc != 0)
1417 {
1418 CORE_ADDR analyzed_limit;
1419
1420 /* For non-GCC compilers, make sure the entire line is an
1421 acceptable prologue; GDB will round this function's
1422 return value up to the end of the following line so we
1423 can not skip just part of a line (and we do not want to).
1424
1425 RealView does not treat the prologue specially, but does
1426 associate prologue code with the opening brace; so this
1427 lets us skip the first line if we think it is the opening
1428 brace. */
1429 if (arm_pc_is_thumb (gdbarch, func_addr))
1430 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1431 post_prologue_pc, NULL);
1432 else
1433 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1434 post_prologue_pc, NULL);
1435
1436 if (analyzed_limit != post_prologue_pc)
1437 return func_addr;
1438
1439 return post_prologue_pc;
1440 }
1441 }
1442
1443 /* Can't determine prologue from the symbol table, need to examine
1444 instructions. */
1445
1446 /* Find an upper limit on the function prologue using the debug
1447 information. If the debug information could not be used to provide
1448 that bound, then use an arbitrary large number as the upper bound. */
1449 /* Like arm_scan_prologue, stop no later than pc + 64. */
1450 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1451 if (limit_pc == 0)
1452 limit_pc = pc + 64; /* Magic. */
1453
1454
1455 /* Check if this is Thumb code. */
1456 if (arm_pc_is_thumb (gdbarch, pc))
1457 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1458
1459 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1460 {
1461 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1462
1463 /* "mov ip, sp" is no longer a required part of the prologue. */
1464 if (inst == 0xe1a0c00d) /* mov ip, sp */
1465 continue;
1466
1467 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1468 continue;
1469
1470 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1471 continue;
1472
1473 /* Some prologues begin with "str lr, [sp, #-4]!". */
1474 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1475 continue;
1476
1477 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1478 continue;
1479
1480 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1481 continue;
1482
1483 /* Any insns after this point may float into the code, if it makes
1484 for better instruction scheduling, so we skip them only if we
1485 find them, but still consider the function to be frame-ful. */
1486
1487 /* We may have either one sfmfd instruction here, or several stfe
1488 insns, depending on the version of floating point code we
1489 support. */
1490 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1491 continue;
1492
1493 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1494 continue;
1495
1496 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1497 continue;
1498
1499 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1500 continue;
1501
1502 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1503 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1504 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1505 continue;
1506
1507 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1508 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1509 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1510 continue;
1511
1512 /* Un-recognized instruction; stop scanning. */
1513 break;
1514 }
1515
1516 return skip_pc; /* End of prologue. */
1517 }
1518
1519 /* *INDENT-OFF* */
1520 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1521 This function decodes a Thumb function prologue to determine:
1522 1) the size of the stack frame
1523 2) which registers are saved on it
1524 3) the offsets of saved regs
1525 4) the offset from the stack pointer to the frame pointer
1526
1527 A typical Thumb function prologue would create this stack frame
1528 (offsets relative to FP)
1529 old SP -> 24 stack parameters
1530 20 LR
1531 16 R7
1532 R7 -> 0 local variables (16 bytes)
1533 SP -> -12 additional stack space (12 bytes)
1534 The frame size would thus be 36 bytes, and the frame offset would be
1535 12 bytes. The frame register is R7.
1536
1537 The comments for thumb_skip_prolog() describe the algorithm we use
1538 to detect the end of the prolog. */
1539 /* *INDENT-ON* */
1540
1541 static void
1542 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1543 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1544 {
1545 CORE_ADDR prologue_start;
1546 CORE_ADDR prologue_end;
1547 CORE_ADDR current_pc;
1548
1549 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1550 &prologue_end))
1551 {
1552 /* See comment in arm_scan_prologue for an explanation of
1553 this heuristics. */
1554 if (prologue_end > prologue_start + 64)
1555 {
1556 prologue_end = prologue_start + 64;
1557 }
1558 }
1559 else
1560 /* We're in the boondocks: we have no idea where the start of the
1561 function is. */
1562 return;
1563
1564 prologue_end = min (prologue_end, prev_pc);
1565
1566 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1567 }
1568
1569 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1570
1571 static int
1572 arm_instruction_changes_pc (uint32_t this_instr)
1573 {
1574 if (bits (this_instr, 28, 31) == INST_NV)
1575 /* Unconditional instructions. */
1576 switch (bits (this_instr, 24, 27))
1577 {
1578 case 0xa:
1579 case 0xb:
1580 /* Branch with Link and change to Thumb. */
1581 return 1;
1582 case 0xc:
1583 case 0xd:
1584 case 0xe:
1585 /* Coprocessor register transfer. */
1586 if (bits (this_instr, 12, 15) == 15)
1587 error (_("Invalid update to pc in instruction"));
1588 return 0;
1589 default:
1590 return 0;
1591 }
1592 else
1593 switch (bits (this_instr, 25, 27))
1594 {
1595 case 0x0:
1596 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1597 {
1598 /* Multiplies and extra load/stores. */
1599 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1600 /* Neither multiplies nor extension load/stores are allowed
1601 to modify PC. */
1602 return 0;
1603
1604 /* Otherwise, miscellaneous instructions. */
1605
1606 /* BX <reg>, BXJ <reg>, BLX <reg> */
1607 if (bits (this_instr, 4, 27) == 0x12fff1
1608 || bits (this_instr, 4, 27) == 0x12fff2
1609 || bits (this_instr, 4, 27) == 0x12fff3)
1610 return 1;
1611
1612 /* Other miscellaneous instructions are unpredictable if they
1613 modify PC. */
1614 return 0;
1615 }
1616 /* Data processing instruction. Fall through. */
1617
1618 case 0x1:
1619 if (bits (this_instr, 12, 15) == 15)
1620 return 1;
1621 else
1622 return 0;
1623
1624 case 0x2:
1625 case 0x3:
1626 /* Media instructions and architecturally undefined instructions. */
1627 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1628 return 0;
1629
1630 /* Stores. */
1631 if (bit (this_instr, 20) == 0)
1632 return 0;
1633
1634 /* Loads. */
1635 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1636 return 1;
1637 else
1638 return 0;
1639
1640 case 0x4:
1641 /* Load/store multiple. */
1642 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1643 return 1;
1644 else
1645 return 0;
1646
1647 case 0x5:
1648 /* Branch and branch with link. */
1649 return 1;
1650
1651 case 0x6:
1652 case 0x7:
1653 /* Coprocessor transfers or SWIs can not affect PC. */
1654 return 0;
1655
1656 default:
1657 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1658 }
1659 }
1660
1661 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1662 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1663 fill it in. Return the first address not recognized as a prologue
1664 instruction.
1665
1666 We recognize all the instructions typically found in ARM prologues,
1667 plus harmless instructions which can be skipped (either for analysis
1668 purposes, or a more restrictive set that can be skipped when finding
1669 the end of the prologue). */
1670
1671 static CORE_ADDR
1672 arm_analyze_prologue (struct gdbarch *gdbarch,
1673 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1674 struct arm_prologue_cache *cache)
1675 {
1676 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1677 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1678 int regno;
1679 CORE_ADDR offset, current_pc;
1680 pv_t regs[ARM_FPS_REGNUM];
1681 struct pv_area *stack;
1682 struct cleanup *back_to;
1683 int framereg, framesize;
1684 CORE_ADDR unrecognized_pc = 0;
1685
1686 /* Search the prologue looking for instructions that set up the
1687 frame pointer, adjust the stack pointer, and save registers.
1688
1689 Be careful, however, and if it doesn't look like a prologue,
1690 don't try to scan it. If, for instance, a frameless function
1691 begins with stmfd sp!, then we will tell ourselves there is
1692 a frame, which will confuse stack traceback, as well as "finish"
1693 and other operations that rely on a knowledge of the stack
1694 traceback. */
1695
1696 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1697 regs[regno] = pv_register (regno, 0);
1698 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1699 back_to = make_cleanup_free_pv_area (stack);
1700
1701 for (current_pc = prologue_start;
1702 current_pc < prologue_end;
1703 current_pc += 4)
1704 {
1705 unsigned int insn
1706 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1707
1708 if (insn == 0xe1a0c00d) /* mov ip, sp */
1709 {
1710 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1711 continue;
1712 }
1713 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1714 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1715 {
1716 unsigned imm = insn & 0xff; /* immediate value */
1717 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1718 int rd = bits (insn, 12, 15);
1719 imm = (imm >> rot) | (imm << (32 - rot));
1720 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1721 continue;
1722 }
1723 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1724 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1725 {
1726 unsigned imm = insn & 0xff; /* immediate value */
1727 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1728 int rd = bits (insn, 12, 15);
1729 imm = (imm >> rot) | (imm << (32 - rot));
1730 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1731 continue;
1732 }
1733 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1734 [sp, #-4]! */
1735 {
1736 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1737 break;
1738 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1739 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1740 regs[bits (insn, 12, 15)]);
1741 continue;
1742 }
1743 else if ((insn & 0xffff0000) == 0xe92d0000)
1744 /* stmfd sp!, {..., fp, ip, lr, pc}
1745 or
1746 stmfd sp!, {a1, a2, a3, a4} */
1747 {
1748 int mask = insn & 0xffff;
1749
1750 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1751 break;
1752
1753 /* Calculate offsets of saved registers. */
1754 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1755 if (mask & (1 << regno))
1756 {
1757 regs[ARM_SP_REGNUM]
1758 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1759 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1760 }
1761 }
1762 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1763 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1764 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1765 {
1766 /* No need to add this to saved_regs -- it's just an arg reg. */
1767 continue;
1768 }
1769 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1770 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1771 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1772 {
1773 /* No need to add this to saved_regs -- it's just an arg reg. */
1774 continue;
1775 }
1776 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1777 { registers } */
1778 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1779 {
1780 /* No need to add this to saved_regs -- it's just arg regs. */
1781 continue;
1782 }
1783 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1784 {
1785 unsigned imm = insn & 0xff; /* immediate value */
1786 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1787 imm = (imm >> rot) | (imm << (32 - rot));
1788 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1789 }
1790 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1791 {
1792 unsigned imm = insn & 0xff; /* immediate value */
1793 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1794 imm = (imm >> rot) | (imm << (32 - rot));
1795 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1796 }
1797 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1798 [sp, -#c]! */
1799 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1800 {
1801 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1802 break;
1803
1804 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1805 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1806 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1807 }
1808 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1809 [sp!] */
1810 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1811 {
1812 int n_saved_fp_regs;
1813 unsigned int fp_start_reg, fp_bound_reg;
1814
1815 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1816 break;
1817
1818 if ((insn & 0x800) == 0x800) /* N0 is set */
1819 {
1820 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1821 n_saved_fp_regs = 3;
1822 else
1823 n_saved_fp_regs = 1;
1824 }
1825 else
1826 {
1827 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1828 n_saved_fp_regs = 2;
1829 else
1830 n_saved_fp_regs = 4;
1831 }
1832
1833 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1834 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1835 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1836 {
1837 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1838 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1839 regs[fp_start_reg++]);
1840 }
1841 }
1842 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1843 {
1844 /* Allow some special function calls when skipping the
1845 prologue; GCC generates these before storing arguments to
1846 the stack. */
1847 CORE_ADDR dest = BranchDest (current_pc, insn);
1848
1849 if (skip_prologue_function (gdbarch, dest, 0))
1850 continue;
1851 else
1852 break;
1853 }
1854 else if ((insn & 0xf0000000) != 0xe0000000)
1855 break; /* Condition not true, exit early. */
1856 else if (arm_instruction_changes_pc (insn))
1857 /* Don't scan past anything that might change control flow. */
1858 break;
1859 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1860 {
1861 /* Ignore block loads from the stack, potentially copying
1862 parameters from memory. */
1863 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1864 continue;
1865 else
1866 break;
1867 }
1868 else if ((insn & 0xfc500000) == 0xe4100000)
1869 {
1870 /* Similarly ignore single loads from the stack. */
1871 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1872 continue;
1873 else
1874 break;
1875 }
1876 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1877 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1878 register instead of the stack. */
1879 continue;
1880 else
1881 {
1882 /* The optimizer might shove anything into the prologue,
1883 so we just skip what we don't recognize. */
1884 unrecognized_pc = current_pc;
1885 continue;
1886 }
1887 }
1888
1889 if (unrecognized_pc == 0)
1890 unrecognized_pc = current_pc;
1891
1892 /* The frame size is just the distance from the frame register
1893 to the original stack pointer. */
1894 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1895 {
1896 /* Frame pointer is fp. */
1897 framereg = ARM_FP_REGNUM;
1898 framesize = -regs[ARM_FP_REGNUM].k;
1899 }
1900 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1901 {
1902 /* Try the stack pointer... this is a bit desperate. */
1903 framereg = ARM_SP_REGNUM;
1904 framesize = -regs[ARM_SP_REGNUM].k;
1905 }
1906 else
1907 {
1908 /* We're just out of luck. We don't know where the frame is. */
1909 framereg = -1;
1910 framesize = 0;
1911 }
1912
1913 if (cache)
1914 {
1915 cache->framereg = framereg;
1916 cache->framesize = framesize;
1917
1918 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1919 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1920 cache->saved_regs[regno].addr = offset;
1921 }
1922
1923 if (arm_debug)
1924 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1925 paddress (gdbarch, unrecognized_pc));
1926
1927 do_cleanups (back_to);
1928 return unrecognized_pc;
1929 }
1930
1931 static void
1932 arm_scan_prologue (struct frame_info *this_frame,
1933 struct arm_prologue_cache *cache)
1934 {
1935 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1936 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1937 int regno;
1938 CORE_ADDR prologue_start, prologue_end, current_pc;
1939 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1940 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1941 pv_t regs[ARM_FPS_REGNUM];
1942 struct pv_area *stack;
1943 struct cleanup *back_to;
1944 CORE_ADDR offset;
1945
1946 /* Assume there is no frame until proven otherwise. */
1947 cache->framereg = ARM_SP_REGNUM;
1948 cache->framesize = 0;
1949
1950 /* Check for Thumb prologue. */
1951 if (arm_frame_is_thumb (this_frame))
1952 {
1953 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1954 return;
1955 }
1956
1957 /* Find the function prologue. If we can't find the function in
1958 the symbol table, peek in the stack frame to find the PC. */
1959 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1960 &prologue_end))
1961 {
1962 /* One way to find the end of the prologue (which works well
1963 for unoptimized code) is to do the following:
1964
1965 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1966
1967 if (sal.line == 0)
1968 prologue_end = prev_pc;
1969 else if (sal.end < prologue_end)
1970 prologue_end = sal.end;
1971
1972 This mechanism is very accurate so long as the optimizer
1973 doesn't move any instructions from the function body into the
1974 prologue. If this happens, sal.end will be the last
1975 instruction in the first hunk of prologue code just before
1976 the first instruction that the scheduler has moved from
1977 the body to the prologue.
1978
1979 In order to make sure that we scan all of the prologue
1980 instructions, we use a slightly less accurate mechanism which
1981 may scan more than necessary. To help compensate for this
1982 lack of accuracy, the prologue scanning loop below contains
1983 several clauses which'll cause the loop to terminate early if
1984 an implausible prologue instruction is encountered.
1985
1986 The expression
1987
1988 prologue_start + 64
1989
1990 is a suitable endpoint since it accounts for the largest
1991 possible prologue plus up to five instructions inserted by
1992 the scheduler. */
1993
1994 if (prologue_end > prologue_start + 64)
1995 {
1996 prologue_end = prologue_start + 64; /* See above. */
1997 }
1998 }
1999 else
2000 {
2001 /* We have no symbol information. Our only option is to assume this
2002 function has a standard stack frame and the normal frame register.
2003 Then, we can find the value of our frame pointer on entrance to
2004 the callee (or at the present moment if this is the innermost frame).
2005 The value stored there should be the address of the stmfd + 8. */
2006 CORE_ADDR frame_loc;
2007 LONGEST return_value;
2008
2009 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
2010 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
2011 return;
2012 else
2013 {
2014 prologue_start = gdbarch_addr_bits_remove
2015 (gdbarch, return_value) - 8;
2016 prologue_end = prologue_start + 64; /* See above. */
2017 }
2018 }
2019
2020 if (prev_pc < prologue_end)
2021 prologue_end = prev_pc;
2022
2023 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2024 }
2025
2026 static struct arm_prologue_cache *
2027 arm_make_prologue_cache (struct frame_info *this_frame)
2028 {
2029 int reg;
2030 struct arm_prologue_cache *cache;
2031 CORE_ADDR unwound_fp;
2032
2033 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2034 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2035
2036 arm_scan_prologue (this_frame, cache);
2037
2038 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2039 if (unwound_fp == 0)
2040 return cache;
2041
2042 cache->prev_sp = unwound_fp + cache->framesize;
2043
2044 /* Calculate actual addresses of saved registers using offsets
2045 determined by arm_scan_prologue. */
2046 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2047 if (trad_frame_addr_p (cache->saved_regs, reg))
2048 cache->saved_regs[reg].addr += cache->prev_sp;
2049
2050 return cache;
2051 }
2052
2053 /* Our frame ID for a normal frame is the current function's starting PC
2054 and the caller's SP when we were called. */
2055
2056 static void
2057 arm_prologue_this_id (struct frame_info *this_frame,
2058 void **this_cache,
2059 struct frame_id *this_id)
2060 {
2061 struct arm_prologue_cache *cache;
2062 struct frame_id id;
2063 CORE_ADDR pc, func;
2064
2065 if (*this_cache == NULL)
2066 *this_cache = arm_make_prologue_cache (this_frame);
2067 cache = *this_cache;
2068
2069 /* This is meant to halt the backtrace at "_start". */
2070 pc = get_frame_pc (this_frame);
2071 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2072 return;
2073
2074 /* If we've hit a wall, stop. */
2075 if (cache->prev_sp == 0)
2076 return;
2077
2078 /* Use function start address as part of the frame ID. If we cannot
2079 identify the start address (due to missing symbol information),
2080 fall back to just using the current PC. */
2081 func = get_frame_func (this_frame);
2082 if (!func)
2083 func = pc;
2084
2085 id = frame_id_build (cache->prev_sp, func);
2086 *this_id = id;
2087 }
2088
2089 static struct value *
2090 arm_prologue_prev_register (struct frame_info *this_frame,
2091 void **this_cache,
2092 int prev_regnum)
2093 {
2094 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2095 struct arm_prologue_cache *cache;
2096
2097 if (*this_cache == NULL)
2098 *this_cache = arm_make_prologue_cache (this_frame);
2099 cache = *this_cache;
2100
2101 /* If we are asked to unwind the PC, then we need to return the LR
2102 instead. The prologue may save PC, but it will point into this
2103 frame's prologue, not the next frame's resume location. Also
2104 strip the saved T bit. A valid LR may have the low bit set, but
2105 a valid PC never does. */
2106 if (prev_regnum == ARM_PC_REGNUM)
2107 {
2108 CORE_ADDR lr;
2109
2110 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2111 return frame_unwind_got_constant (this_frame, prev_regnum,
2112 arm_addr_bits_remove (gdbarch, lr));
2113 }
2114
2115 /* SP is generally not saved to the stack, but this frame is
2116 identified by the next frame's stack pointer at the time of the call.
2117 The value was already reconstructed into PREV_SP. */
2118 if (prev_regnum == ARM_SP_REGNUM)
2119 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2120
2121 /* The CPSR may have been changed by the call instruction and by the
2122 called function. The only bit we can reconstruct is the T bit,
2123 by checking the low bit of LR as of the call. This is a reliable
2124 indicator of Thumb-ness except for some ARM v4T pre-interworking
2125 Thumb code, which could get away with a clear low bit as long as
2126 the called function did not use bx. Guess that all other
2127 bits are unchanged; the condition flags are presumably lost,
2128 but the processor status is likely valid. */
2129 if (prev_regnum == ARM_PS_REGNUM)
2130 {
2131 CORE_ADDR lr, cpsr;
2132 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2133
2134 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2135 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2136 if (IS_THUMB_ADDR (lr))
2137 cpsr |= t_bit;
2138 else
2139 cpsr &= ~t_bit;
2140 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2141 }
2142
2143 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2144 prev_regnum);
2145 }
2146
2147 struct frame_unwind arm_prologue_unwind = {
2148 NORMAL_FRAME,
2149 arm_prologue_this_id,
2150 arm_prologue_prev_register,
2151 NULL,
2152 default_frame_sniffer
2153 };
2154
2155 /* Maintain a list of ARM exception table entries per objfile, similar to the
2156 list of mapping symbols. We only cache entries for standard ARM-defined
2157 personality routines; the cache will contain only the frame unwinding
2158 instructions associated with the entry (not the descriptors). */
2159
2160 static const struct objfile_data *arm_exidx_data_key;
2161
2162 struct arm_exidx_entry
2163 {
2164 bfd_vma addr;
2165 gdb_byte *entry;
2166 };
2167 typedef struct arm_exidx_entry arm_exidx_entry_s;
2168 DEF_VEC_O(arm_exidx_entry_s);
2169
2170 struct arm_exidx_data
2171 {
2172 VEC(arm_exidx_entry_s) **section_maps;
2173 };
2174
2175 static void
2176 arm_exidx_data_free (struct objfile *objfile, void *arg)
2177 {
2178 struct arm_exidx_data *data = arg;
2179 unsigned int i;
2180
2181 for (i = 0; i < objfile->obfd->section_count; i++)
2182 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2183 }
2184
2185 static inline int
2186 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2187 const struct arm_exidx_entry *rhs)
2188 {
2189 return lhs->addr < rhs->addr;
2190 }
2191
2192 static struct obj_section *
2193 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2194 {
2195 struct obj_section *osect;
2196
2197 ALL_OBJFILE_OSECTIONS (objfile, osect)
2198 if (bfd_get_section_flags (objfile->obfd,
2199 osect->the_bfd_section) & SEC_ALLOC)
2200 {
2201 bfd_vma start, size;
2202 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2203 size = bfd_get_section_size (osect->the_bfd_section);
2204
2205 if (start <= vma && vma < start + size)
2206 return osect;
2207 }
2208
2209 return NULL;
2210 }
2211
2212 /* Parse contents of exception table and exception index sections
2213 of OBJFILE, and fill in the exception table entry cache.
2214
2215 For each entry that refers to a standard ARM-defined personality
2216 routine, extract the frame unwinding instructions (from either
2217 the index or the table section). The unwinding instructions
2218 are normalized by:
2219 - extracting them from the rest of the table data
2220 - converting to host endianness
2221 - appending the implicit 0xb0 ("Finish") code
2222
2223 The extracted and normalized instructions are stored for later
2224 retrieval by the arm_find_exidx_entry routine. */
2225
2226 static void
2227 arm_exidx_new_objfile (struct objfile *objfile)
2228 {
2229 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2230 struct arm_exidx_data *data;
2231 asection *exidx, *extab;
2232 bfd_vma exidx_vma = 0, extab_vma = 0;
2233 bfd_size_type exidx_size = 0, extab_size = 0;
2234 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2235 LONGEST i;
2236
2237 /* If we've already touched this file, do nothing. */
2238 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2239 return;
2240
2241 /* Read contents of exception table and index. */
2242 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2243 if (exidx)
2244 {
2245 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2246 exidx_size = bfd_get_section_size (exidx);
2247 exidx_data = xmalloc (exidx_size);
2248 make_cleanup (xfree, exidx_data);
2249
2250 if (!bfd_get_section_contents (objfile->obfd, exidx,
2251 exidx_data, 0, exidx_size))
2252 {
2253 do_cleanups (cleanups);
2254 return;
2255 }
2256 }
2257
2258 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2259 if (extab)
2260 {
2261 extab_vma = bfd_section_vma (objfile->obfd, extab);
2262 extab_size = bfd_get_section_size (extab);
2263 extab_data = xmalloc (extab_size);
2264 make_cleanup (xfree, extab_data);
2265
2266 if (!bfd_get_section_contents (objfile->obfd, extab,
2267 extab_data, 0, extab_size))
2268 {
2269 do_cleanups (cleanups);
2270 return;
2271 }
2272 }
2273
2274 /* Allocate exception table data structure. */
2275 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2276 set_objfile_data (objfile, arm_exidx_data_key, data);
2277 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2278 objfile->obfd->section_count,
2279 VEC(arm_exidx_entry_s) *);
2280
2281 /* Fill in exception table. */
2282 for (i = 0; i < exidx_size / 8; i++)
2283 {
2284 struct arm_exidx_entry new_exidx_entry;
2285 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2286 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2287 bfd_vma addr = 0, word = 0;
2288 int n_bytes = 0, n_words = 0;
2289 struct obj_section *sec;
2290 gdb_byte *entry = NULL;
2291
2292 /* Extract address of start of function. */
2293 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2294 idx += exidx_vma + i * 8;
2295
2296 /* Find section containing function and compute section offset. */
2297 sec = arm_obj_section_from_vma (objfile, idx);
2298 if (sec == NULL)
2299 continue;
2300 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2301
2302 /* Determine address of exception table entry. */
2303 if (val == 1)
2304 {
2305 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2306 }
2307 else if ((val & 0xff000000) == 0x80000000)
2308 {
2309 /* Exception table entry embedded in .ARM.exidx
2310 -- must be short form. */
2311 word = val;
2312 n_bytes = 3;
2313 }
2314 else if (!(val & 0x80000000))
2315 {
2316 /* Exception table entry in .ARM.extab. */
2317 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2318 addr += exidx_vma + i * 8 + 4;
2319
2320 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2321 {
2322 word = bfd_h_get_32 (objfile->obfd,
2323 extab_data + addr - extab_vma);
2324 addr += 4;
2325
2326 if ((word & 0xff000000) == 0x80000000)
2327 {
2328 /* Short form. */
2329 n_bytes = 3;
2330 }
2331 else if ((word & 0xff000000) == 0x81000000
2332 || (word & 0xff000000) == 0x82000000)
2333 {
2334 /* Long form. */
2335 n_bytes = 2;
2336 n_words = ((word >> 16) & 0xff);
2337 }
2338 else if (!(word & 0x80000000))
2339 {
2340 bfd_vma pers;
2341 struct obj_section *pers_sec;
2342 int gnu_personality = 0;
2343
2344 /* Custom personality routine. */
2345 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2346 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2347
2348 /* Check whether we've got one of the variants of the
2349 GNU personality routines. */
2350 pers_sec = arm_obj_section_from_vma (objfile, pers);
2351 if (pers_sec)
2352 {
2353 static const char *personality[] =
2354 {
2355 "__gcc_personality_v0",
2356 "__gxx_personality_v0",
2357 "__gcj_personality_v0",
2358 "__gnu_objc_personality_v0",
2359 NULL
2360 };
2361
2362 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2363 int k;
2364
2365 for (k = 0; personality[k]; k++)
2366 if (lookup_minimal_symbol_by_pc_name
2367 (pc, personality[k], objfile))
2368 {
2369 gnu_personality = 1;
2370 break;
2371 }
2372 }
2373
2374 /* If so, the next word contains a word count in the high
2375 byte, followed by the same unwind instructions as the
2376 pre-defined forms. */
2377 if (gnu_personality
2378 && addr + 4 <= extab_vma + extab_size)
2379 {
2380 word = bfd_h_get_32 (objfile->obfd,
2381 extab_data + addr - extab_vma);
2382 addr += 4;
2383 n_bytes = 3;
2384 n_words = ((word >> 24) & 0xff);
2385 }
2386 }
2387 }
2388 }
2389
2390 /* Sanity check address. */
2391 if (n_words)
2392 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2393 n_words = n_bytes = 0;
2394
2395 /* The unwind instructions reside in WORD (only the N_BYTES least
2396 significant bytes are valid), followed by N_WORDS words in the
2397 extab section starting at ADDR. */
2398 if (n_bytes || n_words)
2399 {
2400 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2401 n_bytes + n_words * 4 + 1);
2402
2403 while (n_bytes--)
2404 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2405
2406 while (n_words--)
2407 {
2408 word = bfd_h_get_32 (objfile->obfd,
2409 extab_data + addr - extab_vma);
2410 addr += 4;
2411
2412 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2413 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2414 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2415 *p++ = (gdb_byte) (word & 0xff);
2416 }
2417
2418 /* Implied "Finish" to terminate the list. */
2419 *p++ = 0xb0;
2420 }
2421
2422 /* Push entry onto vector. They are guaranteed to always
2423 appear in order of increasing addresses. */
2424 new_exidx_entry.addr = idx;
2425 new_exidx_entry.entry = entry;
2426 VEC_safe_push (arm_exidx_entry_s,
2427 data->section_maps[sec->the_bfd_section->index],
2428 &new_exidx_entry);
2429 }
2430
2431 do_cleanups (cleanups);
2432 }
2433
2434 /* Search for the exception table entry covering MEMADDR. If one is found,
2435 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2436 set *START to the start of the region covered by this entry. */
2437
2438 static gdb_byte *
2439 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2440 {
2441 struct obj_section *sec;
2442
2443 sec = find_pc_section (memaddr);
2444 if (sec != NULL)
2445 {
2446 struct arm_exidx_data *data;
2447 VEC(arm_exidx_entry_s) *map;
2448 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2449 unsigned int idx;
2450
2451 data = objfile_data (sec->objfile, arm_exidx_data_key);
2452 if (data != NULL)
2453 {
2454 map = data->section_maps[sec->the_bfd_section->index];
2455 if (!VEC_empty (arm_exidx_entry_s, map))
2456 {
2457 struct arm_exidx_entry *map_sym;
2458
2459 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2460 arm_compare_exidx_entries);
2461
2462 /* VEC_lower_bound finds the earliest ordered insertion
2463 point. If the following symbol starts at this exact
2464 address, we use that; otherwise, the preceding
2465 exception table entry covers this address. */
2466 if (idx < VEC_length (arm_exidx_entry_s, map))
2467 {
2468 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2469 if (map_sym->addr == map_key.addr)
2470 {
2471 if (start)
2472 *start = map_sym->addr + obj_section_addr (sec);
2473 return map_sym->entry;
2474 }
2475 }
2476
2477 if (idx > 0)
2478 {
2479 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2480 if (start)
2481 *start = map_sym->addr + obj_section_addr (sec);
2482 return map_sym->entry;
2483 }
2484 }
2485 }
2486 }
2487
2488 return NULL;
2489 }
2490
2491 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2492 instruction list from the ARM exception table entry ENTRY, allocate and
2493 return a prologue cache structure describing how to unwind this frame.
2494
2495 Return NULL if the unwinding instruction list contains a "spare",
2496 "reserved" or "refuse to unwind" instruction as defined in section
2497 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2498 for the ARM Architecture" document. */
2499
2500 static struct arm_prologue_cache *
2501 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2502 {
2503 CORE_ADDR vsp = 0;
2504 int vsp_valid = 0;
2505
2506 struct arm_prologue_cache *cache;
2507 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2508 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2509
2510 for (;;)
2511 {
2512 gdb_byte insn;
2513
2514 /* Whenever we reload SP, we actually have to retrieve its
2515 actual value in the current frame. */
2516 if (!vsp_valid)
2517 {
2518 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2519 {
2520 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2521 vsp = get_frame_register_unsigned (this_frame, reg);
2522 }
2523 else
2524 {
2525 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2526 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2527 }
2528
2529 vsp_valid = 1;
2530 }
2531
2532 /* Decode next unwind instruction. */
2533 insn = *entry++;
2534
2535 if ((insn & 0xc0) == 0)
2536 {
2537 int offset = insn & 0x3f;
2538 vsp += (offset << 2) + 4;
2539 }
2540 else if ((insn & 0xc0) == 0x40)
2541 {
2542 int offset = insn & 0x3f;
2543 vsp -= (offset << 2) + 4;
2544 }
2545 else if ((insn & 0xf0) == 0x80)
2546 {
2547 int mask = ((insn & 0xf) << 8) | *entry++;
2548 int i;
2549
2550 /* The special case of an all-zero mask identifies
2551 "Refuse to unwind". We return NULL to fall back
2552 to the prologue analyzer. */
2553 if (mask == 0)
2554 return NULL;
2555
2556 /* Pop registers r4..r15 under mask. */
2557 for (i = 0; i < 12; i++)
2558 if (mask & (1 << i))
2559 {
2560 cache->saved_regs[4 + i].addr = vsp;
2561 vsp += 4;
2562 }
2563
2564 /* Special-case popping SP -- we need to reload vsp. */
2565 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2566 vsp_valid = 0;
2567 }
2568 else if ((insn & 0xf0) == 0x90)
2569 {
2570 int reg = insn & 0xf;
2571
2572 /* Reserved cases. */
2573 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2574 return NULL;
2575
2576 /* Set SP from another register and mark VSP for reload. */
2577 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2578 vsp_valid = 0;
2579 }
2580 else if ((insn & 0xf0) == 0xa0)
2581 {
2582 int count = insn & 0x7;
2583 int pop_lr = (insn & 0x8) != 0;
2584 int i;
2585
2586 /* Pop r4..r[4+count]. */
2587 for (i = 0; i <= count; i++)
2588 {
2589 cache->saved_regs[4 + i].addr = vsp;
2590 vsp += 4;
2591 }
2592
2593 /* If indicated by flag, pop LR as well. */
2594 if (pop_lr)
2595 {
2596 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2597 vsp += 4;
2598 }
2599 }
2600 else if (insn == 0xb0)
2601 {
2602 /* We could only have updated PC by popping into it; if so, it
2603 will show up as address. Otherwise, copy LR into PC. */
2604 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2605 cache->saved_regs[ARM_PC_REGNUM]
2606 = cache->saved_regs[ARM_LR_REGNUM];
2607
2608 /* We're done. */
2609 break;
2610 }
2611 else if (insn == 0xb1)
2612 {
2613 int mask = *entry++;
2614 int i;
2615
2616 /* All-zero mask and mask >= 16 is "spare". */
2617 if (mask == 0 || mask >= 16)
2618 return NULL;
2619
2620 /* Pop r0..r3 under mask. */
2621 for (i = 0; i < 4; i++)
2622 if (mask & (1 << i))
2623 {
2624 cache->saved_regs[i].addr = vsp;
2625 vsp += 4;
2626 }
2627 }
2628 else if (insn == 0xb2)
2629 {
2630 ULONGEST offset = 0;
2631 unsigned shift = 0;
2632
2633 do
2634 {
2635 offset |= (*entry & 0x7f) << shift;
2636 shift += 7;
2637 }
2638 while (*entry++ & 0x80);
2639
2640 vsp += 0x204 + (offset << 2);
2641 }
2642 else if (insn == 0xb3)
2643 {
2644 int start = *entry >> 4;
2645 int count = (*entry++) & 0xf;
2646 int i;
2647
2648 /* Only registers D0..D15 are valid here. */
2649 if (start + count >= 16)
2650 return NULL;
2651
2652 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2653 for (i = 0; i <= count; i++)
2654 {
2655 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2656 vsp += 8;
2657 }
2658
2659 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2660 vsp += 4;
2661 }
2662 else if ((insn & 0xf8) == 0xb8)
2663 {
2664 int count = insn & 0x7;
2665 int i;
2666
2667 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2668 for (i = 0; i <= count; i++)
2669 {
2670 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2671 vsp += 8;
2672 }
2673
2674 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2675 vsp += 4;
2676 }
2677 else if (insn == 0xc6)
2678 {
2679 int start = *entry >> 4;
2680 int count = (*entry++) & 0xf;
2681 int i;
2682
2683 /* Only registers WR0..WR15 are valid. */
2684 if (start + count >= 16)
2685 return NULL;
2686
2687 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2688 for (i = 0; i <= count; i++)
2689 {
2690 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2691 vsp += 8;
2692 }
2693 }
2694 else if (insn == 0xc7)
2695 {
2696 int mask = *entry++;
2697 int i;
2698
2699 /* All-zero mask and mask >= 16 is "spare". */
2700 if (mask == 0 || mask >= 16)
2701 return NULL;
2702
2703 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2704 for (i = 0; i < 4; i++)
2705 if (mask & (1 << i))
2706 {
2707 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2708 vsp += 4;
2709 }
2710 }
2711 else if ((insn & 0xf8) == 0xc0)
2712 {
2713 int count = insn & 0x7;
2714 int i;
2715
2716 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2717 for (i = 0; i <= count; i++)
2718 {
2719 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2720 vsp += 8;
2721 }
2722 }
2723 else if (insn == 0xc8)
2724 {
2725 int start = *entry >> 4;
2726 int count = (*entry++) & 0xf;
2727 int i;
2728
2729 /* Only registers D0..D31 are valid. */
2730 if (start + count >= 16)
2731 return NULL;
2732
2733 /* Pop VFP double-precision registers
2734 D[16+start]..D[16+start+count]. */
2735 for (i = 0; i <= count; i++)
2736 {
2737 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2738 vsp += 8;
2739 }
2740 }
2741 else if (insn == 0xc9)
2742 {
2743 int start = *entry >> 4;
2744 int count = (*entry++) & 0xf;
2745 int i;
2746
2747 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2748 for (i = 0; i <= count; i++)
2749 {
2750 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2751 vsp += 8;
2752 }
2753 }
2754 else if ((insn & 0xf8) == 0xd0)
2755 {
2756 int count = insn & 0x7;
2757 int i;
2758
2759 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2760 for (i = 0; i <= count; i++)
2761 {
2762 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2763 vsp += 8;
2764 }
2765 }
2766 else
2767 {
2768 /* Everything else is "spare". */
2769 return NULL;
2770 }
2771 }
2772
2773 /* If we restore SP from a register, assume this was the frame register.
2774 Otherwise just fall back to SP as frame register. */
2775 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2776 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2777 else
2778 cache->framereg = ARM_SP_REGNUM;
2779
2780 /* Determine offset to previous frame. */
2781 cache->framesize
2782 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2783
2784 /* We already got the previous SP. */
2785 cache->prev_sp = vsp;
2786
2787 return cache;
2788 }
2789
2790 /* Unwinding via ARM exception table entries. Note that the sniffer
2791 already computes a filled-in prologue cache, which is then used
2792 with the same arm_prologue_this_id and arm_prologue_prev_register
2793 routines also used for prologue-parsing based unwinding. */
2794
2795 static int
2796 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2797 struct frame_info *this_frame,
2798 void **this_prologue_cache)
2799 {
2800 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2801 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2802 CORE_ADDR addr_in_block, exidx_region, func_start;
2803 struct arm_prologue_cache *cache;
2804 gdb_byte *entry;
2805
2806 /* See if we have an ARM exception table entry covering this address. */
2807 addr_in_block = get_frame_address_in_block (this_frame);
2808 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2809 if (!entry)
2810 return 0;
2811
2812 /* The ARM exception table does not describe unwind information
2813 for arbitrary PC values, but is guaranteed to be correct only
2814 at call sites. We have to decide here whether we want to use
2815 ARM exception table information for this frame, or fall back
2816 to using prologue parsing. (Note that if we have DWARF CFI,
2817 this sniffer isn't even called -- CFI is always preferred.)
2818
2819 Before we make this decision, however, we check whether we
2820 actually have *symbol* information for the current frame.
2821 If not, prologue parsing would not work anyway, so we might
2822 as well use the exception table and hope for the best. */
2823 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2824 {
2825 int exc_valid = 0;
2826
2827 /* If the next frame is "normal", we are at a call site in this
2828 frame, so exception information is guaranteed to be valid. */
2829 if (get_next_frame (this_frame)
2830 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2831 exc_valid = 1;
2832
2833 /* We also assume exception information is valid if we're currently
2834 blocked in a system call. The system library is supposed to
2835 ensure this, so that e.g. pthread cancellation works. */
2836 if (arm_frame_is_thumb (this_frame))
2837 {
2838 LONGEST insn;
2839
2840 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2841 byte_order_for_code, &insn)
2842 && (insn & 0xff00) == 0xdf00 /* svc */)
2843 exc_valid = 1;
2844 }
2845 else
2846 {
2847 LONGEST insn;
2848
2849 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2850 byte_order_for_code, &insn)
2851 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2852 exc_valid = 1;
2853 }
2854
2855 /* Bail out if we don't know that exception information is valid. */
2856 if (!exc_valid)
2857 return 0;
2858
2859 /* The ARM exception index does not mark the *end* of the region
2860 covered by the entry, and some functions will not have any entry.
2861 To correctly recognize the end of the covered region, the linker
2862 should have inserted dummy records with a CANTUNWIND marker.
2863
2864 Unfortunately, current versions of GNU ld do not reliably do
2865 this, and thus we may have found an incorrect entry above.
2866 As a (temporary) sanity check, we only use the entry if it
2867 lies *within* the bounds of the function. Note that this check
2868 might reject perfectly valid entries that just happen to cover
2869 multiple functions; therefore this check ought to be removed
2870 once the linker is fixed. */
2871 if (func_start > exidx_region)
2872 return 0;
2873 }
2874
2875 /* Decode the list of unwinding instructions into a prologue cache.
2876 Note that this may fail due to e.g. a "refuse to unwind" code. */
2877 cache = arm_exidx_fill_cache (this_frame, entry);
2878 if (!cache)
2879 return 0;
2880
2881 *this_prologue_cache = cache;
2882 return 1;
2883 }
2884
2885 struct frame_unwind arm_exidx_unwind = {
2886 NORMAL_FRAME,
2887 arm_prologue_this_id,
2888 arm_prologue_prev_register,
2889 NULL,
2890 arm_exidx_unwind_sniffer
2891 };
2892
2893 static struct arm_prologue_cache *
2894 arm_make_stub_cache (struct frame_info *this_frame)
2895 {
2896 struct arm_prologue_cache *cache;
2897
2898 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2899 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2900
2901 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2902
2903 return cache;
2904 }
2905
2906 /* Our frame ID for a stub frame is the current SP and LR. */
2907
2908 static void
2909 arm_stub_this_id (struct frame_info *this_frame,
2910 void **this_cache,
2911 struct frame_id *this_id)
2912 {
2913 struct arm_prologue_cache *cache;
2914
2915 if (*this_cache == NULL)
2916 *this_cache = arm_make_stub_cache (this_frame);
2917 cache = *this_cache;
2918
2919 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2920 }
2921
2922 static int
2923 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2924 struct frame_info *this_frame,
2925 void **this_prologue_cache)
2926 {
2927 CORE_ADDR addr_in_block;
2928 char dummy[4];
2929
2930 addr_in_block = get_frame_address_in_block (this_frame);
2931 if (in_plt_section (addr_in_block, NULL)
2932 /* We also use the stub winder if the target memory is unreadable
2933 to avoid having the prologue unwinder trying to read it. */
2934 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2935 return 1;
2936
2937 return 0;
2938 }
2939
2940 struct frame_unwind arm_stub_unwind = {
2941 NORMAL_FRAME,
2942 arm_stub_this_id,
2943 arm_prologue_prev_register,
2944 NULL,
2945 arm_stub_unwind_sniffer
2946 };
2947
2948 static CORE_ADDR
2949 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2950 {
2951 struct arm_prologue_cache *cache;
2952
2953 if (*this_cache == NULL)
2954 *this_cache = arm_make_prologue_cache (this_frame);
2955 cache = *this_cache;
2956
2957 return cache->prev_sp - cache->framesize;
2958 }
2959
2960 struct frame_base arm_normal_base = {
2961 &arm_prologue_unwind,
2962 arm_normal_frame_base,
2963 arm_normal_frame_base,
2964 arm_normal_frame_base
2965 };
2966
2967 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2968 dummy frame. The frame ID's base needs to match the TOS value
2969 saved by save_dummy_frame_tos() and returned from
2970 arm_push_dummy_call, and the PC needs to match the dummy frame's
2971 breakpoint. */
2972
2973 static struct frame_id
2974 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2975 {
2976 return frame_id_build (get_frame_register_unsigned (this_frame,
2977 ARM_SP_REGNUM),
2978 get_frame_pc (this_frame));
2979 }
2980
2981 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2982 be used to construct the previous frame's ID, after looking up the
2983 containing function). */
2984
2985 static CORE_ADDR
2986 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2987 {
2988 CORE_ADDR pc;
2989 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2990 return arm_addr_bits_remove (gdbarch, pc);
2991 }
2992
2993 static CORE_ADDR
2994 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2995 {
2996 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2997 }
2998
2999 static struct value *
3000 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3001 int regnum)
3002 {
3003 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3004 CORE_ADDR lr, cpsr;
3005 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3006
3007 switch (regnum)
3008 {
3009 case ARM_PC_REGNUM:
3010 /* The PC is normally copied from the return column, which
3011 describes saves of LR. However, that version may have an
3012 extra bit set to indicate Thumb state. The bit is not
3013 part of the PC. */
3014 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3015 return frame_unwind_got_constant (this_frame, regnum,
3016 arm_addr_bits_remove (gdbarch, lr));
3017
3018 case ARM_PS_REGNUM:
3019 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3020 cpsr = get_frame_register_unsigned (this_frame, regnum);
3021 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3022 if (IS_THUMB_ADDR (lr))
3023 cpsr |= t_bit;
3024 else
3025 cpsr &= ~t_bit;
3026 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3027
3028 default:
3029 internal_error (__FILE__, __LINE__,
3030 _("Unexpected register %d"), regnum);
3031 }
3032 }
3033
3034 static void
3035 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3036 struct dwarf2_frame_state_reg *reg,
3037 struct frame_info *this_frame)
3038 {
3039 switch (regnum)
3040 {
3041 case ARM_PC_REGNUM:
3042 case ARM_PS_REGNUM:
3043 reg->how = DWARF2_FRAME_REG_FN;
3044 reg->loc.fn = arm_dwarf2_prev_register;
3045 break;
3046 case ARM_SP_REGNUM:
3047 reg->how = DWARF2_FRAME_REG_CFA;
3048 break;
3049 }
3050 }
3051
3052 /* Return true if we are in the function's epilogue, i.e. after the
3053 instruction that destroyed the function's stack frame. */
3054
3055 static int
3056 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3057 {
3058 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3059 unsigned int insn, insn2;
3060 int found_return = 0, found_stack_adjust = 0;
3061 CORE_ADDR func_start, func_end;
3062 CORE_ADDR scan_pc;
3063 gdb_byte buf[4];
3064
3065 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3066 return 0;
3067
3068 /* The epilogue is a sequence of instructions along the following lines:
3069
3070 - add stack frame size to SP or FP
3071 - [if frame pointer used] restore SP from FP
3072 - restore registers from SP [may include PC]
3073 - a return-type instruction [if PC wasn't already restored]
3074
3075 In a first pass, we scan forward from the current PC and verify the
3076 instructions we find as compatible with this sequence, ending in a
3077 return instruction.
3078
3079 However, this is not sufficient to distinguish indirect function calls
3080 within a function from indirect tail calls in the epilogue in some cases.
3081 Therefore, if we didn't already find any SP-changing instruction during
3082 forward scan, we add a backward scanning heuristic to ensure we actually
3083 are in the epilogue. */
3084
3085 scan_pc = pc;
3086 while (scan_pc < func_end && !found_return)
3087 {
3088 if (target_read_memory (scan_pc, buf, 2))
3089 break;
3090
3091 scan_pc += 2;
3092 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3093
3094 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3095 found_return = 1;
3096 else if (insn == 0x46f7) /* mov pc, lr */
3097 found_return = 1;
3098 else if (insn == 0x46bd) /* mov sp, r7 */
3099 found_stack_adjust = 1;
3100 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3101 found_stack_adjust = 1;
3102 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3103 {
3104 found_stack_adjust = 1;
3105 if (insn & 0x0100) /* <registers> include PC. */
3106 found_return = 1;
3107 }
3108 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
3109 {
3110 if (target_read_memory (scan_pc, buf, 2))
3111 break;
3112
3113 scan_pc += 2;
3114 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3115
3116 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3117 {
3118 found_stack_adjust = 1;
3119 if (insn2 & 0x8000) /* <registers> include PC. */
3120 found_return = 1;
3121 }
3122 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3123 && (insn2 & 0x0fff) == 0x0b04)
3124 {
3125 found_stack_adjust = 1;
3126 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3127 found_return = 1;
3128 }
3129 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3130 && (insn2 & 0x0e00) == 0x0a00)
3131 found_stack_adjust = 1;
3132 else
3133 break;
3134 }
3135 else
3136 break;
3137 }
3138
3139 if (!found_return)
3140 return 0;
3141
3142 /* Since any instruction in the epilogue sequence, with the possible
3143 exception of return itself, updates the stack pointer, we need to
3144 scan backwards for at most one instruction. Try either a 16-bit or
3145 a 32-bit instruction. This is just a heuristic, so we do not worry
3146 too much about false positives. */
3147
3148 if (!found_stack_adjust)
3149 {
3150 if (pc - 4 < func_start)
3151 return 0;
3152 if (target_read_memory (pc - 4, buf, 4))
3153 return 0;
3154
3155 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3156 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3157
3158 if (insn2 == 0x46bd) /* mov sp, r7 */
3159 found_stack_adjust = 1;
3160 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3161 found_stack_adjust = 1;
3162 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3163 found_stack_adjust = 1;
3164 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3165 found_stack_adjust = 1;
3166 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3167 && (insn2 & 0x0fff) == 0x0b04)
3168 found_stack_adjust = 1;
3169 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3170 && (insn2 & 0x0e00) == 0x0a00)
3171 found_stack_adjust = 1;
3172 }
3173
3174 return found_stack_adjust;
3175 }
3176
3177 /* Return true if we are in the function's epilogue, i.e. after the
3178 instruction that destroyed the function's stack frame. */
3179
3180 static int
3181 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3182 {
3183 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3184 unsigned int insn;
3185 int found_return, found_stack_adjust;
3186 CORE_ADDR func_start, func_end;
3187
3188 if (arm_pc_is_thumb (gdbarch, pc))
3189 return thumb_in_function_epilogue_p (gdbarch, pc);
3190
3191 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3192 return 0;
3193
3194 /* We are in the epilogue if the previous instruction was a stack
3195 adjustment and the next instruction is a possible return (bx, mov
3196 pc, or pop). We could have to scan backwards to find the stack
3197 adjustment, or forwards to find the return, but this is a decent
3198 approximation. First scan forwards. */
3199
3200 found_return = 0;
3201 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3202 if (bits (insn, 28, 31) != INST_NV)
3203 {
3204 if ((insn & 0x0ffffff0) == 0x012fff10)
3205 /* BX. */
3206 found_return = 1;
3207 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3208 /* MOV PC. */
3209 found_return = 1;
3210 else if ((insn & 0x0fff0000) == 0x08bd0000
3211 && (insn & 0x0000c000) != 0)
3212 /* POP (LDMIA), including PC or LR. */
3213 found_return = 1;
3214 }
3215
3216 if (!found_return)
3217 return 0;
3218
3219 /* Scan backwards. This is just a heuristic, so do not worry about
3220 false positives from mode changes. */
3221
3222 if (pc < func_start + 4)
3223 return 0;
3224
3225 found_stack_adjust = 0;
3226 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3227 if (bits (insn, 28, 31) != INST_NV)
3228 {
3229 if ((insn & 0x0df0f000) == 0x0080d000)
3230 /* ADD SP (register or immediate). */
3231 found_stack_adjust = 1;
3232 else if ((insn & 0x0df0f000) == 0x0040d000)
3233 /* SUB SP (register or immediate). */
3234 found_stack_adjust = 1;
3235 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3236 /* MOV SP. */
3237 found_stack_adjust = 1;
3238 else if ((insn & 0x0fff0000) == 0x08bd0000)
3239 /* POP (LDMIA). */
3240 found_stack_adjust = 1;
3241 }
3242
3243 if (found_stack_adjust)
3244 return 1;
3245
3246 return 0;
3247 }
3248
3249
3250 /* When arguments must be pushed onto the stack, they go on in reverse
3251 order. The code below implements a FILO (stack) to do this. */
3252
3253 struct stack_item
3254 {
3255 int len;
3256 struct stack_item *prev;
3257 void *data;
3258 };
3259
3260 static struct stack_item *
3261 push_stack_item (struct stack_item *prev, const void *contents, int len)
3262 {
3263 struct stack_item *si;
3264 si = xmalloc (sizeof (struct stack_item));
3265 si->data = xmalloc (len);
3266 si->len = len;
3267 si->prev = prev;
3268 memcpy (si->data, contents, len);
3269 return si;
3270 }
3271
3272 static struct stack_item *
3273 pop_stack_item (struct stack_item *si)
3274 {
3275 struct stack_item *dead = si;
3276 si = si->prev;
3277 xfree (dead->data);
3278 xfree (dead);
3279 return si;
3280 }
3281
3282
3283 /* Return the alignment (in bytes) of the given type. */
3284
3285 static int
3286 arm_type_align (struct type *t)
3287 {
3288 int n;
3289 int align;
3290 int falign;
3291
3292 t = check_typedef (t);
3293 switch (TYPE_CODE (t))
3294 {
3295 default:
3296 /* Should never happen. */
3297 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3298 return 4;
3299
3300 case TYPE_CODE_PTR:
3301 case TYPE_CODE_ENUM:
3302 case TYPE_CODE_INT:
3303 case TYPE_CODE_FLT:
3304 case TYPE_CODE_SET:
3305 case TYPE_CODE_RANGE:
3306 case TYPE_CODE_BITSTRING:
3307 case TYPE_CODE_REF:
3308 case TYPE_CODE_CHAR:
3309 case TYPE_CODE_BOOL:
3310 return TYPE_LENGTH (t);
3311
3312 case TYPE_CODE_ARRAY:
3313 case TYPE_CODE_COMPLEX:
3314 /* TODO: What about vector types? */
3315 return arm_type_align (TYPE_TARGET_TYPE (t));
3316
3317 case TYPE_CODE_STRUCT:
3318 case TYPE_CODE_UNION:
3319 align = 1;
3320 for (n = 0; n < TYPE_NFIELDS (t); n++)
3321 {
3322 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3323 if (falign > align)
3324 align = falign;
3325 }
3326 return align;
3327 }
3328 }
3329
3330 /* Possible base types for a candidate for passing and returning in
3331 VFP registers. */
3332
3333 enum arm_vfp_cprc_base_type
3334 {
3335 VFP_CPRC_UNKNOWN,
3336 VFP_CPRC_SINGLE,
3337 VFP_CPRC_DOUBLE,
3338 VFP_CPRC_VEC64,
3339 VFP_CPRC_VEC128
3340 };
3341
3342 /* The length of one element of base type B. */
3343
3344 static unsigned
3345 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3346 {
3347 switch (b)
3348 {
3349 case VFP_CPRC_SINGLE:
3350 return 4;
3351 case VFP_CPRC_DOUBLE:
3352 return 8;
3353 case VFP_CPRC_VEC64:
3354 return 8;
3355 case VFP_CPRC_VEC128:
3356 return 16;
3357 default:
3358 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3359 (int) b);
3360 }
3361 }
3362
3363 /* The character ('s', 'd' or 'q') for the type of VFP register used
3364 for passing base type B. */
3365
3366 static int
3367 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3368 {
3369 switch (b)
3370 {
3371 case VFP_CPRC_SINGLE:
3372 return 's';
3373 case VFP_CPRC_DOUBLE:
3374 return 'd';
3375 case VFP_CPRC_VEC64:
3376 return 'd';
3377 case VFP_CPRC_VEC128:
3378 return 'q';
3379 default:
3380 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3381 (int) b);
3382 }
3383 }
3384
3385 /* Determine whether T may be part of a candidate for passing and
3386 returning in VFP registers, ignoring the limit on the total number
3387 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3388 classification of the first valid component found; if it is not
3389 VFP_CPRC_UNKNOWN, all components must have the same classification
3390 as *BASE_TYPE. If it is found that T contains a type not permitted
3391 for passing and returning in VFP registers, a type differently
3392 classified from *BASE_TYPE, or two types differently classified
3393 from each other, return -1, otherwise return the total number of
3394 base-type elements found (possibly 0 in an empty structure or
3395 array). Vectors and complex types are not currently supported,
3396 matching the generic AAPCS support. */
3397
3398 static int
3399 arm_vfp_cprc_sub_candidate (struct type *t,
3400 enum arm_vfp_cprc_base_type *base_type)
3401 {
3402 t = check_typedef (t);
3403 switch (TYPE_CODE (t))
3404 {
3405 case TYPE_CODE_FLT:
3406 switch (TYPE_LENGTH (t))
3407 {
3408 case 4:
3409 if (*base_type == VFP_CPRC_UNKNOWN)
3410 *base_type = VFP_CPRC_SINGLE;
3411 else if (*base_type != VFP_CPRC_SINGLE)
3412 return -1;
3413 return 1;
3414
3415 case 8:
3416 if (*base_type == VFP_CPRC_UNKNOWN)
3417 *base_type = VFP_CPRC_DOUBLE;
3418 else if (*base_type != VFP_CPRC_DOUBLE)
3419 return -1;
3420 return 1;
3421
3422 default:
3423 return -1;
3424 }
3425 break;
3426
3427 case TYPE_CODE_ARRAY:
3428 {
3429 int count;
3430 unsigned unitlen;
3431 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3432 if (count == -1)
3433 return -1;
3434 if (TYPE_LENGTH (t) == 0)
3435 {
3436 gdb_assert (count == 0);
3437 return 0;
3438 }
3439 else if (count == 0)
3440 return -1;
3441 unitlen = arm_vfp_cprc_unit_length (*base_type);
3442 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3443 return TYPE_LENGTH (t) / unitlen;
3444 }
3445 break;
3446
3447 case TYPE_CODE_STRUCT:
3448 {
3449 int count = 0;
3450 unsigned unitlen;
3451 int i;
3452 for (i = 0; i < TYPE_NFIELDS (t); i++)
3453 {
3454 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3455 base_type);
3456 if (sub_count == -1)
3457 return -1;
3458 count += sub_count;
3459 }
3460 if (TYPE_LENGTH (t) == 0)
3461 {
3462 gdb_assert (count == 0);
3463 return 0;
3464 }
3465 else if (count == 0)
3466 return -1;
3467 unitlen = arm_vfp_cprc_unit_length (*base_type);
3468 if (TYPE_LENGTH (t) != unitlen * count)
3469 return -1;
3470 return count;
3471 }
3472
3473 case TYPE_CODE_UNION:
3474 {
3475 int count = 0;
3476 unsigned unitlen;
3477 int i;
3478 for (i = 0; i < TYPE_NFIELDS (t); i++)
3479 {
3480 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3481 base_type);
3482 if (sub_count == -1)
3483 return -1;
3484 count = (count > sub_count ? count : sub_count);
3485 }
3486 if (TYPE_LENGTH (t) == 0)
3487 {
3488 gdb_assert (count == 0);
3489 return 0;
3490 }
3491 else if (count == 0)
3492 return -1;
3493 unitlen = arm_vfp_cprc_unit_length (*base_type);
3494 if (TYPE_LENGTH (t) != unitlen * count)
3495 return -1;
3496 return count;
3497 }
3498
3499 default:
3500 break;
3501 }
3502
3503 return -1;
3504 }
3505
3506 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3507 if passed to or returned from a non-variadic function with the VFP
3508 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3509 *BASE_TYPE to the base type for T and *COUNT to the number of
3510 elements of that base type before returning. */
3511
3512 static int
3513 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3514 int *count)
3515 {
3516 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3517 int c = arm_vfp_cprc_sub_candidate (t, &b);
3518 if (c <= 0 || c > 4)
3519 return 0;
3520 *base_type = b;
3521 *count = c;
3522 return 1;
3523 }
3524
3525 /* Return 1 if the VFP ABI should be used for passing arguments to and
3526 returning values from a function of type FUNC_TYPE, 0
3527 otherwise. */
3528
3529 static int
3530 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3531 {
3532 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3533 /* Variadic functions always use the base ABI. Assume that functions
3534 without debug info are not variadic. */
3535 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3536 return 0;
3537 /* The VFP ABI is only supported as a variant of AAPCS. */
3538 if (tdep->arm_abi != ARM_ABI_AAPCS)
3539 return 0;
3540 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3541 }
3542
3543 /* We currently only support passing parameters in integer registers, which
3544 conforms with GCC's default model, and VFP argument passing following
3545 the VFP variant of AAPCS. Several other variants exist and
3546 we should probably support some of them based on the selected ABI. */
3547
3548 static CORE_ADDR
3549 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3550 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3551 struct value **args, CORE_ADDR sp, int struct_return,
3552 CORE_ADDR struct_addr)
3553 {
3554 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3555 int argnum;
3556 int argreg;
3557 int nstack;
3558 struct stack_item *si = NULL;
3559 int use_vfp_abi;
3560 struct type *ftype;
3561 unsigned vfp_regs_free = (1 << 16) - 1;
3562
3563 /* Determine the type of this function and whether the VFP ABI
3564 applies. */
3565 ftype = check_typedef (value_type (function));
3566 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3567 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3568 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3569
3570 /* Set the return address. For the ARM, the return breakpoint is
3571 always at BP_ADDR. */
3572 if (arm_pc_is_thumb (gdbarch, bp_addr))
3573 bp_addr |= 1;
3574 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3575
3576 /* Walk through the list of args and determine how large a temporary
3577 stack is required. Need to take care here as structs may be
3578 passed on the stack, and we have to to push them. */
3579 nstack = 0;
3580
3581 argreg = ARM_A1_REGNUM;
3582 nstack = 0;
3583
3584 /* The struct_return pointer occupies the first parameter
3585 passing register. */
3586 if (struct_return)
3587 {
3588 if (arm_debug)
3589 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3590 gdbarch_register_name (gdbarch, argreg),
3591 paddress (gdbarch, struct_addr));
3592 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3593 argreg++;
3594 }
3595
3596 for (argnum = 0; argnum < nargs; argnum++)
3597 {
3598 int len;
3599 struct type *arg_type;
3600 struct type *target_type;
3601 enum type_code typecode;
3602 const bfd_byte *val;
3603 int align;
3604 enum arm_vfp_cprc_base_type vfp_base_type;
3605 int vfp_base_count;
3606 int may_use_core_reg = 1;
3607
3608 arg_type = check_typedef (value_type (args[argnum]));
3609 len = TYPE_LENGTH (arg_type);
3610 target_type = TYPE_TARGET_TYPE (arg_type);
3611 typecode = TYPE_CODE (arg_type);
3612 val = value_contents (args[argnum]);
3613
3614 align = arm_type_align (arg_type);
3615 /* Round alignment up to a whole number of words. */
3616 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3617 /* Different ABIs have different maximum alignments. */
3618 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3619 {
3620 /* The APCS ABI only requires word alignment. */
3621 align = INT_REGISTER_SIZE;
3622 }
3623 else
3624 {
3625 /* The AAPCS requires at most doubleword alignment. */
3626 if (align > INT_REGISTER_SIZE * 2)
3627 align = INT_REGISTER_SIZE * 2;
3628 }
3629
3630 if (use_vfp_abi
3631 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3632 &vfp_base_count))
3633 {
3634 int regno;
3635 int unit_length;
3636 int shift;
3637 unsigned mask;
3638
3639 /* Because this is a CPRC it cannot go in a core register or
3640 cause a core register to be skipped for alignment.
3641 Either it goes in VFP registers and the rest of this loop
3642 iteration is skipped for this argument, or it goes on the
3643 stack (and the stack alignment code is correct for this
3644 case). */
3645 may_use_core_reg = 0;
3646
3647 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3648 shift = unit_length / 4;
3649 mask = (1 << (shift * vfp_base_count)) - 1;
3650 for (regno = 0; regno < 16; regno += shift)
3651 if (((vfp_regs_free >> regno) & mask) == mask)
3652 break;
3653
3654 if (regno < 16)
3655 {
3656 int reg_char;
3657 int reg_scaled;
3658 int i;
3659
3660 vfp_regs_free &= ~(mask << regno);
3661 reg_scaled = regno / shift;
3662 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3663 for (i = 0; i < vfp_base_count; i++)
3664 {
3665 char name_buf[4];
3666 int regnum;
3667 if (reg_char == 'q')
3668 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3669 val + i * unit_length);
3670 else
3671 {
3672 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3673 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3674 strlen (name_buf));
3675 regcache_cooked_write (regcache, regnum,
3676 val + i * unit_length);
3677 }
3678 }
3679 continue;
3680 }
3681 else
3682 {
3683 /* This CPRC could not go in VFP registers, so all VFP
3684 registers are now marked as used. */
3685 vfp_regs_free = 0;
3686 }
3687 }
3688
3689 /* Push stack padding for dowubleword alignment. */
3690 if (nstack & (align - 1))
3691 {
3692 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3693 nstack += INT_REGISTER_SIZE;
3694 }
3695
3696 /* Doubleword aligned quantities must go in even register pairs. */
3697 if (may_use_core_reg
3698 && argreg <= ARM_LAST_ARG_REGNUM
3699 && align > INT_REGISTER_SIZE
3700 && argreg & 1)
3701 argreg++;
3702
3703 /* If the argument is a pointer to a function, and it is a
3704 Thumb function, create a LOCAL copy of the value and set
3705 the THUMB bit in it. */
3706 if (TYPE_CODE_PTR == typecode
3707 && target_type != NULL
3708 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3709 {
3710 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3711 if (arm_pc_is_thumb (gdbarch, regval))
3712 {
3713 bfd_byte *copy = alloca (len);
3714 store_unsigned_integer (copy, len, byte_order,
3715 MAKE_THUMB_ADDR (regval));
3716 val = copy;
3717 }
3718 }
3719
3720 /* Copy the argument to general registers or the stack in
3721 register-sized pieces. Large arguments are split between
3722 registers and stack. */
3723 while (len > 0)
3724 {
3725 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3726
3727 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3728 {
3729 /* The argument is being passed in a general purpose
3730 register. */
3731 CORE_ADDR regval
3732 = extract_unsigned_integer (val, partial_len, byte_order);
3733 if (byte_order == BFD_ENDIAN_BIG)
3734 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3735 if (arm_debug)
3736 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3737 argnum,
3738 gdbarch_register_name
3739 (gdbarch, argreg),
3740 phex (regval, INT_REGISTER_SIZE));
3741 regcache_cooked_write_unsigned (regcache, argreg, regval);
3742 argreg++;
3743 }
3744 else
3745 {
3746 /* Push the arguments onto the stack. */
3747 if (arm_debug)
3748 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3749 argnum, nstack);
3750 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3751 nstack += INT_REGISTER_SIZE;
3752 }
3753
3754 len -= partial_len;
3755 val += partial_len;
3756 }
3757 }
3758 /* If we have an odd number of words to push, then decrement the stack
3759 by one word now, so first stack argument will be dword aligned. */
3760 if (nstack & 4)
3761 sp -= 4;
3762
3763 while (si)
3764 {
3765 sp -= si->len;
3766 write_memory (sp, si->data, si->len);
3767 si = pop_stack_item (si);
3768 }
3769
3770 /* Finally, update teh SP register. */
3771 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3772
3773 return sp;
3774 }
3775
3776
3777 /* Always align the frame to an 8-byte boundary. This is required on
3778 some platforms and harmless on the rest. */
3779
3780 static CORE_ADDR
3781 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3782 {
3783 /* Align the stack to eight bytes. */
3784 return sp & ~ (CORE_ADDR) 7;
3785 }
3786
3787 static void
3788 print_fpu_flags (int flags)
3789 {
3790 if (flags & (1 << 0))
3791 fputs ("IVO ", stdout);
3792 if (flags & (1 << 1))
3793 fputs ("DVZ ", stdout);
3794 if (flags & (1 << 2))
3795 fputs ("OFL ", stdout);
3796 if (flags & (1 << 3))
3797 fputs ("UFL ", stdout);
3798 if (flags & (1 << 4))
3799 fputs ("INX ", stdout);
3800 putchar ('\n');
3801 }
3802
3803 /* Print interesting information about the floating point processor
3804 (if present) or emulator. */
3805 static void
3806 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3807 struct frame_info *frame, const char *args)
3808 {
3809 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3810 int type;
3811
3812 type = (status >> 24) & 127;
3813 if (status & (1 << 31))
3814 printf (_("Hardware FPU type %d\n"), type);
3815 else
3816 printf (_("Software FPU type %d\n"), type);
3817 /* i18n: [floating point unit] mask */
3818 fputs (_("mask: "), stdout);
3819 print_fpu_flags (status >> 16);
3820 /* i18n: [floating point unit] flags */
3821 fputs (_("flags: "), stdout);
3822 print_fpu_flags (status);
3823 }
3824
3825 /* Construct the ARM extended floating point type. */
3826 static struct type *
3827 arm_ext_type (struct gdbarch *gdbarch)
3828 {
3829 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3830
3831 if (!tdep->arm_ext_type)
3832 tdep->arm_ext_type
3833 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3834 floatformats_arm_ext);
3835
3836 return tdep->arm_ext_type;
3837 }
3838
3839 static struct type *
3840 arm_neon_double_type (struct gdbarch *gdbarch)
3841 {
3842 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3843
3844 if (tdep->neon_double_type == NULL)
3845 {
3846 struct type *t, *elem;
3847
3848 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3849 TYPE_CODE_UNION);
3850 elem = builtin_type (gdbarch)->builtin_uint8;
3851 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3852 elem = builtin_type (gdbarch)->builtin_uint16;
3853 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3854 elem = builtin_type (gdbarch)->builtin_uint32;
3855 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3856 elem = builtin_type (gdbarch)->builtin_uint64;
3857 append_composite_type_field (t, "u64", elem);
3858 elem = builtin_type (gdbarch)->builtin_float;
3859 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3860 elem = builtin_type (gdbarch)->builtin_double;
3861 append_composite_type_field (t, "f64", elem);
3862
3863 TYPE_VECTOR (t) = 1;
3864 TYPE_NAME (t) = "neon_d";
3865 tdep->neon_double_type = t;
3866 }
3867
3868 return tdep->neon_double_type;
3869 }
3870
3871 /* FIXME: The vector types are not correctly ordered on big-endian
3872 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3873 bits of d0 - regardless of what unit size is being held in d0. So
3874 the offset of the first uint8 in d0 is 7, but the offset of the
3875 first float is 4. This code works as-is for little-endian
3876 targets. */
3877
3878 static struct type *
3879 arm_neon_quad_type (struct gdbarch *gdbarch)
3880 {
3881 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3882
3883 if (tdep->neon_quad_type == NULL)
3884 {
3885 struct type *t, *elem;
3886
3887 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3888 TYPE_CODE_UNION);
3889 elem = builtin_type (gdbarch)->builtin_uint8;
3890 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3891 elem = builtin_type (gdbarch)->builtin_uint16;
3892 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3893 elem = builtin_type (gdbarch)->builtin_uint32;
3894 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3895 elem = builtin_type (gdbarch)->builtin_uint64;
3896 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3897 elem = builtin_type (gdbarch)->builtin_float;
3898 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3899 elem = builtin_type (gdbarch)->builtin_double;
3900 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3901
3902 TYPE_VECTOR (t) = 1;
3903 TYPE_NAME (t) = "neon_q";
3904 tdep->neon_quad_type = t;
3905 }
3906
3907 return tdep->neon_quad_type;
3908 }
3909
3910 /* Return the GDB type object for the "standard" data type of data in
3911 register N. */
3912
3913 static struct type *
3914 arm_register_type (struct gdbarch *gdbarch, int regnum)
3915 {
3916 int num_regs = gdbarch_num_regs (gdbarch);
3917
3918 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3919 && regnum >= num_regs && regnum < num_regs + 32)
3920 return builtin_type (gdbarch)->builtin_float;
3921
3922 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3923 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3924 return arm_neon_quad_type (gdbarch);
3925
3926 /* If the target description has register information, we are only
3927 in this function so that we can override the types of
3928 double-precision registers for NEON. */
3929 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3930 {
3931 struct type *t = tdesc_register_type (gdbarch, regnum);
3932
3933 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3934 && TYPE_CODE (t) == TYPE_CODE_FLT
3935 && gdbarch_tdep (gdbarch)->have_neon)
3936 return arm_neon_double_type (gdbarch);
3937 else
3938 return t;
3939 }
3940
3941 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3942 {
3943 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3944 return builtin_type (gdbarch)->builtin_void;
3945
3946 return arm_ext_type (gdbarch);
3947 }
3948 else if (regnum == ARM_SP_REGNUM)
3949 return builtin_type (gdbarch)->builtin_data_ptr;
3950 else if (regnum == ARM_PC_REGNUM)
3951 return builtin_type (gdbarch)->builtin_func_ptr;
3952 else if (regnum >= ARRAY_SIZE (arm_register_names))
3953 /* These registers are only supported on targets which supply
3954 an XML description. */
3955 return builtin_type (gdbarch)->builtin_int0;
3956 else
3957 return builtin_type (gdbarch)->builtin_uint32;
3958 }
3959
3960 /* Map a DWARF register REGNUM onto the appropriate GDB register
3961 number. */
3962
3963 static int
3964 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3965 {
3966 /* Core integer regs. */
3967 if (reg >= 0 && reg <= 15)
3968 return reg;
3969
3970 /* Legacy FPA encoding. These were once used in a way which
3971 overlapped with VFP register numbering, so their use is
3972 discouraged, but GDB doesn't support the ARM toolchain
3973 which used them for VFP. */
3974 if (reg >= 16 && reg <= 23)
3975 return ARM_F0_REGNUM + reg - 16;
3976
3977 /* New assignments for the FPA registers. */
3978 if (reg >= 96 && reg <= 103)
3979 return ARM_F0_REGNUM + reg - 96;
3980
3981 /* WMMX register assignments. */
3982 if (reg >= 104 && reg <= 111)
3983 return ARM_WCGR0_REGNUM + reg - 104;
3984
3985 if (reg >= 112 && reg <= 127)
3986 return ARM_WR0_REGNUM + reg - 112;
3987
3988 if (reg >= 192 && reg <= 199)
3989 return ARM_WC0_REGNUM + reg - 192;
3990
3991 /* VFP v2 registers. A double precision value is actually
3992 in d1 rather than s2, but the ABI only defines numbering
3993 for the single precision registers. This will "just work"
3994 in GDB for little endian targets (we'll read eight bytes,
3995 starting in s0 and then progressing to s1), but will be
3996 reversed on big endian targets with VFP. This won't
3997 be a problem for the new Neon quad registers; you're supposed
3998 to use DW_OP_piece for those. */
3999 if (reg >= 64 && reg <= 95)
4000 {
4001 char name_buf[4];
4002
4003 sprintf (name_buf, "s%d", reg - 64);
4004 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4005 strlen (name_buf));
4006 }
4007
4008 /* VFP v3 / Neon registers. This range is also used for VFP v2
4009 registers, except that it now describes d0 instead of s0. */
4010 if (reg >= 256 && reg <= 287)
4011 {
4012 char name_buf[4];
4013
4014 sprintf (name_buf, "d%d", reg - 256);
4015 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4016 strlen (name_buf));
4017 }
4018
4019 return -1;
4020 }
4021
4022 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4023 static int
4024 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4025 {
4026 int reg = regnum;
4027 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4028
4029 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4030 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4031
4032 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4033 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4034
4035 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4036 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4037
4038 if (reg < NUM_GREGS)
4039 return SIM_ARM_R0_REGNUM + reg;
4040 reg -= NUM_GREGS;
4041
4042 if (reg < NUM_FREGS)
4043 return SIM_ARM_FP0_REGNUM + reg;
4044 reg -= NUM_FREGS;
4045
4046 if (reg < NUM_SREGS)
4047 return SIM_ARM_FPS_REGNUM + reg;
4048 reg -= NUM_SREGS;
4049
4050 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4051 }
4052
4053 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4054 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4055 It is thought that this is is the floating-point register format on
4056 little-endian systems. */
4057
4058 static void
4059 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4060 void *dbl, int endianess)
4061 {
4062 DOUBLEST d;
4063
4064 if (endianess == BFD_ENDIAN_BIG)
4065 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4066 else
4067 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4068 ptr, &d);
4069 floatformat_from_doublest (fmt, &d, dbl);
4070 }
4071
4072 static void
4073 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4074 int endianess)
4075 {
4076 DOUBLEST d;
4077
4078 floatformat_to_doublest (fmt, ptr, &d);
4079 if (endianess == BFD_ENDIAN_BIG)
4080 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4081 else
4082 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4083 &d, dbl);
4084 }
4085
4086 static int
4087 condition_true (unsigned long cond, unsigned long status_reg)
4088 {
4089 if (cond == INST_AL || cond == INST_NV)
4090 return 1;
4091
4092 switch (cond)
4093 {
4094 case INST_EQ:
4095 return ((status_reg & FLAG_Z) != 0);
4096 case INST_NE:
4097 return ((status_reg & FLAG_Z) == 0);
4098 case INST_CS:
4099 return ((status_reg & FLAG_C) != 0);
4100 case INST_CC:
4101 return ((status_reg & FLAG_C) == 0);
4102 case INST_MI:
4103 return ((status_reg & FLAG_N) != 0);
4104 case INST_PL:
4105 return ((status_reg & FLAG_N) == 0);
4106 case INST_VS:
4107 return ((status_reg & FLAG_V) != 0);
4108 case INST_VC:
4109 return ((status_reg & FLAG_V) == 0);
4110 case INST_HI:
4111 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4112 case INST_LS:
4113 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4114 case INST_GE:
4115 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4116 case INST_LT:
4117 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4118 case INST_GT:
4119 return (((status_reg & FLAG_Z) == 0)
4120 && (((status_reg & FLAG_N) == 0)
4121 == ((status_reg & FLAG_V) == 0)));
4122 case INST_LE:
4123 return (((status_reg & FLAG_Z) != 0)
4124 || (((status_reg & FLAG_N) == 0)
4125 != ((status_reg & FLAG_V) == 0)));
4126 }
4127 return 1;
4128 }
4129
4130 static unsigned long
4131 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4132 unsigned long pc_val, unsigned long status_reg)
4133 {
4134 unsigned long res, shift;
4135 int rm = bits (inst, 0, 3);
4136 unsigned long shifttype = bits (inst, 5, 6);
4137
4138 if (bit (inst, 4))
4139 {
4140 int rs = bits (inst, 8, 11);
4141 shift = (rs == 15 ? pc_val + 8
4142 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4143 }
4144 else
4145 shift = bits (inst, 7, 11);
4146
4147 res = (rm == 15
4148 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4149 : get_frame_register_unsigned (frame, rm));
4150
4151 switch (shifttype)
4152 {
4153 case 0: /* LSL */
4154 res = shift >= 32 ? 0 : res << shift;
4155 break;
4156
4157 case 1: /* LSR */
4158 res = shift >= 32 ? 0 : res >> shift;
4159 break;
4160
4161 case 2: /* ASR */
4162 if (shift >= 32)
4163 shift = 31;
4164 res = ((res & 0x80000000L)
4165 ? ~((~res) >> shift) : res >> shift);
4166 break;
4167
4168 case 3: /* ROR/RRX */
4169 shift &= 31;
4170 if (shift == 0)
4171 res = (res >> 1) | (carry ? 0x80000000L : 0);
4172 else
4173 res = (res >> shift) | (res << (32 - shift));
4174 break;
4175 }
4176
4177 return res & 0xffffffff;
4178 }
4179
4180 /* Return number of 1-bits in VAL. */
4181
4182 static int
4183 bitcount (unsigned long val)
4184 {
4185 int nbits;
4186 for (nbits = 0; val != 0; nbits++)
4187 val &= val - 1; /* Delete rightmost 1-bit in val. */
4188 return nbits;
4189 }
4190
4191 /* Return the size in bytes of the complete Thumb instruction whose
4192 first halfword is INST1. */
4193
4194 static int
4195 thumb_insn_size (unsigned short inst1)
4196 {
4197 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4198 return 4;
4199 else
4200 return 2;
4201 }
4202
4203 static int
4204 thumb_advance_itstate (unsigned int itstate)
4205 {
4206 /* Preserve IT[7:5], the first three bits of the condition. Shift
4207 the upcoming condition flags left by one bit. */
4208 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
4209
4210 /* If we have finished the IT block, clear the state. */
4211 if ((itstate & 0x0f) == 0)
4212 itstate = 0;
4213
4214 return itstate;
4215 }
4216
4217 /* Find the next PC after the current instruction executes. In some
4218 cases we can not statically determine the answer (see the IT state
4219 handling in this function); in that case, a breakpoint may be
4220 inserted in addition to the returned PC, which will be used to set
4221 another breakpoint by our caller. */
4222
4223 static CORE_ADDR
4224 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
4225 {
4226 struct gdbarch *gdbarch = get_frame_arch (frame);
4227 struct address_space *aspace = get_frame_address_space (frame);
4228 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4229 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4230 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4231 unsigned short inst1;
4232 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4233 unsigned long offset;
4234 ULONGEST status, itstate;
4235
4236 nextpc = MAKE_THUMB_ADDR (nextpc);
4237 pc_val = MAKE_THUMB_ADDR (pc_val);
4238
4239 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4240
4241 /* Thumb-2 conditional execution support. There are eight bits in
4242 the CPSR which describe conditional execution state. Once
4243 reconstructed (they're in a funny order), the low five bits
4244 describe the low bit of the condition for each instruction and
4245 how many instructions remain. The high three bits describe the
4246 base condition. One of the low four bits will be set if an IT
4247 block is active. These bits read as zero on earlier
4248 processors. */
4249 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4250 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
4251
4252 /* If-Then handling. On GNU/Linux, where this routine is used, we
4253 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4254 can disable execution of the undefined instruction. So we might
4255 miss the breakpoint if we set it on a skipped conditional
4256 instruction. Because conditional instructions can change the
4257 flags, affecting the execution of further instructions, we may
4258 need to set two breakpoints. */
4259
4260 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4261 {
4262 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4263 {
4264 /* An IT instruction. Because this instruction does not
4265 modify the flags, we can accurately predict the next
4266 executed instruction. */
4267 itstate = inst1 & 0x00ff;
4268 pc += thumb_insn_size (inst1);
4269
4270 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4271 {
4272 inst1 = read_memory_unsigned_integer (pc, 2,
4273 byte_order_for_code);
4274 pc += thumb_insn_size (inst1);
4275 itstate = thumb_advance_itstate (itstate);
4276 }
4277
4278 return MAKE_THUMB_ADDR (pc);
4279 }
4280 else if (itstate != 0)
4281 {
4282 /* We are in a conditional block. Check the condition. */
4283 if (! condition_true (itstate >> 4, status))
4284 {
4285 /* Advance to the next executed instruction. */
4286 pc += thumb_insn_size (inst1);
4287 itstate = thumb_advance_itstate (itstate);
4288
4289 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4290 {
4291 inst1 = read_memory_unsigned_integer (pc, 2,
4292 byte_order_for_code);
4293 pc += thumb_insn_size (inst1);
4294 itstate = thumb_advance_itstate (itstate);
4295 }
4296
4297 return MAKE_THUMB_ADDR (pc);
4298 }
4299 else if ((itstate & 0x0f) == 0x08)
4300 {
4301 /* This is the last instruction of the conditional
4302 block, and it is executed. We can handle it normally
4303 because the following instruction is not conditional,
4304 and we must handle it normally because it is
4305 permitted to branch. Fall through. */
4306 }
4307 else
4308 {
4309 int cond_negated;
4310
4311 /* There are conditional instructions after this one.
4312 If this instruction modifies the flags, then we can
4313 not predict what the next executed instruction will
4314 be. Fortunately, this instruction is architecturally
4315 forbidden to branch; we know it will fall through.
4316 Start by skipping past it. */
4317 pc += thumb_insn_size (inst1);
4318 itstate = thumb_advance_itstate (itstate);
4319
4320 /* Set a breakpoint on the following instruction. */
4321 gdb_assert ((itstate & 0x0f) != 0);
4322 if (insert_bkpt)
4323 insert_single_step_breakpoint (gdbarch, aspace, pc);
4324 cond_negated = (itstate >> 4) & 1;
4325
4326 /* Skip all following instructions with the same
4327 condition. If there is a later instruction in the IT
4328 block with the opposite condition, set the other
4329 breakpoint there. If not, then set a breakpoint on
4330 the instruction after the IT block. */
4331 do
4332 {
4333 inst1 = read_memory_unsigned_integer (pc, 2,
4334 byte_order_for_code);
4335 pc += thumb_insn_size (inst1);
4336 itstate = thumb_advance_itstate (itstate);
4337 }
4338 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4339
4340 return MAKE_THUMB_ADDR (pc);
4341 }
4342 }
4343 }
4344 else if (itstate & 0x0f)
4345 {
4346 /* We are in a conditional block. Check the condition. */
4347 int cond = itstate >> 4;
4348
4349 if (! condition_true (cond, status))
4350 {
4351 /* Advance to the next instruction. All the 32-bit
4352 instructions share a common prefix. */
4353 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4354 return MAKE_THUMB_ADDR (pc + 4);
4355 else
4356 return MAKE_THUMB_ADDR (pc + 2);
4357 }
4358
4359 /* Otherwise, handle the instruction normally. */
4360 }
4361
4362 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4363 {
4364 CORE_ADDR sp;
4365
4366 /* Fetch the saved PC from the stack. It's stored above
4367 all of the other registers. */
4368 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
4369 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4370 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4371 }
4372 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4373 {
4374 unsigned long cond = bits (inst1, 8, 11);
4375 if (cond == 0x0f) /* 0x0f = SWI */
4376 {
4377 struct gdbarch_tdep *tdep;
4378 tdep = gdbarch_tdep (gdbarch);
4379
4380 if (tdep->syscall_next_pc != NULL)
4381 nextpc = tdep->syscall_next_pc (frame);
4382
4383 }
4384 else if (cond != 0x0f && condition_true (cond, status))
4385 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4386 }
4387 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4388 {
4389 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4390 }
4391 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
4392 {
4393 unsigned short inst2;
4394 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4395
4396 /* Default to the next instruction. */
4397 nextpc = pc + 4;
4398 nextpc = MAKE_THUMB_ADDR (nextpc);
4399
4400 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4401 {
4402 /* Branches and miscellaneous control instructions. */
4403
4404 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4405 {
4406 /* B, BL, BLX. */
4407 int j1, j2, imm1, imm2;
4408
4409 imm1 = sbits (inst1, 0, 10);
4410 imm2 = bits (inst2, 0, 10);
4411 j1 = bit (inst2, 13);
4412 j2 = bit (inst2, 11);
4413
4414 offset = ((imm1 << 12) + (imm2 << 1));
4415 offset ^= ((!j2) << 22) | ((!j1) << 23);
4416
4417 nextpc = pc_val + offset;
4418 /* For BLX make sure to clear the low bits. */
4419 if (bit (inst2, 12) == 0)
4420 nextpc = nextpc & 0xfffffffc;
4421 }
4422 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4423 {
4424 /* SUBS PC, LR, #imm8. */
4425 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4426 nextpc -= inst2 & 0x00ff;
4427 }
4428 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4429 {
4430 /* Conditional branch. */
4431 if (condition_true (bits (inst1, 6, 9), status))
4432 {
4433 int sign, j1, j2, imm1, imm2;
4434
4435 sign = sbits (inst1, 10, 10);
4436 imm1 = bits (inst1, 0, 5);
4437 imm2 = bits (inst2, 0, 10);
4438 j1 = bit (inst2, 13);
4439 j2 = bit (inst2, 11);
4440
4441 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4442 offset += (imm1 << 12) + (imm2 << 1);
4443
4444 nextpc = pc_val + offset;
4445 }
4446 }
4447 }
4448 else if ((inst1 & 0xfe50) == 0xe810)
4449 {
4450 /* Load multiple or RFE. */
4451 int rn, offset, load_pc = 1;
4452
4453 rn = bits (inst1, 0, 3);
4454 if (bit (inst1, 7) && !bit (inst1, 8))
4455 {
4456 /* LDMIA or POP */
4457 if (!bit (inst2, 15))
4458 load_pc = 0;
4459 offset = bitcount (inst2) * 4 - 4;
4460 }
4461 else if (!bit (inst1, 7) && bit (inst1, 8))
4462 {
4463 /* LDMDB */
4464 if (!bit (inst2, 15))
4465 load_pc = 0;
4466 offset = -4;
4467 }
4468 else if (bit (inst1, 7) && bit (inst1, 8))
4469 {
4470 /* RFEIA */
4471 offset = 0;
4472 }
4473 else if (!bit (inst1, 7) && !bit (inst1, 8))
4474 {
4475 /* RFEDB */
4476 offset = -8;
4477 }
4478 else
4479 load_pc = 0;
4480
4481 if (load_pc)
4482 {
4483 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4484 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4485 }
4486 }
4487 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4488 {
4489 /* MOV PC or MOVS PC. */
4490 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4491 nextpc = MAKE_THUMB_ADDR (nextpc);
4492 }
4493 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
4494 {
4495 /* LDR PC. */
4496 CORE_ADDR base;
4497 int rn, load_pc = 1;
4498
4499 rn = bits (inst1, 0, 3);
4500 base = get_frame_register_unsigned (frame, rn);
4501 if (rn == 15)
4502 {
4503 base = (base + 4) & ~(CORE_ADDR) 0x3;
4504 if (bit (inst1, 7))
4505 base += bits (inst2, 0, 11);
4506 else
4507 base -= bits (inst2, 0, 11);
4508 }
4509 else if (bit (inst1, 7))
4510 base += bits (inst2, 0, 11);
4511 else if (bit (inst2, 11))
4512 {
4513 if (bit (inst2, 10))
4514 {
4515 if (bit (inst2, 9))
4516 base += bits (inst2, 0, 7);
4517 else
4518 base -= bits (inst2, 0, 7);
4519 }
4520 }
4521 else if ((inst2 & 0x0fc0) == 0x0000)
4522 {
4523 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4524 base += get_frame_register_unsigned (frame, rm) << shift;
4525 }
4526 else
4527 /* Reserved. */
4528 load_pc = 0;
4529
4530 if (load_pc)
4531 nextpc = get_frame_memory_unsigned (frame, base, 4);
4532 }
4533 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
4534 {
4535 /* TBB. */
4536 CORE_ADDR tbl_reg, table, offset, length;
4537
4538 tbl_reg = bits (inst1, 0, 3);
4539 if (tbl_reg == 0x0f)
4540 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4541 else
4542 table = get_frame_register_unsigned (frame, tbl_reg);
4543
4544 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4545 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4546 nextpc = pc_val + length;
4547 }
4548 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
4549 {
4550 /* TBH. */
4551 CORE_ADDR tbl_reg, table, offset, length;
4552
4553 tbl_reg = bits (inst1, 0, 3);
4554 if (tbl_reg == 0x0f)
4555 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4556 else
4557 table = get_frame_register_unsigned (frame, tbl_reg);
4558
4559 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4560 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4561 nextpc = pc_val + length;
4562 }
4563 }
4564 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4565 {
4566 if (bits (inst1, 3, 6) == 0x0f)
4567 nextpc = pc_val;
4568 else
4569 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4570 }
4571 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4572 {
4573 if (bits (inst1, 3, 6) == 0x0f)
4574 nextpc = pc_val;
4575 else
4576 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4577
4578 nextpc = MAKE_THUMB_ADDR (nextpc);
4579 }
4580 else if ((inst1 & 0xf500) == 0xb100)
4581 {
4582 /* CBNZ or CBZ. */
4583 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4584 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4585
4586 if (bit (inst1, 11) && reg != 0)
4587 nextpc = pc_val + imm;
4588 else if (!bit (inst1, 11) && reg == 0)
4589 nextpc = pc_val + imm;
4590 }
4591 return nextpc;
4592 }
4593
4594 /* Get the raw next address. PC is the current program counter, in
4595 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
4596 the alternative next instruction if there are two options.
4597
4598 The value returned has the execution state of the next instruction
4599 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4600 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4601 address. */
4602
4603 static CORE_ADDR
4604 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
4605 {
4606 struct gdbarch *gdbarch = get_frame_arch (frame);
4607 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4608 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4609 unsigned long pc_val;
4610 unsigned long this_instr;
4611 unsigned long status;
4612 CORE_ADDR nextpc;
4613
4614 if (arm_frame_is_thumb (frame))
4615 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
4616
4617 pc_val = (unsigned long) pc;
4618 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4619
4620 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4621 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4622
4623 if (bits (this_instr, 28, 31) == INST_NV)
4624 switch (bits (this_instr, 24, 27))
4625 {
4626 case 0xa:
4627 case 0xb:
4628 {
4629 /* Branch with Link and change to Thumb. */
4630 nextpc = BranchDest (pc, this_instr);
4631 nextpc |= bit (this_instr, 24) << 1;
4632 nextpc = MAKE_THUMB_ADDR (nextpc);
4633 break;
4634 }
4635 case 0xc:
4636 case 0xd:
4637 case 0xe:
4638 /* Coprocessor register transfer. */
4639 if (bits (this_instr, 12, 15) == 15)
4640 error (_("Invalid update to pc in instruction"));
4641 break;
4642 }
4643 else if (condition_true (bits (this_instr, 28, 31), status))
4644 {
4645 switch (bits (this_instr, 24, 27))
4646 {
4647 case 0x0:
4648 case 0x1: /* data processing */
4649 case 0x2:
4650 case 0x3:
4651 {
4652 unsigned long operand1, operand2, result = 0;
4653 unsigned long rn;
4654 int c;
4655
4656 if (bits (this_instr, 12, 15) != 15)
4657 break;
4658
4659 if (bits (this_instr, 22, 25) == 0
4660 && bits (this_instr, 4, 7) == 9) /* multiply */
4661 error (_("Invalid update to pc in instruction"));
4662
4663 /* BX <reg>, BLX <reg> */
4664 if (bits (this_instr, 4, 27) == 0x12fff1
4665 || bits (this_instr, 4, 27) == 0x12fff3)
4666 {
4667 rn = bits (this_instr, 0, 3);
4668 nextpc = (rn == 15) ? pc_val + 8
4669 : get_frame_register_unsigned (frame, rn);
4670 return nextpc;
4671 }
4672
4673 /* Multiply into PC. */
4674 c = (status & FLAG_C) ? 1 : 0;
4675 rn = bits (this_instr, 16, 19);
4676 operand1 = (rn == 15) ? pc_val + 8
4677 : get_frame_register_unsigned (frame, rn);
4678
4679 if (bit (this_instr, 25))
4680 {
4681 unsigned long immval = bits (this_instr, 0, 7);
4682 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4683 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4684 & 0xffffffff;
4685 }
4686 else /* operand 2 is a shifted register. */
4687 operand2 = shifted_reg_val (frame, this_instr, c,
4688 pc_val, status);
4689
4690 switch (bits (this_instr, 21, 24))
4691 {
4692 case 0x0: /*and */
4693 result = operand1 & operand2;
4694 break;
4695
4696 case 0x1: /*eor */
4697 result = operand1 ^ operand2;
4698 break;
4699
4700 case 0x2: /*sub */
4701 result = operand1 - operand2;
4702 break;
4703
4704 case 0x3: /*rsb */
4705 result = operand2 - operand1;
4706 break;
4707
4708 case 0x4: /*add */
4709 result = operand1 + operand2;
4710 break;
4711
4712 case 0x5: /*adc */
4713 result = operand1 + operand2 + c;
4714 break;
4715
4716 case 0x6: /*sbc */
4717 result = operand1 - operand2 + c;
4718 break;
4719
4720 case 0x7: /*rsc */
4721 result = operand2 - operand1 + c;
4722 break;
4723
4724 case 0x8:
4725 case 0x9:
4726 case 0xa:
4727 case 0xb: /* tst, teq, cmp, cmn */
4728 result = (unsigned long) nextpc;
4729 break;
4730
4731 case 0xc: /*orr */
4732 result = operand1 | operand2;
4733 break;
4734
4735 case 0xd: /*mov */
4736 /* Always step into a function. */
4737 result = operand2;
4738 break;
4739
4740 case 0xe: /*bic */
4741 result = operand1 & ~operand2;
4742 break;
4743
4744 case 0xf: /*mvn */
4745 result = ~operand2;
4746 break;
4747 }
4748
4749 /* In 26-bit APCS the bottom two bits of the result are
4750 ignored, and we always end up in ARM state. */
4751 if (!arm_apcs_32)
4752 nextpc = arm_addr_bits_remove (gdbarch, result);
4753 else
4754 nextpc = result;
4755
4756 break;
4757 }
4758
4759 case 0x4:
4760 case 0x5: /* data transfer */
4761 case 0x6:
4762 case 0x7:
4763 if (bit (this_instr, 20))
4764 {
4765 /* load */
4766 if (bits (this_instr, 12, 15) == 15)
4767 {
4768 /* rd == pc */
4769 unsigned long rn;
4770 unsigned long base;
4771
4772 if (bit (this_instr, 22))
4773 error (_("Invalid update to pc in instruction"));
4774
4775 /* byte write to PC */
4776 rn = bits (this_instr, 16, 19);
4777 base = (rn == 15) ? pc_val + 8
4778 : get_frame_register_unsigned (frame, rn);
4779 if (bit (this_instr, 24))
4780 {
4781 /* pre-indexed */
4782 int c = (status & FLAG_C) ? 1 : 0;
4783 unsigned long offset =
4784 (bit (this_instr, 25)
4785 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4786 : bits (this_instr, 0, 11));
4787
4788 if (bit (this_instr, 23))
4789 base += offset;
4790 else
4791 base -= offset;
4792 }
4793 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
4794 4, byte_order);
4795 }
4796 }
4797 break;
4798
4799 case 0x8:
4800 case 0x9: /* block transfer */
4801 if (bit (this_instr, 20))
4802 {
4803 /* LDM */
4804 if (bit (this_instr, 15))
4805 {
4806 /* loading pc */
4807 int offset = 0;
4808
4809 if (bit (this_instr, 23))
4810 {
4811 /* up */
4812 unsigned long reglist = bits (this_instr, 0, 14);
4813 offset = bitcount (reglist) * 4;
4814 if (bit (this_instr, 24)) /* pre */
4815 offset += 4;
4816 }
4817 else if (bit (this_instr, 24))
4818 offset = -4;
4819
4820 {
4821 unsigned long rn_val =
4822 get_frame_register_unsigned (frame,
4823 bits (this_instr, 16, 19));
4824 nextpc =
4825 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
4826 + offset),
4827 4, byte_order);
4828 }
4829 }
4830 }
4831 break;
4832
4833 case 0xb: /* branch & link */
4834 case 0xa: /* branch */
4835 {
4836 nextpc = BranchDest (pc, this_instr);
4837 break;
4838 }
4839
4840 case 0xc:
4841 case 0xd:
4842 case 0xe: /* coproc ops */
4843 break;
4844 case 0xf: /* SWI */
4845 {
4846 struct gdbarch_tdep *tdep;
4847 tdep = gdbarch_tdep (gdbarch);
4848
4849 if (tdep->syscall_next_pc != NULL)
4850 nextpc = tdep->syscall_next_pc (frame);
4851
4852 }
4853 break;
4854
4855 default:
4856 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4857 return (pc);
4858 }
4859 }
4860
4861 return nextpc;
4862 }
4863
4864 CORE_ADDR
4865 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4866 {
4867 struct gdbarch *gdbarch = get_frame_arch (frame);
4868 CORE_ADDR nextpc =
4869 gdbarch_addr_bits_remove (gdbarch,
4870 arm_get_next_pc_raw (frame, pc, TRUE));
4871 if (nextpc == pc)
4872 error (_("Infinite loop detected"));
4873 return nextpc;
4874 }
4875
4876 /* single_step() is called just before we want to resume the inferior,
4877 if we want to single-step it but there is no hardware or kernel
4878 single-step support. We find the target of the coming instruction
4879 and breakpoint it. */
4880
4881 int
4882 arm_software_single_step (struct frame_info *frame)
4883 {
4884 struct gdbarch *gdbarch = get_frame_arch (frame);
4885 struct address_space *aspace = get_frame_address_space (frame);
4886
4887 /* NOTE: This may insert the wrong breakpoint instruction when
4888 single-stepping over a mode-changing instruction, if the
4889 CPSR heuristics are used. */
4890
4891 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4892 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4893
4894 return 1;
4895 }
4896
4897 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4898 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4899 NULL if an error occurs. BUF is freed. */
4900
4901 static gdb_byte *
4902 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4903 int old_len, int new_len)
4904 {
4905 gdb_byte *new_buf, *middle;
4906 int bytes_to_read = new_len - old_len;
4907
4908 new_buf = xmalloc (new_len);
4909 memcpy (new_buf + bytes_to_read, buf, old_len);
4910 xfree (buf);
4911 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4912 {
4913 xfree (new_buf);
4914 return NULL;
4915 }
4916 return new_buf;
4917 }
4918
4919 /* An IT block is at most the 2-byte IT instruction followed by
4920 four 4-byte instructions. The furthest back we must search to
4921 find an IT block that affects the current instruction is thus
4922 2 + 3 * 4 == 14 bytes. */
4923 #define MAX_IT_BLOCK_PREFIX 14
4924
4925 /* Use a quick scan if there are more than this many bytes of
4926 code. */
4927 #define IT_SCAN_THRESHOLD 32
4928
4929 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4930 A breakpoint in an IT block may not be hit, depending on the
4931 condition flags. */
4932 static CORE_ADDR
4933 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4934 {
4935 gdb_byte *buf;
4936 char map_type;
4937 CORE_ADDR boundary, func_start;
4938 int buf_len, buf2_len;
4939 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4940 int i, any, last_it, last_it_count;
4941
4942 /* If we are using BKPT breakpoints, none of this is necessary. */
4943 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4944 return bpaddr;
4945
4946 /* ARM mode does not have this problem. */
4947 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4948 return bpaddr;
4949
4950 /* We are setting a breakpoint in Thumb code that could potentially
4951 contain an IT block. The first step is to find how much Thumb
4952 code there is; we do not need to read outside of known Thumb
4953 sequences. */
4954 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4955 if (map_type == 0)
4956 /* Thumb-2 code must have mapping symbols to have a chance. */
4957 return bpaddr;
4958
4959 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4960
4961 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4962 && func_start > boundary)
4963 boundary = func_start;
4964
4965 /* Search for a candidate IT instruction. We have to do some fancy
4966 footwork to distinguish a real IT instruction from the second
4967 half of a 32-bit instruction, but there is no need for that if
4968 there's no candidate. */
4969 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4970 if (buf_len == 0)
4971 /* No room for an IT instruction. */
4972 return bpaddr;
4973
4974 buf = xmalloc (buf_len);
4975 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4976 return bpaddr;
4977 any = 0;
4978 for (i = 0; i < buf_len; i += 2)
4979 {
4980 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4981 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4982 {
4983 any = 1;
4984 break;
4985 }
4986 }
4987 if (any == 0)
4988 {
4989 xfree (buf);
4990 return bpaddr;
4991 }
4992
4993 /* OK, the code bytes before this instruction contain at least one
4994 halfword which resembles an IT instruction. We know that it's
4995 Thumb code, but there are still two possibilities. Either the
4996 halfword really is an IT instruction, or it is the second half of
4997 a 32-bit Thumb instruction. The only way we can tell is to
4998 scan forwards from a known instruction boundary. */
4999 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5000 {
5001 int definite;
5002
5003 /* There's a lot of code before this instruction. Start with an
5004 optimistic search; it's easy to recognize halfwords that can
5005 not be the start of a 32-bit instruction, and use that to
5006 lock on to the instruction boundaries. */
5007 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5008 if (buf == NULL)
5009 return bpaddr;
5010 buf_len = IT_SCAN_THRESHOLD;
5011
5012 definite = 0;
5013 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5014 {
5015 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5016 if (thumb_insn_size (inst1) == 2)
5017 {
5018 definite = 1;
5019 break;
5020 }
5021 }
5022
5023 /* At this point, if DEFINITE, BUF[I] is the first place we
5024 are sure that we know the instruction boundaries, and it is far
5025 enough from BPADDR that we could not miss an IT instruction
5026 affecting BPADDR. If ! DEFINITE, give up - start from a
5027 known boundary. */
5028 if (! definite)
5029 {
5030 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5031 bpaddr - boundary);
5032 if (buf == NULL)
5033 return bpaddr;
5034 buf_len = bpaddr - boundary;
5035 i = 0;
5036 }
5037 }
5038 else
5039 {
5040 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5041 if (buf == NULL)
5042 return bpaddr;
5043 buf_len = bpaddr - boundary;
5044 i = 0;
5045 }
5046
5047 /* Scan forwards. Find the last IT instruction before BPADDR. */
5048 last_it = -1;
5049 last_it_count = 0;
5050 while (i < buf_len)
5051 {
5052 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5053 last_it_count--;
5054 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5055 {
5056 last_it = i;
5057 if (inst1 & 0x0001)
5058 last_it_count = 4;
5059 else if (inst1 & 0x0002)
5060 last_it_count = 3;
5061 else if (inst1 & 0x0004)
5062 last_it_count = 2;
5063 else
5064 last_it_count = 1;
5065 }
5066 i += thumb_insn_size (inst1);
5067 }
5068
5069 xfree (buf);
5070
5071 if (last_it == -1)
5072 /* There wasn't really an IT instruction after all. */
5073 return bpaddr;
5074
5075 if (last_it_count < 1)
5076 /* It was too far away. */
5077 return bpaddr;
5078
5079 /* This really is a trouble spot. Move the breakpoint to the IT
5080 instruction. */
5081 return bpaddr - buf_len + last_it;
5082 }
5083
5084 /* ARM displaced stepping support.
5085
5086 Generally ARM displaced stepping works as follows:
5087
5088 1. When an instruction is to be single-stepped, it is first decoded by
5089 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5090 Depending on the type of instruction, it is then copied to a scratch
5091 location, possibly in a modified form. The copy_* set of functions
5092 performs such modification, as necessary. A breakpoint is placed after
5093 the modified instruction in the scratch space to return control to GDB.
5094 Note in particular that instructions which modify the PC will no longer
5095 do so after modification.
5096
5097 2. The instruction is single-stepped, by setting the PC to the scratch
5098 location address, and resuming. Control returns to GDB when the
5099 breakpoint is hit.
5100
5101 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5102 function used for the current instruction. This function's job is to
5103 put the CPU/memory state back to what it would have been if the
5104 instruction had been executed unmodified in its original location. */
5105
5106 /* NOP instruction (mov r0, r0). */
5107 #define ARM_NOP 0xe1a00000
5108
5109 /* Helper for register reads for displaced stepping. In particular, this
5110 returns the PC as it would be seen by the instruction at its original
5111 location. */
5112
5113 ULONGEST
5114 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
5115 {
5116 ULONGEST ret;
5117
5118 if (regno == 15)
5119 {
5120 if (debug_displaced)
5121 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5122 (unsigned long) from + 8);
5123 return (ULONGEST) from + 8; /* Pipeline offset. */
5124 }
5125 else
5126 {
5127 regcache_cooked_read_unsigned (regs, regno, &ret);
5128 if (debug_displaced)
5129 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5130 regno, (unsigned long) ret);
5131 return ret;
5132 }
5133 }
5134
5135 static int
5136 displaced_in_arm_mode (struct regcache *regs)
5137 {
5138 ULONGEST ps;
5139 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5140
5141 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5142
5143 return (ps & t_bit) == 0;
5144 }
5145
5146 /* Write to the PC as from a branch instruction. */
5147
5148 static void
5149 branch_write_pc (struct regcache *regs, ULONGEST val)
5150 {
5151 if (displaced_in_arm_mode (regs))
5152 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5153 architecture versions < 6. */
5154 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5155 val & ~(ULONGEST) 0x3);
5156 else
5157 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5158 val & ~(ULONGEST) 0x1);
5159 }
5160
5161 /* Write to the PC as from a branch-exchange instruction. */
5162
5163 static void
5164 bx_write_pc (struct regcache *regs, ULONGEST val)
5165 {
5166 ULONGEST ps;
5167 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5168
5169 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5170
5171 if ((val & 1) == 1)
5172 {
5173 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5174 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5175 }
5176 else if ((val & 2) == 0)
5177 {
5178 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5179 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5180 }
5181 else
5182 {
5183 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5184 mode, align dest to 4 bytes). */
5185 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5186 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5187 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5188 }
5189 }
5190
5191 /* Write to the PC as if from a load instruction. */
5192
5193 static void
5194 load_write_pc (struct regcache *regs, ULONGEST val)
5195 {
5196 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5197 bx_write_pc (regs, val);
5198 else
5199 branch_write_pc (regs, val);
5200 }
5201
5202 /* Write to the PC as if from an ALU instruction. */
5203
5204 static void
5205 alu_write_pc (struct regcache *regs, ULONGEST val)
5206 {
5207 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
5208 bx_write_pc (regs, val);
5209 else
5210 branch_write_pc (regs, val);
5211 }
5212
5213 /* Helper for writing to registers for displaced stepping. Writing to the PC
5214 has a varying effects depending on the instruction which does the write:
5215 this is controlled by the WRITE_PC argument. */
5216
5217 void
5218 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5219 int regno, ULONGEST val, enum pc_write_style write_pc)
5220 {
5221 if (regno == 15)
5222 {
5223 if (debug_displaced)
5224 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5225 (unsigned long) val);
5226 switch (write_pc)
5227 {
5228 case BRANCH_WRITE_PC:
5229 branch_write_pc (regs, val);
5230 break;
5231
5232 case BX_WRITE_PC:
5233 bx_write_pc (regs, val);
5234 break;
5235
5236 case LOAD_WRITE_PC:
5237 load_write_pc (regs, val);
5238 break;
5239
5240 case ALU_WRITE_PC:
5241 alu_write_pc (regs, val);
5242 break;
5243
5244 case CANNOT_WRITE_PC:
5245 warning (_("Instruction wrote to PC in an unexpected way when "
5246 "single-stepping"));
5247 break;
5248
5249 default:
5250 internal_error (__FILE__, __LINE__,
5251 _("Invalid argument to displaced_write_reg"));
5252 }
5253
5254 dsc->wrote_to_pc = 1;
5255 }
5256 else
5257 {
5258 if (debug_displaced)
5259 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5260 regno, (unsigned long) val);
5261 regcache_cooked_write_unsigned (regs, regno, val);
5262 }
5263 }
5264
5265 /* This function is used to concisely determine if an instruction INSN
5266 references PC. Register fields of interest in INSN should have the
5267 corresponding fields of BITMASK set to 0b1111. The function
5268 returns return 1 if any of these fields in INSN reference the PC
5269 (also 0b1111, r15), else it returns 0. */
5270
5271 static int
5272 insn_references_pc (uint32_t insn, uint32_t bitmask)
5273 {
5274 uint32_t lowbit = 1;
5275
5276 while (bitmask != 0)
5277 {
5278 uint32_t mask;
5279
5280 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5281 ;
5282
5283 if (!lowbit)
5284 break;
5285
5286 mask = lowbit * 0xf;
5287
5288 if ((insn & mask) == mask)
5289 return 1;
5290
5291 bitmask &= ~mask;
5292 }
5293
5294 return 0;
5295 }
5296
5297 /* The simplest copy function. Many instructions have the same effect no
5298 matter what address they are executed at: in those cases, use this. */
5299
5300 static int
5301 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5302 const char *iname, struct displaced_step_closure *dsc)
5303 {
5304 if (debug_displaced)
5305 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5306 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5307 iname);
5308
5309 dsc->modinsn[0] = insn;
5310
5311 return 0;
5312 }
5313
5314 /* Preload instructions with immediate offset. */
5315
5316 static void
5317 cleanup_preload (struct gdbarch *gdbarch,
5318 struct regcache *regs, struct displaced_step_closure *dsc)
5319 {
5320 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5321 if (!dsc->u.preload.immed)
5322 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5323 }
5324
5325 static int
5326 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5327 struct displaced_step_closure *dsc)
5328 {
5329 unsigned int rn = bits (insn, 16, 19);
5330 ULONGEST rn_val;
5331 CORE_ADDR from = dsc->insn_addr;
5332
5333 if (!insn_references_pc (insn, 0x000f0000ul))
5334 return copy_unmodified (gdbarch, insn, "preload", dsc);
5335
5336 if (debug_displaced)
5337 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5338 (unsigned long) insn);
5339
5340 /* Preload instructions:
5341
5342 {pli/pld} [rn, #+/-imm]
5343 ->
5344 {pli/pld} [r0, #+/-imm]. */
5345
5346 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5347 rn_val = displaced_read_reg (regs, from, rn);
5348 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5349
5350 dsc->u.preload.immed = 1;
5351
5352 dsc->modinsn[0] = insn & 0xfff0ffff;
5353
5354 dsc->cleanup = &cleanup_preload;
5355
5356 return 0;
5357 }
5358
5359 /* Preload instructions with register offset. */
5360
5361 static int
5362 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5363 struct regcache *regs,
5364 struct displaced_step_closure *dsc)
5365 {
5366 unsigned int rn = bits (insn, 16, 19);
5367 unsigned int rm = bits (insn, 0, 3);
5368 ULONGEST rn_val, rm_val;
5369 CORE_ADDR from = dsc->insn_addr;
5370
5371 if (!insn_references_pc (insn, 0x000f000ful))
5372 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
5373
5374 if (debug_displaced)
5375 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5376 (unsigned long) insn);
5377
5378 /* Preload register-offset instructions:
5379
5380 {pli/pld} [rn, rm {, shift}]
5381 ->
5382 {pli/pld} [r0, r1 {, shift}]. */
5383
5384 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5385 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5386 rn_val = displaced_read_reg (regs, from, rn);
5387 rm_val = displaced_read_reg (regs, from, rm);
5388 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5389 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5390
5391 dsc->u.preload.immed = 0;
5392
5393 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5394
5395 dsc->cleanup = &cleanup_preload;
5396
5397 return 0;
5398 }
5399
5400 /* Copy/cleanup coprocessor load and store instructions. */
5401
5402 static void
5403 cleanup_copro_load_store (struct gdbarch *gdbarch,
5404 struct regcache *regs,
5405 struct displaced_step_closure *dsc)
5406 {
5407 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
5408
5409 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5410
5411 if (dsc->u.ldst.writeback)
5412 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5413 }
5414
5415 static int
5416 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5417 struct regcache *regs,
5418 struct displaced_step_closure *dsc)
5419 {
5420 unsigned int rn = bits (insn, 16, 19);
5421 ULONGEST rn_val;
5422 CORE_ADDR from = dsc->insn_addr;
5423
5424 if (!insn_references_pc (insn, 0x000f0000ul))
5425 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5426
5427 if (debug_displaced)
5428 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5429 "load/store insn %.8lx\n", (unsigned long) insn);
5430
5431 /* Coprocessor load/store instructions:
5432
5433 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5434 ->
5435 {stc/stc2} [r0, #+/-imm].
5436
5437 ldc/ldc2 are handled identically. */
5438
5439 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5440 rn_val = displaced_read_reg (regs, from, rn);
5441 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5442
5443 dsc->u.ldst.writeback = bit (insn, 25);
5444 dsc->u.ldst.rn = rn;
5445
5446 dsc->modinsn[0] = insn & 0xfff0ffff;
5447
5448 dsc->cleanup = &cleanup_copro_load_store;
5449
5450 return 0;
5451 }
5452
5453 /* Clean up branch instructions (actually perform the branch, by setting
5454 PC). */
5455
5456 static void
5457 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5458 struct displaced_step_closure *dsc)
5459 {
5460 ULONGEST from = dsc->insn_addr;
5461 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5462 int branch_taken = condition_true (dsc->u.branch.cond, status);
5463 enum pc_write_style write_pc = dsc->u.branch.exchange
5464 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5465
5466 if (!branch_taken)
5467 return;
5468
5469 if (dsc->u.branch.link)
5470 {
5471 ULONGEST pc = displaced_read_reg (regs, from, 15);
5472 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
5473 }
5474
5475 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
5476 }
5477
5478 /* Copy B/BL/BLX instructions with immediate destinations. */
5479
5480 static int
5481 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5482 struct regcache *regs, struct displaced_step_closure *dsc)
5483 {
5484 unsigned int cond = bits (insn, 28, 31);
5485 int exchange = (cond == 0xf);
5486 int link = exchange || bit (insn, 24);
5487 CORE_ADDR from = dsc->insn_addr;
5488 long offset;
5489
5490 if (debug_displaced)
5491 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5492 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5493 (unsigned long) insn);
5494
5495 /* Implement "BL<cond> <label>" as:
5496
5497 Preparation: cond <- instruction condition
5498 Insn: mov r0, r0 (nop)
5499 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5500
5501 B<cond> similar, but don't set r14 in cleanup. */
5502
5503 if (exchange)
5504 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5505 then arrange the switch into Thumb mode. */
5506 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5507 else
5508 offset = bits (insn, 0, 23) << 2;
5509
5510 if (bit (offset, 25))
5511 offset = offset | ~0x3ffffff;
5512
5513 dsc->u.branch.cond = cond;
5514 dsc->u.branch.link = link;
5515 dsc->u.branch.exchange = exchange;
5516 dsc->u.branch.dest = from + 8 + offset;
5517
5518 dsc->modinsn[0] = ARM_NOP;
5519
5520 dsc->cleanup = &cleanup_branch;
5521
5522 return 0;
5523 }
5524
5525 /* Copy BX/BLX with register-specified destinations. */
5526
5527 static int
5528 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5529 struct regcache *regs, struct displaced_step_closure *dsc)
5530 {
5531 unsigned int cond = bits (insn, 28, 31);
5532 /* BX: x12xxx1x
5533 BLX: x12xxx3x. */
5534 int link = bit (insn, 5);
5535 unsigned int rm = bits (insn, 0, 3);
5536 CORE_ADDR from = dsc->insn_addr;
5537
5538 if (debug_displaced)
5539 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
5540 "%.8lx\n", (link) ? "blx" : "bx",
5541 (unsigned long) insn);
5542
5543 /* Implement {BX,BLX}<cond> <reg>" as:
5544
5545 Preparation: cond <- instruction condition
5546 Insn: mov r0, r0 (nop)
5547 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5548
5549 Don't set r14 in cleanup for BX. */
5550
5551 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
5552
5553 dsc->u.branch.cond = cond;
5554 dsc->u.branch.link = link;
5555 dsc->u.branch.exchange = 1;
5556
5557 dsc->modinsn[0] = ARM_NOP;
5558
5559 dsc->cleanup = &cleanup_branch;
5560
5561 return 0;
5562 }
5563
5564 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5565
5566 static void
5567 cleanup_alu_imm (struct gdbarch *gdbarch,
5568 struct regcache *regs, struct displaced_step_closure *dsc)
5569 {
5570 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
5571 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5572 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5573 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5574 }
5575
5576 static int
5577 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5578 struct displaced_step_closure *dsc)
5579 {
5580 unsigned int rn = bits (insn, 16, 19);
5581 unsigned int rd = bits (insn, 12, 15);
5582 unsigned int op = bits (insn, 21, 24);
5583 int is_mov = (op == 0xd);
5584 ULONGEST rd_val, rn_val;
5585 CORE_ADDR from = dsc->insn_addr;
5586
5587 if (!insn_references_pc (insn, 0x000ff000ul))
5588 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5589
5590 if (debug_displaced)
5591 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5592 "%.8lx\n", is_mov ? "move" : "ALU",
5593 (unsigned long) insn);
5594
5595 /* Instruction is of form:
5596
5597 <op><cond> rd, [rn,] #imm
5598
5599 Rewrite as:
5600
5601 Preparation: tmp1, tmp2 <- r0, r1;
5602 r0, r1 <- rd, rn
5603 Insn: <op><cond> r0, r1, #imm
5604 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5605 */
5606
5607 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5608 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5609 rn_val = displaced_read_reg (regs, from, rn);
5610 rd_val = displaced_read_reg (regs, from, rd);
5611 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5612 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5613 dsc->rd = rd;
5614
5615 if (is_mov)
5616 dsc->modinsn[0] = insn & 0xfff00fff;
5617 else
5618 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5619
5620 dsc->cleanup = &cleanup_alu_imm;
5621
5622 return 0;
5623 }
5624
5625 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5626
5627 static void
5628 cleanup_alu_reg (struct gdbarch *gdbarch,
5629 struct regcache *regs, struct displaced_step_closure *dsc)
5630 {
5631 ULONGEST rd_val;
5632 int i;
5633
5634 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
5635
5636 for (i = 0; i < 3; i++)
5637 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5638
5639 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5640 }
5641
5642 static int
5643 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5644 struct displaced_step_closure *dsc)
5645 {
5646 unsigned int rn = bits (insn, 16, 19);
5647 unsigned int rm = bits (insn, 0, 3);
5648 unsigned int rd = bits (insn, 12, 15);
5649 unsigned int op = bits (insn, 21, 24);
5650 int is_mov = (op == 0xd);
5651 ULONGEST rd_val, rn_val, rm_val;
5652 CORE_ADDR from = dsc->insn_addr;
5653
5654 if (!insn_references_pc (insn, 0x000ff00ful))
5655 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5656
5657 if (debug_displaced)
5658 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
5659 is_mov ? "move" : "ALU", (unsigned long) insn);
5660
5661 /* Instruction is of form:
5662
5663 <op><cond> rd, [rn,] rm [, <shift>]
5664
5665 Rewrite as:
5666
5667 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5668 r0, r1, r2 <- rd, rn, rm
5669 Insn: <op><cond> r0, r1, r2 [, <shift>]
5670 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5671 */
5672
5673 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5674 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5675 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
5676 rd_val = displaced_read_reg (regs, from, rd);
5677 rn_val = displaced_read_reg (regs, from, rn);
5678 rm_val = displaced_read_reg (regs, from, rm);
5679 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5680 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5681 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5682 dsc->rd = rd;
5683
5684 if (is_mov)
5685 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5686 else
5687 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5688
5689 dsc->cleanup = &cleanup_alu_reg;
5690
5691 return 0;
5692 }
5693
5694 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5695
5696 static void
5697 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5698 struct regcache *regs,
5699 struct displaced_step_closure *dsc)
5700 {
5701 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
5702 int i;
5703
5704 for (i = 0; i < 4; i++)
5705 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5706
5707 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5708 }
5709
5710 static int
5711 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5712 struct regcache *regs,
5713 struct displaced_step_closure *dsc)
5714 {
5715 unsigned int rn = bits (insn, 16, 19);
5716 unsigned int rm = bits (insn, 0, 3);
5717 unsigned int rd = bits (insn, 12, 15);
5718 unsigned int rs = bits (insn, 8, 11);
5719 unsigned int op = bits (insn, 21, 24);
5720 int is_mov = (op == 0xd), i;
5721 ULONGEST rd_val, rn_val, rm_val, rs_val;
5722 CORE_ADDR from = dsc->insn_addr;
5723
5724 if (!insn_references_pc (insn, 0x000fff0ful))
5725 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5726
5727 if (debug_displaced)
5728 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
5729 "%.8lx\n", is_mov ? "move" : "ALU",
5730 (unsigned long) insn);
5731
5732 /* Instruction is of form:
5733
5734 <op><cond> rd, [rn,] rm, <shift> rs
5735
5736 Rewrite as:
5737
5738 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5739 r0, r1, r2, r3 <- rd, rn, rm, rs
5740 Insn: <op><cond> r0, r1, r2, <shift> r3
5741 Cleanup: tmp5 <- r0
5742 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5743 rd <- tmp5
5744 */
5745
5746 for (i = 0; i < 4; i++)
5747 dsc->tmp[i] = displaced_read_reg (regs, from, i);
5748
5749 rd_val = displaced_read_reg (regs, from, rd);
5750 rn_val = displaced_read_reg (regs, from, rn);
5751 rm_val = displaced_read_reg (regs, from, rm);
5752 rs_val = displaced_read_reg (regs, from, rs);
5753 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5754 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5755 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5756 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
5757 dsc->rd = rd;
5758
5759 if (is_mov)
5760 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5761 else
5762 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5763
5764 dsc->cleanup = &cleanup_alu_shifted_reg;
5765
5766 return 0;
5767 }
5768
5769 /* Clean up load instructions. */
5770
5771 static void
5772 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
5773 struct displaced_step_closure *dsc)
5774 {
5775 ULONGEST rt_val, rt_val2 = 0, rn_val;
5776 CORE_ADDR from = dsc->insn_addr;
5777
5778 rt_val = displaced_read_reg (regs, from, 0);
5779 if (dsc->u.ldst.xfersize == 8)
5780 rt_val2 = displaced_read_reg (regs, from, 1);
5781 rn_val = displaced_read_reg (regs, from, 2);
5782
5783 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5784 if (dsc->u.ldst.xfersize > 4)
5785 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5786 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5787 if (!dsc->u.ldst.immed)
5788 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5789
5790 /* Handle register writeback. */
5791 if (dsc->u.ldst.writeback)
5792 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5793 /* Put result in right place. */
5794 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
5795 if (dsc->u.ldst.xfersize == 8)
5796 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
5797 }
5798
5799 /* Clean up store instructions. */
5800
5801 static void
5802 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5803 struct displaced_step_closure *dsc)
5804 {
5805 CORE_ADDR from = dsc->insn_addr;
5806 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
5807
5808 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5809 if (dsc->u.ldst.xfersize > 4)
5810 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5811 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5812 if (!dsc->u.ldst.immed)
5813 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5814 if (!dsc->u.ldst.restore_r4)
5815 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5816
5817 /* Writeback. */
5818 if (dsc->u.ldst.writeback)
5819 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5820 }
5821
5822 /* Copy "extra" load/store instructions. These are halfword/doubleword
5823 transfers, which have a different encoding to byte/word transfers. */
5824
5825 static int
5826 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
5827 struct regcache *regs, struct displaced_step_closure *dsc)
5828 {
5829 unsigned int op1 = bits (insn, 20, 24);
5830 unsigned int op2 = bits (insn, 5, 6);
5831 unsigned int rt = bits (insn, 12, 15);
5832 unsigned int rn = bits (insn, 16, 19);
5833 unsigned int rm = bits (insn, 0, 3);
5834 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5835 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5836 int immed = (op1 & 0x4) != 0;
5837 int opcode;
5838 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5839 CORE_ADDR from = dsc->insn_addr;
5840
5841 if (!insn_references_pc (insn, 0x000ff00ful))
5842 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5843
5844 if (debug_displaced)
5845 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
5846 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
5847 (unsigned long) insn);
5848
5849 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5850
5851 if (opcode < 0)
5852 internal_error (__FILE__, __LINE__,
5853 _("copy_extra_ld_st: instruction decode error"));
5854
5855 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5856 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5857 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
5858 if (!immed)
5859 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
5860
5861 rt_val = displaced_read_reg (regs, from, rt);
5862 if (bytesize[opcode] == 8)
5863 rt_val2 = displaced_read_reg (regs, from, rt + 1);
5864 rn_val = displaced_read_reg (regs, from, rn);
5865 if (!immed)
5866 rm_val = displaced_read_reg (regs, from, rm);
5867
5868 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5869 if (bytesize[opcode] == 8)
5870 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5871 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5872 if (!immed)
5873 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5874
5875 dsc->rd = rt;
5876 dsc->u.ldst.xfersize = bytesize[opcode];
5877 dsc->u.ldst.rn = rn;
5878 dsc->u.ldst.immed = immed;
5879 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5880 dsc->u.ldst.restore_r4 = 0;
5881
5882 if (immed)
5883 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5884 ->
5885 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5886 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5887 else
5888 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5889 ->
5890 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5891 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5892
5893 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5894
5895 return 0;
5896 }
5897
5898 /* Copy byte/word loads and stores. */
5899
5900 static int
5901 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
5902 struct regcache *regs,
5903 struct displaced_step_closure *dsc, int load, int byte,
5904 int usermode)
5905 {
5906 int immed = !bit (insn, 25);
5907 unsigned int rt = bits (insn, 12, 15);
5908 unsigned int rn = bits (insn, 16, 19);
5909 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
5910 ULONGEST rt_val, rn_val, rm_val = 0;
5911 CORE_ADDR from = dsc->insn_addr;
5912
5913 if (!insn_references_pc (insn, 0x000ff00ful))
5914 return copy_unmodified (gdbarch, insn, "load/store", dsc);
5915
5916 if (debug_displaced)
5917 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
5918 load ? (byte ? "ldrb" : "ldr")
5919 : (byte ? "strb" : "str"), usermode ? "t" : "",
5920 (unsigned long) insn);
5921
5922 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5923 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
5924 if (!immed)
5925 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
5926 if (!load)
5927 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
5928
5929 rt_val = displaced_read_reg (regs, from, rt);
5930 rn_val = displaced_read_reg (regs, from, rn);
5931 if (!immed)
5932 rm_val = displaced_read_reg (regs, from, rm);
5933
5934 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5935 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5936 if (!immed)
5937 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5938
5939 dsc->rd = rt;
5940 dsc->u.ldst.xfersize = byte ? 1 : 4;
5941 dsc->u.ldst.rn = rn;
5942 dsc->u.ldst.immed = immed;
5943 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5944
5945 /* To write PC we can do:
5946
5947 Before this sequence of instructions:
5948 r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
5949 r2 is the Rn value got from dispalced_read_reg.
5950
5951 Insn1: push {pc} Write address of STR instruction + offset on stack
5952 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
5953 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
5954 = addr(Insn1) + offset - addr(Insn3) - 8
5955 = offset - 16
5956 Insn4: add r4, r4, #8 r4 = offset - 8
5957 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
5958 = from + offset
5959 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
5960
5961 Otherwise we don't know what value to write for PC, since the offset is
5962 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
5963 of this can be found in Section "Saving from r15" in
5964 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
5965
5966 if (load || rt != 15)
5967 {
5968 dsc->u.ldst.restore_r4 = 0;
5969
5970 if (immed)
5971 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
5972 ->
5973 {ldr,str}[b]<cond> r0, [r2, #imm]. */
5974 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5975 else
5976 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
5977 ->
5978 {ldr,str}[b]<cond> r0, [r2, r3]. */
5979 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5980 }
5981 else
5982 {
5983 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
5984 dsc->u.ldst.restore_r4 = 1;
5985 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
5986 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
5987 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
5988 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
5989 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
5990
5991 /* As above. */
5992 if (immed)
5993 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
5994 else
5995 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
5996
5997 dsc->modinsn[6] = 0x0; /* breakpoint location. */
5998 dsc->modinsn[7] = 0x0; /* scratch space. */
5999
6000 dsc->numinsns = 6;
6001 }
6002
6003 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6004
6005 return 0;
6006 }
6007
6008 /* Cleanup LDM instructions with fully-populated register list. This is an
6009 unfortunate corner case: it's impossible to implement correctly by modifying
6010 the instruction. The issue is as follows: we have an instruction,
6011
6012 ldm rN, {r0-r15}
6013
6014 which we must rewrite to avoid loading PC. A possible solution would be to
6015 do the load in two halves, something like (with suitable cleanup
6016 afterwards):
6017
6018 mov r8, rN
6019 ldm[id][ab] r8!, {r0-r7}
6020 str r7, <temp>
6021 ldm[id][ab] r8, {r7-r14}
6022 <bkpt>
6023
6024 but at present there's no suitable place for <temp>, since the scratch space
6025 is overwritten before the cleanup routine is called. For now, we simply
6026 emulate the instruction. */
6027
6028 static void
6029 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6030 struct displaced_step_closure *dsc)
6031 {
6032 ULONGEST from = dsc->insn_addr;
6033 int inc = dsc->u.block.increment;
6034 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6035 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6036 uint32_t regmask = dsc->u.block.regmask;
6037 int regno = inc ? 0 : 15;
6038 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6039 int exception_return = dsc->u.block.load && dsc->u.block.user
6040 && (regmask & 0x8000) != 0;
6041 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
6042 int do_transfer = condition_true (dsc->u.block.cond, status);
6043 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6044
6045 if (!do_transfer)
6046 return;
6047
6048 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6049 sensible we can do here. Complain loudly. */
6050 if (exception_return)
6051 error (_("Cannot single-step exception return"));
6052
6053 /* We don't handle any stores here for now. */
6054 gdb_assert (dsc->u.block.load != 0);
6055
6056 if (debug_displaced)
6057 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6058 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6059 dsc->u.block.increment ? "inc" : "dec",
6060 dsc->u.block.before ? "before" : "after");
6061
6062 while (regmask)
6063 {
6064 uint32_t memword;
6065
6066 if (inc)
6067 while (regno <= 15 && (regmask & (1 << regno)) == 0)
6068 regno++;
6069 else
6070 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6071 regno--;
6072
6073 xfer_addr += bump_before;
6074
6075 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6076 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6077
6078 xfer_addr += bump_after;
6079
6080 regmask &= ~(1 << regno);
6081 }
6082
6083 if (dsc->u.block.writeback)
6084 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6085 CANNOT_WRITE_PC);
6086 }
6087
6088 /* Clean up an STM which included the PC in the register list. */
6089
6090 static void
6091 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6092 struct displaced_step_closure *dsc)
6093 {
6094 ULONGEST from = dsc->insn_addr;
6095 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
6096 int store_executed = condition_true (dsc->u.block.cond, status);
6097 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6098 CORE_ADDR stm_insn_addr;
6099 uint32_t pc_val;
6100 long offset;
6101 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6102
6103 /* If condition code fails, there's nothing else to do. */
6104 if (!store_executed)
6105 return;
6106
6107 if (dsc->u.block.increment)
6108 {
6109 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6110
6111 if (dsc->u.block.before)
6112 pc_stored_at += 4;
6113 }
6114 else
6115 {
6116 pc_stored_at = dsc->u.block.xfer_addr;
6117
6118 if (dsc->u.block.before)
6119 pc_stored_at -= 4;
6120 }
6121
6122 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6123 stm_insn_addr = dsc->scratch_base;
6124 offset = pc_val - stm_insn_addr;
6125
6126 if (debug_displaced)
6127 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6128 "STM instruction\n", offset);
6129
6130 /* Rewrite the stored PC to the proper value for the non-displaced original
6131 instruction. */
6132 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6133 dsc->insn_addr + offset);
6134 }
6135
6136 /* Clean up an LDM which includes the PC in the register list. We clumped all
6137 the registers in the transferred list into a contiguous range r0...rX (to
6138 avoid loading PC directly and losing control of the debugged program), so we
6139 must undo that here. */
6140
6141 static void
6142 cleanup_block_load_pc (struct gdbarch *gdbarch,
6143 struct regcache *regs,
6144 struct displaced_step_closure *dsc)
6145 {
6146 ULONGEST from = dsc->insn_addr;
6147 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
6148 int load_executed = condition_true (dsc->u.block.cond, status), i;
6149 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
6150 unsigned int regs_loaded = bitcount (mask);
6151 unsigned int num_to_shuffle = regs_loaded, clobbered;
6152
6153 /* The method employed here will fail if the register list is fully populated
6154 (we need to avoid loading PC directly). */
6155 gdb_assert (num_to_shuffle < 16);
6156
6157 if (!load_executed)
6158 return;
6159
6160 clobbered = (1 << num_to_shuffle) - 1;
6161
6162 while (num_to_shuffle > 0)
6163 {
6164 if ((mask & (1 << write_reg)) != 0)
6165 {
6166 unsigned int read_reg = num_to_shuffle - 1;
6167
6168 if (read_reg != write_reg)
6169 {
6170 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
6171 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6172 if (debug_displaced)
6173 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6174 "loaded register r%d to r%d\n"), read_reg,
6175 write_reg);
6176 }
6177 else if (debug_displaced)
6178 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6179 "r%d already in the right place\n"),
6180 write_reg);
6181
6182 clobbered &= ~(1 << write_reg);
6183
6184 num_to_shuffle--;
6185 }
6186
6187 write_reg--;
6188 }
6189
6190 /* Restore any registers we scribbled over. */
6191 for (write_reg = 0; clobbered != 0; write_reg++)
6192 {
6193 if ((clobbered & (1 << write_reg)) != 0)
6194 {
6195 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6196 CANNOT_WRITE_PC);
6197 if (debug_displaced)
6198 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6199 "clobbered register r%d\n"), write_reg);
6200 clobbered &= ~(1 << write_reg);
6201 }
6202 }
6203
6204 /* Perform register writeback manually. */
6205 if (dsc->u.block.writeback)
6206 {
6207 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6208
6209 if (dsc->u.block.increment)
6210 new_rn_val += regs_loaded * 4;
6211 else
6212 new_rn_val -= regs_loaded * 4;
6213
6214 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6215 CANNOT_WRITE_PC);
6216 }
6217 }
6218
6219 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6220 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6221
6222 static int
6223 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6224 struct displaced_step_closure *dsc)
6225 {
6226 int load = bit (insn, 20);
6227 int user = bit (insn, 22);
6228 int increment = bit (insn, 23);
6229 int before = bit (insn, 24);
6230 int writeback = bit (insn, 21);
6231 int rn = bits (insn, 16, 19);
6232 CORE_ADDR from = dsc->insn_addr;
6233
6234 /* Block transfers which don't mention PC can be run directly
6235 out-of-line. */
6236 if (rn != 15 && (insn & 0x8000) == 0)
6237 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6238
6239 if (rn == 15)
6240 {
6241 warning (_("displaced: Unpredictable LDM or STM with "
6242 "base register r15"));
6243 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6244 }
6245
6246 if (debug_displaced)
6247 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6248 "%.8lx\n", (unsigned long) insn);
6249
6250 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
6251 dsc->u.block.rn = rn;
6252
6253 dsc->u.block.load = load;
6254 dsc->u.block.user = user;
6255 dsc->u.block.increment = increment;
6256 dsc->u.block.before = before;
6257 dsc->u.block.writeback = writeback;
6258 dsc->u.block.cond = bits (insn, 28, 31);
6259
6260 dsc->u.block.regmask = insn & 0xffff;
6261
6262 if (load)
6263 {
6264 if ((insn & 0xffff) == 0xffff)
6265 {
6266 /* LDM with a fully-populated register list. This case is
6267 particularly tricky. Implement for now by fully emulating the
6268 instruction (which might not behave perfectly in all cases, but
6269 these instructions should be rare enough for that not to matter
6270 too much). */
6271 dsc->modinsn[0] = ARM_NOP;
6272
6273 dsc->cleanup = &cleanup_block_load_all;
6274 }
6275 else
6276 {
6277 /* LDM of a list of registers which includes PC. Implement by
6278 rewriting the list of registers to be transferred into a
6279 contiguous chunk r0...rX before doing the transfer, then shuffling
6280 registers into the correct places in the cleanup routine. */
6281 unsigned int regmask = insn & 0xffff;
6282 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6283 unsigned int to = 0, from = 0, i, new_rn;
6284
6285 for (i = 0; i < num_in_list; i++)
6286 dsc->tmp[i] = displaced_read_reg (regs, from, i);
6287
6288 /* Writeback makes things complicated. We need to avoid clobbering
6289 the base register with one of the registers in our modified
6290 register list, but just using a different register can't work in
6291 all cases, e.g.:
6292
6293 ldm r14!, {r0-r13,pc}
6294
6295 which would need to be rewritten as:
6296
6297 ldm rN!, {r0-r14}
6298
6299 but that can't work, because there's no free register for N.
6300
6301 Solve this by turning off the writeback bit, and emulating
6302 writeback manually in the cleanup routine. */
6303
6304 if (writeback)
6305 insn &= ~(1 << 21);
6306
6307 new_regmask = (1 << num_in_list) - 1;
6308
6309 if (debug_displaced)
6310 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6311 "{..., pc}: original reg list %.4x, modified "
6312 "list %.4x\n"), rn, writeback ? "!" : "",
6313 (int) insn & 0xffff, new_regmask);
6314
6315 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6316
6317 dsc->cleanup = &cleanup_block_load_pc;
6318 }
6319 }
6320 else
6321 {
6322 /* STM of a list of registers which includes PC. Run the instruction
6323 as-is, but out of line: this will store the wrong value for the PC,
6324 so we must manually fix up the memory in the cleanup routine.
6325 Doing things this way has the advantage that we can auto-detect
6326 the offset of the PC write (which is architecture-dependent) in
6327 the cleanup routine. */
6328 dsc->modinsn[0] = insn;
6329
6330 dsc->cleanup = &cleanup_block_store_pc;
6331 }
6332
6333 return 0;
6334 }
6335
6336 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6337 for Linux, where some SVC instructions must be treated specially. */
6338
6339 static void
6340 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6341 struct displaced_step_closure *dsc)
6342 {
6343 CORE_ADDR from = dsc->insn_addr;
6344 CORE_ADDR resume_addr = from + 4;
6345
6346 if (debug_displaced)
6347 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6348 "%.8lx\n", (unsigned long) resume_addr);
6349
6350 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6351 }
6352
6353 static int
6354 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6355 struct regcache *regs, struct displaced_step_closure *dsc)
6356 {
6357 CORE_ADDR from = dsc->insn_addr;
6358
6359 /* Allow OS-specific code to override SVC handling. */
6360 if (dsc->u.svc.copy_svc_os)
6361 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
6362
6363 if (debug_displaced)
6364 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6365 (unsigned long) insn);
6366
6367 /* Preparation: none.
6368 Insn: unmodified svc.
6369 Cleanup: pc <- insn_addr + 4. */
6370
6371 dsc->modinsn[0] = insn;
6372
6373 dsc->cleanup = &cleanup_svc;
6374 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6375 instruction. */
6376 dsc->wrote_to_pc = 1;
6377
6378 return 0;
6379 }
6380
6381 /* Copy undefined instructions. */
6382
6383 static int
6384 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6385 struct displaced_step_closure *dsc)
6386 {
6387 if (debug_displaced)
6388 fprintf_unfiltered (gdb_stdlog,
6389 "displaced: copying undefined insn %.8lx\n",
6390 (unsigned long) insn);
6391
6392 dsc->modinsn[0] = insn;
6393
6394 return 0;
6395 }
6396
6397 /* Copy unpredictable instructions. */
6398
6399 static int
6400 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6401 struct displaced_step_closure *dsc)
6402 {
6403 if (debug_displaced)
6404 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6405 "%.8lx\n", (unsigned long) insn);
6406
6407 dsc->modinsn[0] = insn;
6408
6409 return 0;
6410 }
6411
6412 /* The decode_* functions are instruction decoding helpers. They mostly follow
6413 the presentation in the ARM ARM. */
6414
6415 static int
6416 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
6417 struct regcache *regs,
6418 struct displaced_step_closure *dsc)
6419 {
6420 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
6421 unsigned int rn = bits (insn, 16, 19);
6422
6423 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
6424 return copy_unmodified (gdbarch, insn, "cps", dsc);
6425 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
6426 return copy_unmodified (gdbarch, insn, "setend", dsc);
6427 else if ((op1 & 0x60) == 0x20)
6428 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
6429 else if ((op1 & 0x71) == 0x40)
6430 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
6431 else if ((op1 & 0x77) == 0x41)
6432 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6433 else if ((op1 & 0x77) == 0x45)
6434 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
6435 else if ((op1 & 0x77) == 0x51)
6436 {
6437 if (rn != 0xf)
6438 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6439 else
6440 return copy_unpred (gdbarch, insn, dsc);
6441 }
6442 else if ((op1 & 0x77) == 0x55)
6443 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6444 else if (op1 == 0x57)
6445 switch (op2)
6446 {
6447 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
6448 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
6449 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
6450 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
6451 default: return copy_unpred (gdbarch, insn, dsc);
6452 }
6453 else if ((op1 & 0x63) == 0x43)
6454 return copy_unpred (gdbarch, insn, dsc);
6455 else if ((op2 & 0x1) == 0x0)
6456 switch (op1 & ~0x80)
6457 {
6458 case 0x61:
6459 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6460 case 0x65:
6461 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
6462 case 0x71: case 0x75:
6463 /* pld/pldw reg. */
6464 return copy_preload_reg (gdbarch, insn, regs, dsc);
6465 case 0x63: case 0x67: case 0x73: case 0x77:
6466 return copy_unpred (gdbarch, insn, dsc);
6467 default:
6468 return copy_undef (gdbarch, insn, dsc);
6469 }
6470 else
6471 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
6472 }
6473
6474 static int
6475 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
6476 struct regcache *regs,
6477 struct displaced_step_closure *dsc)
6478 {
6479 if (bit (insn, 27) == 0)
6480 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
6481 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
6482 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
6483 {
6484 case 0x0: case 0x2:
6485 return copy_unmodified (gdbarch, insn, "srs", dsc);
6486
6487 case 0x1: case 0x3:
6488 return copy_unmodified (gdbarch, insn, "rfe", dsc);
6489
6490 case 0x4: case 0x5: case 0x6: case 0x7:
6491 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
6492
6493 case 0x8:
6494 switch ((insn & 0xe00000) >> 21)
6495 {
6496 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
6497 /* stc/stc2. */
6498 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6499
6500 case 0x2:
6501 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6502
6503 default:
6504 return copy_undef (gdbarch, insn, dsc);
6505 }
6506
6507 case 0x9:
6508 {
6509 int rn_f = (bits (insn, 16, 19) == 0xf);
6510 switch ((insn & 0xe00000) >> 21)
6511 {
6512 case 0x1: case 0x3:
6513 /* ldc/ldc2 imm (undefined for rn == pc). */
6514 return rn_f ? copy_undef (gdbarch, insn, dsc)
6515 : copy_copro_load_store (gdbarch, insn, regs, dsc);
6516
6517 case 0x2:
6518 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6519
6520 case 0x4: case 0x5: case 0x6: case 0x7:
6521 /* ldc/ldc2 lit (undefined for rn != pc). */
6522 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
6523 : copy_undef (gdbarch, insn, dsc);
6524
6525 default:
6526 return copy_undef (gdbarch, insn, dsc);
6527 }
6528 }
6529
6530 case 0xa:
6531 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
6532
6533 case 0xb:
6534 if (bits (insn, 16, 19) == 0xf)
6535 /* ldc/ldc2 lit. */
6536 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6537 else
6538 return copy_undef (gdbarch, insn, dsc);
6539
6540 case 0xc:
6541 if (bit (insn, 4))
6542 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6543 else
6544 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6545
6546 case 0xd:
6547 if (bit (insn, 4))
6548 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6549 else
6550 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6551
6552 default:
6553 return copy_undef (gdbarch, insn, dsc);
6554 }
6555 }
6556
6557 /* Decode miscellaneous instructions in dp/misc encoding space. */
6558
6559 static int
6560 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
6561 struct regcache *regs,
6562 struct displaced_step_closure *dsc)
6563 {
6564 unsigned int op2 = bits (insn, 4, 6);
6565 unsigned int op = bits (insn, 21, 22);
6566 unsigned int op1 = bits (insn, 16, 19);
6567
6568 switch (op2)
6569 {
6570 case 0x0:
6571 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
6572
6573 case 0x1:
6574 if (op == 0x1) /* bx. */
6575 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
6576 else if (op == 0x3)
6577 return copy_unmodified (gdbarch, insn, "clz", dsc);
6578 else
6579 return copy_undef (gdbarch, insn, dsc);
6580
6581 case 0x2:
6582 if (op == 0x1)
6583 /* Not really supported. */
6584 return copy_unmodified (gdbarch, insn, "bxj", dsc);
6585 else
6586 return copy_undef (gdbarch, insn, dsc);
6587
6588 case 0x3:
6589 if (op == 0x1)
6590 return copy_bx_blx_reg (gdbarch, insn,
6591 regs, dsc); /* blx register. */
6592 else
6593 return copy_undef (gdbarch, insn, dsc);
6594
6595 case 0x5:
6596 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
6597
6598 case 0x7:
6599 if (op == 0x1)
6600 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
6601 else if (op == 0x3)
6602 /* Not really supported. */
6603 return copy_unmodified (gdbarch, insn, "smc", dsc);
6604
6605 default:
6606 return copy_undef (gdbarch, insn, dsc);
6607 }
6608 }
6609
6610 static int
6611 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6612 struct displaced_step_closure *dsc)
6613 {
6614 if (bit (insn, 25))
6615 switch (bits (insn, 20, 24))
6616 {
6617 case 0x10:
6618 return copy_unmodified (gdbarch, insn, "movw", dsc);
6619
6620 case 0x14:
6621 return copy_unmodified (gdbarch, insn, "movt", dsc);
6622
6623 case 0x12: case 0x16:
6624 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
6625
6626 default:
6627 return copy_alu_imm (gdbarch, insn, regs, dsc);
6628 }
6629 else
6630 {
6631 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
6632
6633 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
6634 return copy_alu_reg (gdbarch, insn, regs, dsc);
6635 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
6636 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
6637 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
6638 return decode_miscellaneous (gdbarch, insn, regs, dsc);
6639 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
6640 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
6641 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
6642 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
6643 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
6644 return copy_unmodified (gdbarch, insn, "synch", dsc);
6645 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
6646 /* 2nd arg means "unpriveleged". */
6647 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
6648 dsc);
6649 }
6650
6651 /* Should be unreachable. */
6652 return 1;
6653 }
6654
6655 static int
6656 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
6657 struct regcache *regs,
6658 struct displaced_step_closure *dsc)
6659 {
6660 int a = bit (insn, 25), b = bit (insn, 4);
6661 uint32_t op1 = bits (insn, 20, 24);
6662 int rn_f = bits (insn, 16, 19) == 0xf;
6663
6664 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
6665 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
6666 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
6667 else if ((!a && (op1 & 0x17) == 0x02)
6668 || (a && (op1 & 0x17) == 0x02 && !b))
6669 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
6670 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
6671 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
6672 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
6673 else if ((!a && (op1 & 0x17) == 0x03)
6674 || (a && (op1 & 0x17) == 0x03 && !b))
6675 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
6676 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
6677 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
6678 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
6679 else if ((!a && (op1 & 0x17) == 0x06)
6680 || (a && (op1 & 0x17) == 0x06 && !b))
6681 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
6682 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
6683 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
6684 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
6685 else if ((!a && (op1 & 0x17) == 0x07)
6686 || (a && (op1 & 0x17) == 0x07 && !b))
6687 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
6688
6689 /* Should be unreachable. */
6690 return 1;
6691 }
6692
6693 static int
6694 decode_media (struct gdbarch *gdbarch, uint32_t insn,
6695 struct displaced_step_closure *dsc)
6696 {
6697 switch (bits (insn, 20, 24))
6698 {
6699 case 0x00: case 0x01: case 0x02: case 0x03:
6700 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
6701
6702 case 0x04: case 0x05: case 0x06: case 0x07:
6703 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
6704
6705 case 0x08: case 0x09: case 0x0a: case 0x0b:
6706 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
6707 return copy_unmodified (gdbarch, insn,
6708 "decode/pack/unpack/saturate/reverse", dsc);
6709
6710 case 0x18:
6711 if (bits (insn, 5, 7) == 0) /* op2. */
6712 {
6713 if (bits (insn, 12, 15) == 0xf)
6714 return copy_unmodified (gdbarch, insn, "usad8", dsc);
6715 else
6716 return copy_unmodified (gdbarch, insn, "usada8", dsc);
6717 }
6718 else
6719 return copy_undef (gdbarch, insn, dsc);
6720
6721 case 0x1a: case 0x1b:
6722 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6723 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
6724 else
6725 return copy_undef (gdbarch, insn, dsc);
6726
6727 case 0x1c: case 0x1d:
6728 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
6729 {
6730 if (bits (insn, 0, 3) == 0xf)
6731 return copy_unmodified (gdbarch, insn, "bfc", dsc);
6732 else
6733 return copy_unmodified (gdbarch, insn, "bfi", dsc);
6734 }
6735 else
6736 return copy_undef (gdbarch, insn, dsc);
6737
6738 case 0x1e: case 0x1f:
6739 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6740 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
6741 else
6742 return copy_undef (gdbarch, insn, dsc);
6743 }
6744
6745 /* Should be unreachable. */
6746 return 1;
6747 }
6748
6749 static int
6750 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
6751 struct regcache *regs, struct displaced_step_closure *dsc)
6752 {
6753 if (bit (insn, 25))
6754 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
6755 else
6756 return copy_block_xfer (gdbarch, insn, regs, dsc);
6757 }
6758
6759 static int
6760 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
6761 struct regcache *regs,
6762 struct displaced_step_closure *dsc)
6763 {
6764 unsigned int opcode = bits (insn, 20, 24);
6765
6766 switch (opcode)
6767 {
6768 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
6769 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
6770
6771 case 0x08: case 0x0a: case 0x0c: case 0x0e:
6772 case 0x12: case 0x16:
6773 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
6774
6775 case 0x09: case 0x0b: case 0x0d: case 0x0f:
6776 case 0x13: case 0x17:
6777 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
6778
6779 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
6780 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
6781 /* Note: no writeback for these instructions. Bit 25 will always be
6782 zero though (via caller), so the following works OK. */
6783 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6784 }
6785
6786 /* Should be unreachable. */
6787 return 1;
6788 }
6789
6790 static int
6791 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6792 struct regcache *regs, struct displaced_step_closure *dsc)
6793 {
6794 unsigned int op1 = bits (insn, 20, 25);
6795 int op = bit (insn, 4);
6796 unsigned int coproc = bits (insn, 8, 11);
6797 unsigned int rn = bits (insn, 16, 19);
6798
6799 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6800 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6801 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6802 && (coproc & 0xe) != 0xa)
6803 /* stc/stc2. */
6804 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6805 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6806 && (coproc & 0xe) != 0xa)
6807 /* ldc/ldc2 imm/lit. */
6808 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6809 else if ((op1 & 0x3e) == 0x00)
6810 return copy_undef (gdbarch, insn, dsc);
6811 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6812 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6813 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6814 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6815 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6816 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6817 else if ((op1 & 0x30) == 0x20 && !op)
6818 {
6819 if ((coproc & 0xe) == 0xa)
6820 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6821 else
6822 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6823 }
6824 else if ((op1 & 0x30) == 0x20 && op)
6825 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6826 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6827 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6828 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6829 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6830 else if ((op1 & 0x30) == 0x30)
6831 return copy_svc (gdbarch, insn, to, regs, dsc);
6832 else
6833 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6834 }
6835
6836 static void
6837 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6838 CORE_ADDR to, struct regcache *regs,
6839 struct displaced_step_closure *dsc)
6840 {
6841 error (_("Displaced stepping is only supported in ARM mode"));
6842 }
6843
6844 void
6845 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6846 CORE_ADDR to, struct regcache *regs,
6847 struct displaced_step_closure *dsc)
6848 {
6849 int err = 0;
6850 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6851 uint32_t insn;
6852
6853 /* Most displaced instructions use a 1-instruction scratch space, so set this
6854 here and override below if/when necessary. */
6855 dsc->numinsns = 1;
6856 dsc->insn_addr = from;
6857 dsc->scratch_base = to;
6858 dsc->cleanup = NULL;
6859 dsc->wrote_to_pc = 0;
6860
6861 if (!displaced_in_arm_mode (regs))
6862 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
6863
6864 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
6865 if (debug_displaced)
6866 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
6867 "at %.8lx\n", (unsigned long) insn,
6868 (unsigned long) from);
6869
6870 if ((insn & 0xf0000000) == 0xf0000000)
6871 err = decode_unconditional (gdbarch, insn, regs, dsc);
6872 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
6873 {
6874 case 0x0: case 0x1: case 0x2: case 0x3:
6875 err = decode_dp_misc (gdbarch, insn, regs, dsc);
6876 break;
6877
6878 case 0x4: case 0x5: case 0x6:
6879 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
6880 break;
6881
6882 case 0x7:
6883 err = decode_media (gdbarch, insn, dsc);
6884 break;
6885
6886 case 0x8: case 0x9: case 0xa: case 0xb:
6887 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
6888 break;
6889
6890 case 0xc: case 0xd: case 0xe: case 0xf:
6891 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
6892 break;
6893 }
6894
6895 if (err)
6896 internal_error (__FILE__, __LINE__,
6897 _("arm_process_displaced_insn: Instruction decode error"));
6898 }
6899
6900 /* Actually set up the scratch space for a displaced instruction. */
6901
6902 void
6903 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
6904 CORE_ADDR to, struct displaced_step_closure *dsc)
6905 {
6906 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6907 unsigned int i;
6908 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6909
6910 /* Poke modified instruction(s). */
6911 for (i = 0; i < dsc->numinsns; i++)
6912 {
6913 if (debug_displaced)
6914 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
6915 "%.8lx\n", (unsigned long) dsc->modinsn[i],
6916 (unsigned long) to + i * 4);
6917 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
6918 dsc->modinsn[i]);
6919 }
6920
6921 /* Put breakpoint afterwards. */
6922 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
6923 tdep->arm_breakpoint_size);
6924
6925 if (debug_displaced)
6926 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
6927 paddress (gdbarch, from), paddress (gdbarch, to));
6928 }
6929
6930 /* Entry point for copying an instruction into scratch space for displaced
6931 stepping. */
6932
6933 struct displaced_step_closure *
6934 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
6935 CORE_ADDR from, CORE_ADDR to,
6936 struct regcache *regs)
6937 {
6938 struct displaced_step_closure *dsc
6939 = xmalloc (sizeof (struct displaced_step_closure));
6940 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
6941 arm_displaced_init_closure (gdbarch, from, to, dsc);
6942
6943 return dsc;
6944 }
6945
6946 /* Entry point for cleaning things up after a displaced instruction has been
6947 single-stepped. */
6948
6949 void
6950 arm_displaced_step_fixup (struct gdbarch *gdbarch,
6951 struct displaced_step_closure *dsc,
6952 CORE_ADDR from, CORE_ADDR to,
6953 struct regcache *regs)
6954 {
6955 if (dsc->cleanup)
6956 dsc->cleanup (gdbarch, regs, dsc);
6957
6958 if (!dsc->wrote_to_pc)
6959 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
6960 }
6961
6962 #include "bfd-in2.h"
6963 #include "libcoff.h"
6964
6965 static int
6966 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
6967 {
6968 struct gdbarch *gdbarch = info->application_data;
6969
6970 if (arm_pc_is_thumb (gdbarch, memaddr))
6971 {
6972 static asymbol *asym;
6973 static combined_entry_type ce;
6974 static struct coff_symbol_struct csym;
6975 static struct bfd fake_bfd;
6976 static bfd_target fake_target;
6977
6978 if (csym.native == NULL)
6979 {
6980 /* Create a fake symbol vector containing a Thumb symbol.
6981 This is solely so that the code in print_insn_little_arm()
6982 and print_insn_big_arm() in opcodes/arm-dis.c will detect
6983 the presence of a Thumb symbol and switch to decoding
6984 Thumb instructions. */
6985
6986 fake_target.flavour = bfd_target_coff_flavour;
6987 fake_bfd.xvec = &fake_target;
6988 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
6989 csym.native = &ce;
6990 csym.symbol.the_bfd = &fake_bfd;
6991 csym.symbol.name = "fake";
6992 asym = (asymbol *) & csym;
6993 }
6994
6995 memaddr = UNMAKE_THUMB_ADDR (memaddr);
6996 info->symbols = &asym;
6997 }
6998 else
6999 info->symbols = NULL;
7000
7001 if (info->endian == BFD_ENDIAN_BIG)
7002 return print_insn_big_arm (memaddr, info);
7003 else
7004 return print_insn_little_arm (memaddr, info);
7005 }
7006
7007 /* The following define instruction sequences that will cause ARM
7008 cpu's to take an undefined instruction trap. These are used to
7009 signal a breakpoint to GDB.
7010
7011 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
7012 modes. A different instruction is required for each mode. The ARM
7013 cpu's can also be big or little endian. Thus four different
7014 instructions are needed to support all cases.
7015
7016 Note: ARMv4 defines several new instructions that will take the
7017 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7018 not in fact add the new instructions. The new undefined
7019 instructions in ARMv4 are all instructions that had no defined
7020 behaviour in earlier chips. There is no guarantee that they will
7021 raise an exception, but may be treated as NOP's. In practice, it
7022 may only safe to rely on instructions matching:
7023
7024 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7025 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7026 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7027
7028 Even this may only true if the condition predicate is true. The
7029 following use a condition predicate of ALWAYS so it is always TRUE.
7030
7031 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7032 and NetBSD all use a software interrupt rather than an undefined
7033 instruction to force a trap. This can be handled by by the
7034 abi-specific code during establishment of the gdbarch vector. */
7035
7036 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7037 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7038 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7039 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
7040
7041 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
7042 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
7043 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
7044 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7045
7046 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
7047 the program counter value to determine whether a 16-bit or 32-bit
7048 breakpoint should be used. It returns a pointer to a string of
7049 bytes that encode a breakpoint instruction, stores the length of
7050 the string to *lenptr, and adjusts the program counter (if
7051 necessary) to point to the actual memory location where the
7052 breakpoint should be inserted. */
7053
7054 static const unsigned char *
7055 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
7056 {
7057 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7058 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7059
7060 if (arm_pc_is_thumb (gdbarch, *pcptr))
7061 {
7062 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
7063
7064 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7065 check whether we are replacing a 32-bit instruction. */
7066 if (tdep->thumb2_breakpoint != NULL)
7067 {
7068 gdb_byte buf[2];
7069 if (target_read_memory (*pcptr, buf, 2) == 0)
7070 {
7071 unsigned short inst1;
7072 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
7073 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
7074 {
7075 *lenptr = tdep->thumb2_breakpoint_size;
7076 return tdep->thumb2_breakpoint;
7077 }
7078 }
7079 }
7080
7081 *lenptr = tdep->thumb_breakpoint_size;
7082 return tdep->thumb_breakpoint;
7083 }
7084 else
7085 {
7086 *lenptr = tdep->arm_breakpoint_size;
7087 return tdep->arm_breakpoint;
7088 }
7089 }
7090
7091 static void
7092 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
7093 int *kindptr)
7094 {
7095 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7096
7097 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
7098
7099 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
7100 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7101 that this is not confused with a 32-bit ARM breakpoint. */
7102 *kindptr = 3;
7103 }
7104
7105 /* Extract from an array REGBUF containing the (raw) register state a
7106 function return value of type TYPE, and copy that, in virtual
7107 format, into VALBUF. */
7108
7109 static void
7110 arm_extract_return_value (struct type *type, struct regcache *regs,
7111 gdb_byte *valbuf)
7112 {
7113 struct gdbarch *gdbarch = get_regcache_arch (regs);
7114 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7115
7116 if (TYPE_CODE_FLT == TYPE_CODE (type))
7117 {
7118 switch (gdbarch_tdep (gdbarch)->fp_model)
7119 {
7120 case ARM_FLOAT_FPA:
7121 {
7122 /* The value is in register F0 in internal format. We need to
7123 extract the raw value and then convert it to the desired
7124 internal type. */
7125 bfd_byte tmpbuf[FP_REGISTER_SIZE];
7126
7127 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
7128 convert_from_extended (floatformat_from_type (type), tmpbuf,
7129 valbuf, gdbarch_byte_order (gdbarch));
7130 }
7131 break;
7132
7133 case ARM_FLOAT_SOFT_FPA:
7134 case ARM_FLOAT_SOFT_VFP:
7135 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7136 not using the VFP ABI code. */
7137 case ARM_FLOAT_VFP:
7138 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
7139 if (TYPE_LENGTH (type) > 4)
7140 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
7141 valbuf + INT_REGISTER_SIZE);
7142 break;
7143
7144 default:
7145 internal_error (__FILE__, __LINE__,
7146 _("arm_extract_return_value: "
7147 "Floating point model not supported"));
7148 break;
7149 }
7150 }
7151 else if (TYPE_CODE (type) == TYPE_CODE_INT
7152 || TYPE_CODE (type) == TYPE_CODE_CHAR
7153 || TYPE_CODE (type) == TYPE_CODE_BOOL
7154 || TYPE_CODE (type) == TYPE_CODE_PTR
7155 || TYPE_CODE (type) == TYPE_CODE_REF
7156 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7157 {
7158 /* If the the type is a plain integer, then the access is
7159 straight-forward. Otherwise we have to play around a bit more. */
7160 int len = TYPE_LENGTH (type);
7161 int regno = ARM_A1_REGNUM;
7162 ULONGEST tmp;
7163
7164 while (len > 0)
7165 {
7166 /* By using store_unsigned_integer we avoid having to do
7167 anything special for small big-endian values. */
7168 regcache_cooked_read_unsigned (regs, regno++, &tmp);
7169 store_unsigned_integer (valbuf,
7170 (len > INT_REGISTER_SIZE
7171 ? INT_REGISTER_SIZE : len),
7172 byte_order, tmp);
7173 len -= INT_REGISTER_SIZE;
7174 valbuf += INT_REGISTER_SIZE;
7175 }
7176 }
7177 else
7178 {
7179 /* For a structure or union the behaviour is as if the value had
7180 been stored to word-aligned memory and then loaded into
7181 registers with 32-bit load instruction(s). */
7182 int len = TYPE_LENGTH (type);
7183 int regno = ARM_A1_REGNUM;
7184 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7185
7186 while (len > 0)
7187 {
7188 regcache_cooked_read (regs, regno++, tmpbuf);
7189 memcpy (valbuf, tmpbuf,
7190 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7191 len -= INT_REGISTER_SIZE;
7192 valbuf += INT_REGISTER_SIZE;
7193 }
7194 }
7195 }
7196
7197
7198 /* Will a function return an aggregate type in memory or in a
7199 register? Return 0 if an aggregate type can be returned in a
7200 register, 1 if it must be returned in memory. */
7201
7202 static int
7203 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7204 {
7205 int nRc;
7206 enum type_code code;
7207
7208 CHECK_TYPEDEF (type);
7209
7210 /* In the ARM ABI, "integer" like aggregate types are returned in
7211 registers. For an aggregate type to be integer like, its size
7212 must be less than or equal to INT_REGISTER_SIZE and the
7213 offset of each addressable subfield must be zero. Note that bit
7214 fields are not addressable, and all addressable subfields of
7215 unions always start at offset zero.
7216
7217 This function is based on the behaviour of GCC 2.95.1.
7218 See: gcc/arm.c: arm_return_in_memory() for details.
7219
7220 Note: All versions of GCC before GCC 2.95.2 do not set up the
7221 parameters correctly for a function returning the following
7222 structure: struct { float f;}; This should be returned in memory,
7223 not a register. Richard Earnshaw sent me a patch, but I do not
7224 know of any way to detect if a function like the above has been
7225 compiled with the correct calling convention. */
7226
7227 /* All aggregate types that won't fit in a register must be returned
7228 in memory. */
7229 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7230 {
7231 return 1;
7232 }
7233
7234 /* The AAPCS says all aggregates not larger than a word are returned
7235 in a register. */
7236 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7237 return 0;
7238
7239 /* The only aggregate types that can be returned in a register are
7240 structs and unions. Arrays must be returned in memory. */
7241 code = TYPE_CODE (type);
7242 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
7243 {
7244 return 1;
7245 }
7246
7247 /* Assume all other aggregate types can be returned in a register.
7248 Run a check for structures, unions and arrays. */
7249 nRc = 0;
7250
7251 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7252 {
7253 int i;
7254 /* Need to check if this struct/union is "integer" like. For
7255 this to be true, its size must be less than or equal to
7256 INT_REGISTER_SIZE and the offset of each addressable
7257 subfield must be zero. Note that bit fields are not
7258 addressable, and unions always start at offset zero. If any
7259 of the subfields is a floating point type, the struct/union
7260 cannot be an integer type. */
7261
7262 /* For each field in the object, check:
7263 1) Is it FP? --> yes, nRc = 1;
7264 2) Is it addressable (bitpos != 0) and
7265 not packed (bitsize == 0)?
7266 --> yes, nRc = 1
7267 */
7268
7269 for (i = 0; i < TYPE_NFIELDS (type); i++)
7270 {
7271 enum type_code field_type_code;
7272 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7273 i)));
7274
7275 /* Is it a floating point type field? */
7276 if (field_type_code == TYPE_CODE_FLT)
7277 {
7278 nRc = 1;
7279 break;
7280 }
7281
7282 /* If bitpos != 0, then we have to care about it. */
7283 if (TYPE_FIELD_BITPOS (type, i) != 0)
7284 {
7285 /* Bitfields are not addressable. If the field bitsize is
7286 zero, then the field is not packed. Hence it cannot be
7287 a bitfield or any other packed type. */
7288 if (TYPE_FIELD_BITSIZE (type, i) == 0)
7289 {
7290 nRc = 1;
7291 break;
7292 }
7293 }
7294 }
7295 }
7296
7297 return nRc;
7298 }
7299
7300 /* Write into appropriate registers a function return value of type
7301 TYPE, given in virtual format. */
7302
7303 static void
7304 arm_store_return_value (struct type *type, struct regcache *regs,
7305 const gdb_byte *valbuf)
7306 {
7307 struct gdbarch *gdbarch = get_regcache_arch (regs);
7308 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7309
7310 if (TYPE_CODE (type) == TYPE_CODE_FLT)
7311 {
7312 char buf[MAX_REGISTER_SIZE];
7313
7314 switch (gdbarch_tdep (gdbarch)->fp_model)
7315 {
7316 case ARM_FLOAT_FPA:
7317
7318 convert_to_extended (floatformat_from_type (type), buf, valbuf,
7319 gdbarch_byte_order (gdbarch));
7320 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
7321 break;
7322
7323 case ARM_FLOAT_SOFT_FPA:
7324 case ARM_FLOAT_SOFT_VFP:
7325 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7326 not using the VFP ABI code. */
7327 case ARM_FLOAT_VFP:
7328 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
7329 if (TYPE_LENGTH (type) > 4)
7330 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
7331 valbuf + INT_REGISTER_SIZE);
7332 break;
7333
7334 default:
7335 internal_error (__FILE__, __LINE__,
7336 _("arm_store_return_value: Floating "
7337 "point model not supported"));
7338 break;
7339 }
7340 }
7341 else if (TYPE_CODE (type) == TYPE_CODE_INT
7342 || TYPE_CODE (type) == TYPE_CODE_CHAR
7343 || TYPE_CODE (type) == TYPE_CODE_BOOL
7344 || TYPE_CODE (type) == TYPE_CODE_PTR
7345 || TYPE_CODE (type) == TYPE_CODE_REF
7346 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7347 {
7348 if (TYPE_LENGTH (type) <= 4)
7349 {
7350 /* Values of one word or less are zero/sign-extended and
7351 returned in r0. */
7352 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7353 LONGEST val = unpack_long (type, valbuf);
7354
7355 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
7356 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
7357 }
7358 else
7359 {
7360 /* Integral values greater than one word are stored in consecutive
7361 registers starting with r0. This will always be a multiple of
7362 the regiser size. */
7363 int len = TYPE_LENGTH (type);
7364 int regno = ARM_A1_REGNUM;
7365
7366 while (len > 0)
7367 {
7368 regcache_cooked_write (regs, regno++, valbuf);
7369 len -= INT_REGISTER_SIZE;
7370 valbuf += INT_REGISTER_SIZE;
7371 }
7372 }
7373 }
7374 else
7375 {
7376 /* For a structure or union the behaviour is as if the value had
7377 been stored to word-aligned memory and then loaded into
7378 registers with 32-bit load instruction(s). */
7379 int len = TYPE_LENGTH (type);
7380 int regno = ARM_A1_REGNUM;
7381 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7382
7383 while (len > 0)
7384 {
7385 memcpy (tmpbuf, valbuf,
7386 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7387 regcache_cooked_write (regs, regno++, tmpbuf);
7388 len -= INT_REGISTER_SIZE;
7389 valbuf += INT_REGISTER_SIZE;
7390 }
7391 }
7392 }
7393
7394
7395 /* Handle function return values. */
7396
7397 static enum return_value_convention
7398 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
7399 struct type *valtype, struct regcache *regcache,
7400 gdb_byte *readbuf, const gdb_byte *writebuf)
7401 {
7402 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7403 enum arm_vfp_cprc_base_type vfp_base_type;
7404 int vfp_base_count;
7405
7406 if (arm_vfp_abi_for_function (gdbarch, func_type)
7407 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
7408 {
7409 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
7410 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
7411 int i;
7412 for (i = 0; i < vfp_base_count; i++)
7413 {
7414 if (reg_char == 'q')
7415 {
7416 if (writebuf)
7417 arm_neon_quad_write (gdbarch, regcache, i,
7418 writebuf + i * unit_length);
7419
7420 if (readbuf)
7421 arm_neon_quad_read (gdbarch, regcache, i,
7422 readbuf + i * unit_length);
7423 }
7424 else
7425 {
7426 char name_buf[4];
7427 int regnum;
7428
7429 sprintf (name_buf, "%c%d", reg_char, i);
7430 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7431 strlen (name_buf));
7432 if (writebuf)
7433 regcache_cooked_write (regcache, regnum,
7434 writebuf + i * unit_length);
7435 if (readbuf)
7436 regcache_cooked_read (regcache, regnum,
7437 readbuf + i * unit_length);
7438 }
7439 }
7440 return RETURN_VALUE_REGISTER_CONVENTION;
7441 }
7442
7443 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
7444 || TYPE_CODE (valtype) == TYPE_CODE_UNION
7445 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
7446 {
7447 if (tdep->struct_return == pcc_struct_return
7448 || arm_return_in_memory (gdbarch, valtype))
7449 return RETURN_VALUE_STRUCT_CONVENTION;
7450 }
7451
7452 if (writebuf)
7453 arm_store_return_value (valtype, regcache, writebuf);
7454
7455 if (readbuf)
7456 arm_extract_return_value (valtype, regcache, readbuf);
7457
7458 return RETURN_VALUE_REGISTER_CONVENTION;
7459 }
7460
7461
7462 static int
7463 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
7464 {
7465 struct gdbarch *gdbarch = get_frame_arch (frame);
7466 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7467 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7468 CORE_ADDR jb_addr;
7469 char buf[INT_REGISTER_SIZE];
7470
7471 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
7472
7473 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
7474 INT_REGISTER_SIZE))
7475 return 0;
7476
7477 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
7478 return 1;
7479 }
7480
7481 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7482 return the target PC. Otherwise return 0. */
7483
7484 CORE_ADDR
7485 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
7486 {
7487 char *name;
7488 int namelen;
7489 CORE_ADDR start_addr;
7490
7491 /* Find the starting address and name of the function containing the PC. */
7492 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
7493 return 0;
7494
7495 /* If PC is in a Thumb call or return stub, return the address of the
7496 target PC, which is in a register. The thunk functions are called
7497 _call_via_xx, where x is the register name. The possible names
7498 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
7499 functions, named __ARM_call_via_r[0-7]. */
7500 if (strncmp (name, "_call_via_", 10) == 0
7501 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
7502 {
7503 /* Use the name suffix to determine which register contains the
7504 target PC. */
7505 static char *table[15] =
7506 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
7507 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
7508 };
7509 int regno;
7510 int offset = strlen (name) - 2;
7511
7512 for (regno = 0; regno <= 14; regno++)
7513 if (strcmp (&name[offset], table[regno]) == 0)
7514 return get_frame_register_unsigned (frame, regno);
7515 }
7516
7517 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
7518 non-interworking calls to foo. We could decode the stubs
7519 to find the target but it's easier to use the symbol table. */
7520 namelen = strlen (name);
7521 if (name[0] == '_' && name[1] == '_'
7522 && ((namelen > 2 + strlen ("_from_thumb")
7523 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
7524 strlen ("_from_thumb")) == 0)
7525 || (namelen > 2 + strlen ("_from_arm")
7526 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
7527 strlen ("_from_arm")) == 0)))
7528 {
7529 char *target_name;
7530 int target_len = namelen - 2;
7531 struct minimal_symbol *minsym;
7532 struct objfile *objfile;
7533 struct obj_section *sec;
7534
7535 if (name[namelen - 1] == 'b')
7536 target_len -= strlen ("_from_thumb");
7537 else
7538 target_len -= strlen ("_from_arm");
7539
7540 target_name = alloca (target_len + 1);
7541 memcpy (target_name, name + 2, target_len);
7542 target_name[target_len] = '\0';
7543
7544 sec = find_pc_section (pc);
7545 objfile = (sec == NULL) ? NULL : sec->objfile;
7546 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
7547 if (minsym != NULL)
7548 return SYMBOL_VALUE_ADDRESS (minsym);
7549 else
7550 return 0;
7551 }
7552
7553 return 0; /* not a stub */
7554 }
7555
7556 static void
7557 set_arm_command (char *args, int from_tty)
7558 {
7559 printf_unfiltered (_("\
7560 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7561 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
7562 }
7563
7564 static void
7565 show_arm_command (char *args, int from_tty)
7566 {
7567 cmd_show_list (showarmcmdlist, from_tty, "");
7568 }
7569
7570 static void
7571 arm_update_current_architecture (void)
7572 {
7573 struct gdbarch_info info;
7574
7575 /* If the current architecture is not ARM, we have nothing to do. */
7576 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
7577 return;
7578
7579 /* Update the architecture. */
7580 gdbarch_info_init (&info);
7581
7582 if (!gdbarch_update_p (info))
7583 internal_error (__FILE__, __LINE__, _("could not update architecture"));
7584 }
7585
7586 static void
7587 set_fp_model_sfunc (char *args, int from_tty,
7588 struct cmd_list_element *c)
7589 {
7590 enum arm_float_model fp_model;
7591
7592 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
7593 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
7594 {
7595 arm_fp_model = fp_model;
7596 break;
7597 }
7598
7599 if (fp_model == ARM_FLOAT_LAST)
7600 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
7601 current_fp_model);
7602
7603 arm_update_current_architecture ();
7604 }
7605
7606 static void
7607 show_fp_model (struct ui_file *file, int from_tty,
7608 struct cmd_list_element *c, const char *value)
7609 {
7610 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7611
7612 if (arm_fp_model == ARM_FLOAT_AUTO
7613 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7614 fprintf_filtered (file, _("\
7615 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
7616 fp_model_strings[tdep->fp_model]);
7617 else
7618 fprintf_filtered (file, _("\
7619 The current ARM floating point model is \"%s\".\n"),
7620 fp_model_strings[arm_fp_model]);
7621 }
7622
7623 static void
7624 arm_set_abi (char *args, int from_tty,
7625 struct cmd_list_element *c)
7626 {
7627 enum arm_abi_kind arm_abi;
7628
7629 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
7630 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
7631 {
7632 arm_abi_global = arm_abi;
7633 break;
7634 }
7635
7636 if (arm_abi == ARM_ABI_LAST)
7637 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
7638 arm_abi_string);
7639
7640 arm_update_current_architecture ();
7641 }
7642
7643 static void
7644 arm_show_abi (struct ui_file *file, int from_tty,
7645 struct cmd_list_element *c, const char *value)
7646 {
7647 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7648
7649 if (arm_abi_global == ARM_ABI_AUTO
7650 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7651 fprintf_filtered (file, _("\
7652 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
7653 arm_abi_strings[tdep->arm_abi]);
7654 else
7655 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
7656 arm_abi_string);
7657 }
7658
7659 static void
7660 arm_show_fallback_mode (struct ui_file *file, int from_tty,
7661 struct cmd_list_element *c, const char *value)
7662 {
7663 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7664
7665 fprintf_filtered (file,
7666 _("The current execution mode assumed "
7667 "(when symbols are unavailable) is \"%s\".\n"),
7668 arm_fallback_mode_string);
7669 }
7670
7671 static void
7672 arm_show_force_mode (struct ui_file *file, int from_tty,
7673 struct cmd_list_element *c, const char *value)
7674 {
7675 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7676
7677 fprintf_filtered (file,
7678 _("The current execution mode assumed "
7679 "(even when symbols are available) is \"%s\".\n"),
7680 arm_force_mode_string);
7681 }
7682
7683 /* If the user changes the register disassembly style used for info
7684 register and other commands, we have to also switch the style used
7685 in opcodes for disassembly output. This function is run in the "set
7686 arm disassembly" command, and does that. */
7687
7688 static void
7689 set_disassembly_style_sfunc (char *args, int from_tty,
7690 struct cmd_list_element *c)
7691 {
7692 set_disassembly_style ();
7693 }
7694 \f
7695 /* Return the ARM register name corresponding to register I. */
7696 static const char *
7697 arm_register_name (struct gdbarch *gdbarch, int i)
7698 {
7699 const int num_regs = gdbarch_num_regs (gdbarch);
7700
7701 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
7702 && i >= num_regs && i < num_regs + 32)
7703 {
7704 static const char *const vfp_pseudo_names[] = {
7705 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7706 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7707 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7708 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7709 };
7710
7711 return vfp_pseudo_names[i - num_regs];
7712 }
7713
7714 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
7715 && i >= num_regs + 32 && i < num_regs + 32 + 16)
7716 {
7717 static const char *const neon_pseudo_names[] = {
7718 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7719 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7720 };
7721
7722 return neon_pseudo_names[i - num_regs - 32];
7723 }
7724
7725 if (i >= ARRAY_SIZE (arm_register_names))
7726 /* These registers are only supported on targets which supply
7727 an XML description. */
7728 return "";
7729
7730 return arm_register_names[i];
7731 }
7732
7733 static void
7734 set_disassembly_style (void)
7735 {
7736 int current;
7737
7738 /* Find the style that the user wants. */
7739 for (current = 0; current < num_disassembly_options; current++)
7740 if (disassembly_style == valid_disassembly_styles[current])
7741 break;
7742 gdb_assert (current < num_disassembly_options);
7743
7744 /* Synchronize the disassembler. */
7745 set_arm_regname_option (current);
7746 }
7747
7748 /* Test whether the coff symbol specific value corresponds to a Thumb
7749 function. */
7750
7751 static int
7752 coff_sym_is_thumb (int val)
7753 {
7754 return (val == C_THUMBEXT
7755 || val == C_THUMBSTAT
7756 || val == C_THUMBEXTFUNC
7757 || val == C_THUMBSTATFUNC
7758 || val == C_THUMBLABEL);
7759 }
7760
7761 /* arm_coff_make_msymbol_special()
7762 arm_elf_make_msymbol_special()
7763
7764 These functions test whether the COFF or ELF symbol corresponds to
7765 an address in thumb code, and set a "special" bit in a minimal
7766 symbol to indicate that it does. */
7767
7768 static void
7769 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
7770 {
7771 /* Thumb symbols are of type STT_LOPROC, (synonymous with
7772 STT_ARM_TFUNC). */
7773 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
7774 == STT_LOPROC)
7775 MSYMBOL_SET_SPECIAL (msym);
7776 }
7777
7778 static void
7779 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
7780 {
7781 if (coff_sym_is_thumb (val))
7782 MSYMBOL_SET_SPECIAL (msym);
7783 }
7784
7785 static void
7786 arm_objfile_data_free (struct objfile *objfile, void *arg)
7787 {
7788 struct arm_per_objfile *data = arg;
7789 unsigned int i;
7790
7791 for (i = 0; i < objfile->obfd->section_count; i++)
7792 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
7793 }
7794
7795 static void
7796 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
7797 asymbol *sym)
7798 {
7799 const char *name = bfd_asymbol_name (sym);
7800 struct arm_per_objfile *data;
7801 VEC(arm_mapping_symbol_s) **map_p;
7802 struct arm_mapping_symbol new_map_sym;
7803
7804 gdb_assert (name[0] == '$');
7805 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
7806 return;
7807
7808 data = objfile_data (objfile, arm_objfile_data_key);
7809 if (data == NULL)
7810 {
7811 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
7812 struct arm_per_objfile);
7813 set_objfile_data (objfile, arm_objfile_data_key, data);
7814 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
7815 objfile->obfd->section_count,
7816 VEC(arm_mapping_symbol_s) *);
7817 }
7818 map_p = &data->section_maps[bfd_get_section (sym)->index];
7819
7820 new_map_sym.value = sym->value;
7821 new_map_sym.type = name[1];
7822
7823 /* Assume that most mapping symbols appear in order of increasing
7824 value. If they were randomly distributed, it would be faster to
7825 always push here and then sort at first use. */
7826 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
7827 {
7828 struct arm_mapping_symbol *prev_map_sym;
7829
7830 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
7831 if (prev_map_sym->value >= sym->value)
7832 {
7833 unsigned int idx;
7834 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
7835 arm_compare_mapping_symbols);
7836 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
7837 return;
7838 }
7839 }
7840
7841 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
7842 }
7843
7844 static void
7845 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7846 {
7847 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7848 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7849
7850 /* If necessary, set the T bit. */
7851 if (arm_apcs_32)
7852 {
7853 ULONGEST val, t_bit;
7854 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7855 t_bit = arm_psr_thumb_bit (gdbarch);
7856 if (arm_pc_is_thumb (gdbarch, pc))
7857 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7858 val | t_bit);
7859 else
7860 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7861 val & ~t_bit);
7862 }
7863 }
7864
7865 /* Read the contents of a NEON quad register, by reading from two
7866 double registers. This is used to implement the quad pseudo
7867 registers, and for argument passing in case the quad registers are
7868 missing; vectors are passed in quad registers when using the VFP
7869 ABI, even if a NEON unit is not present. REGNUM is the index of
7870 the quad register, in [0, 15]. */
7871
7872 static void
7873 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
7874 int regnum, gdb_byte *buf)
7875 {
7876 char name_buf[4];
7877 gdb_byte reg_buf[8];
7878 int offset, double_regnum;
7879
7880 sprintf (name_buf, "d%d", regnum << 1);
7881 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7882 strlen (name_buf));
7883
7884 /* d0 is always the least significant half of q0. */
7885 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7886 offset = 8;
7887 else
7888 offset = 0;
7889
7890 regcache_raw_read (regcache, double_regnum, reg_buf);
7891 memcpy (buf + offset, reg_buf, 8);
7892
7893 offset = 8 - offset;
7894 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
7895 memcpy (buf + offset, reg_buf, 8);
7896 }
7897
7898 static void
7899 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
7900 int regnum, gdb_byte *buf)
7901 {
7902 const int num_regs = gdbarch_num_regs (gdbarch);
7903 char name_buf[4];
7904 gdb_byte reg_buf[8];
7905 int offset, double_regnum;
7906
7907 gdb_assert (regnum >= num_regs);
7908 regnum -= num_regs;
7909
7910 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7911 /* Quad-precision register. */
7912 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
7913 else
7914 {
7915 /* Single-precision register. */
7916 gdb_assert (regnum < 32);
7917
7918 /* s0 is always the least significant half of d0. */
7919 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7920 offset = (regnum & 1) ? 0 : 4;
7921 else
7922 offset = (regnum & 1) ? 4 : 0;
7923
7924 sprintf (name_buf, "d%d", regnum >> 1);
7925 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7926 strlen (name_buf));
7927
7928 regcache_raw_read (regcache, double_regnum, reg_buf);
7929 memcpy (buf, reg_buf + offset, 4);
7930 }
7931 }
7932
7933 /* Store the contents of BUF to a NEON quad register, by writing to
7934 two double registers. This is used to implement the quad pseudo
7935 registers, and for argument passing in case the quad registers are
7936 missing; vectors are passed in quad registers when using the VFP
7937 ABI, even if a NEON unit is not present. REGNUM is the index
7938 of the quad register, in [0, 15]. */
7939
7940 static void
7941 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
7942 int regnum, const gdb_byte *buf)
7943 {
7944 char name_buf[4];
7945 gdb_byte reg_buf[8];
7946 int offset, double_regnum;
7947
7948 sprintf (name_buf, "d%d", regnum << 1);
7949 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7950 strlen (name_buf));
7951
7952 /* d0 is always the least significant half of q0. */
7953 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7954 offset = 8;
7955 else
7956 offset = 0;
7957
7958 regcache_raw_write (regcache, double_regnum, buf + offset);
7959 offset = 8 - offset;
7960 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
7961 }
7962
7963 static void
7964 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
7965 int regnum, const gdb_byte *buf)
7966 {
7967 const int num_regs = gdbarch_num_regs (gdbarch);
7968 char name_buf[4];
7969 gdb_byte reg_buf[8];
7970 int offset, double_regnum;
7971
7972 gdb_assert (regnum >= num_regs);
7973 regnum -= num_regs;
7974
7975 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7976 /* Quad-precision register. */
7977 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
7978 else
7979 {
7980 /* Single-precision register. */
7981 gdb_assert (regnum < 32);
7982
7983 /* s0 is always the least significant half of d0. */
7984 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7985 offset = (regnum & 1) ? 0 : 4;
7986 else
7987 offset = (regnum & 1) ? 4 : 0;
7988
7989 sprintf (name_buf, "d%d", regnum >> 1);
7990 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7991 strlen (name_buf));
7992
7993 regcache_raw_read (regcache, double_regnum, reg_buf);
7994 memcpy (reg_buf + offset, buf, 4);
7995 regcache_raw_write (regcache, double_regnum, reg_buf);
7996 }
7997 }
7998
7999 static struct value *
8000 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8001 {
8002 const int *reg_p = baton;
8003 return value_of_register (*reg_p, frame);
8004 }
8005 \f
8006 static enum gdb_osabi
8007 arm_elf_osabi_sniffer (bfd *abfd)
8008 {
8009 unsigned int elfosabi;
8010 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8011
8012 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8013
8014 if (elfosabi == ELFOSABI_ARM)
8015 /* GNU tools use this value. Check note sections in this case,
8016 as well. */
8017 bfd_map_over_sections (abfd,
8018 generic_elf_osabi_sniff_abi_tag_sections,
8019 &osabi);
8020
8021 /* Anything else will be handled by the generic ELF sniffer. */
8022 return osabi;
8023 }
8024
8025 static int
8026 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8027 struct reggroup *group)
8028 {
8029 /* FPS register's type is INT, but belongs to float_reggroup. Beside
8030 this, FPS register belongs to save_regroup, restore_reggroup, and
8031 all_reggroup, of course. */
8032 if (regnum == ARM_FPS_REGNUM)
8033 return (group == float_reggroup
8034 || group == save_reggroup
8035 || group == restore_reggroup
8036 || group == all_reggroup);
8037 else
8038 return default_register_reggroup_p (gdbarch, regnum, group);
8039 }
8040
8041 \f
8042 /* Initialize the current architecture based on INFO. If possible,
8043 re-use an architecture from ARCHES, which is a list of
8044 architectures already created during this debugging session.
8045
8046 Called e.g. at program startup, when reading a core file, and when
8047 reading a binary file. */
8048
8049 static struct gdbarch *
8050 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8051 {
8052 struct gdbarch_tdep *tdep;
8053 struct gdbarch *gdbarch;
8054 struct gdbarch_list *best_arch;
8055 enum arm_abi_kind arm_abi = arm_abi_global;
8056 enum arm_float_model fp_model = arm_fp_model;
8057 struct tdesc_arch_data *tdesc_data = NULL;
8058 int i, is_m = 0;
8059 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
8060 int have_neon = 0;
8061 int have_fpa_registers = 1;
8062 const struct target_desc *tdesc = info.target_desc;
8063
8064 /* If we have an object to base this architecture on, try to determine
8065 its ABI. */
8066
8067 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8068 {
8069 int ei_osabi, e_flags;
8070
8071 switch (bfd_get_flavour (info.abfd))
8072 {
8073 case bfd_target_aout_flavour:
8074 /* Assume it's an old APCS-style ABI. */
8075 arm_abi = ARM_ABI_APCS;
8076 break;
8077
8078 case bfd_target_coff_flavour:
8079 /* Assume it's an old APCS-style ABI. */
8080 /* XXX WinCE? */
8081 arm_abi = ARM_ABI_APCS;
8082 break;
8083
8084 case bfd_target_elf_flavour:
8085 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8086 e_flags = elf_elfheader (info.abfd)->e_flags;
8087
8088 if (ei_osabi == ELFOSABI_ARM)
8089 {
8090 /* GNU tools used to use this value, but do not for EABI
8091 objects. There's nowhere to tag an EABI version
8092 anyway, so assume APCS. */
8093 arm_abi = ARM_ABI_APCS;
8094 }
8095 else if (ei_osabi == ELFOSABI_NONE)
8096 {
8097 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8098 int attr_arch, attr_profile;
8099
8100 switch (eabi_ver)
8101 {
8102 case EF_ARM_EABI_UNKNOWN:
8103 /* Assume GNU tools. */
8104 arm_abi = ARM_ABI_APCS;
8105 break;
8106
8107 case EF_ARM_EABI_VER4:
8108 case EF_ARM_EABI_VER5:
8109 arm_abi = ARM_ABI_AAPCS;
8110 /* EABI binaries default to VFP float ordering.
8111 They may also contain build attributes that can
8112 be used to identify if the VFP argument-passing
8113 ABI is in use. */
8114 if (fp_model == ARM_FLOAT_AUTO)
8115 {
8116 #ifdef HAVE_ELF
8117 switch (bfd_elf_get_obj_attr_int (info.abfd,
8118 OBJ_ATTR_PROC,
8119 Tag_ABI_VFP_args))
8120 {
8121 case 0:
8122 /* "The user intended FP parameter/result
8123 passing to conform to AAPCS, base
8124 variant". */
8125 fp_model = ARM_FLOAT_SOFT_VFP;
8126 break;
8127 case 1:
8128 /* "The user intended FP parameter/result
8129 passing to conform to AAPCS, VFP
8130 variant". */
8131 fp_model = ARM_FLOAT_VFP;
8132 break;
8133 case 2:
8134 /* "The user intended FP parameter/result
8135 passing to conform to tool chain-specific
8136 conventions" - we don't know any such
8137 conventions, so leave it as "auto". */
8138 break;
8139 default:
8140 /* Attribute value not mentioned in the
8141 October 2008 ABI, so leave it as
8142 "auto". */
8143 break;
8144 }
8145 #else
8146 fp_model = ARM_FLOAT_SOFT_VFP;
8147 #endif
8148 }
8149 break;
8150
8151 default:
8152 /* Leave it as "auto". */
8153 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
8154 break;
8155 }
8156
8157 #ifdef HAVE_ELF
8158 /* Detect M-profile programs. This only works if the
8159 executable file includes build attributes; GCC does
8160 copy them to the executable, but e.g. RealView does
8161 not. */
8162 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
8163 Tag_CPU_arch);
8164 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
8165 OBJ_ATTR_PROC,
8166 Tag_CPU_arch_profile);
8167 /* GCC specifies the profile for v6-M; RealView only
8168 specifies the profile for architectures starting with
8169 V7 (as opposed to architectures with a tag
8170 numerically greater than TAG_CPU_ARCH_V7). */
8171 if (!tdesc_has_registers (tdesc)
8172 && (attr_arch == TAG_CPU_ARCH_V6_M
8173 || attr_arch == TAG_CPU_ARCH_V6S_M
8174 || attr_profile == 'M'))
8175 tdesc = tdesc_arm_with_m;
8176 #endif
8177 }
8178
8179 if (fp_model == ARM_FLOAT_AUTO)
8180 {
8181 int e_flags = elf_elfheader (info.abfd)->e_flags;
8182
8183 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
8184 {
8185 case 0:
8186 /* Leave it as "auto". Strictly speaking this case
8187 means FPA, but almost nobody uses that now, and
8188 many toolchains fail to set the appropriate bits
8189 for the floating-point model they use. */
8190 break;
8191 case EF_ARM_SOFT_FLOAT:
8192 fp_model = ARM_FLOAT_SOFT_FPA;
8193 break;
8194 case EF_ARM_VFP_FLOAT:
8195 fp_model = ARM_FLOAT_VFP;
8196 break;
8197 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
8198 fp_model = ARM_FLOAT_SOFT_VFP;
8199 break;
8200 }
8201 }
8202
8203 if (e_flags & EF_ARM_BE8)
8204 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
8205
8206 break;
8207
8208 default:
8209 /* Leave it as "auto". */
8210 break;
8211 }
8212 }
8213
8214 /* Check any target description for validity. */
8215 if (tdesc_has_registers (tdesc))
8216 {
8217 /* For most registers we require GDB's default names; but also allow
8218 the numeric names for sp / lr / pc, as a convenience. */
8219 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
8220 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
8221 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
8222
8223 const struct tdesc_feature *feature;
8224 int valid_p;
8225
8226 feature = tdesc_find_feature (tdesc,
8227 "org.gnu.gdb.arm.core");
8228 if (feature == NULL)
8229 {
8230 feature = tdesc_find_feature (tdesc,
8231 "org.gnu.gdb.arm.m-profile");
8232 if (feature == NULL)
8233 return NULL;
8234 else
8235 is_m = 1;
8236 }
8237
8238 tdesc_data = tdesc_data_alloc ();
8239
8240 valid_p = 1;
8241 for (i = 0; i < ARM_SP_REGNUM; i++)
8242 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8243 arm_register_names[i]);
8244 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8245 ARM_SP_REGNUM,
8246 arm_sp_names);
8247 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8248 ARM_LR_REGNUM,
8249 arm_lr_names);
8250 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8251 ARM_PC_REGNUM,
8252 arm_pc_names);
8253 if (is_m)
8254 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8255 ARM_PS_REGNUM, "xpsr");
8256 else
8257 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8258 ARM_PS_REGNUM, "cpsr");
8259
8260 if (!valid_p)
8261 {
8262 tdesc_data_cleanup (tdesc_data);
8263 return NULL;
8264 }
8265
8266 feature = tdesc_find_feature (tdesc,
8267 "org.gnu.gdb.arm.fpa");
8268 if (feature != NULL)
8269 {
8270 valid_p = 1;
8271 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
8272 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8273 arm_register_names[i]);
8274 if (!valid_p)
8275 {
8276 tdesc_data_cleanup (tdesc_data);
8277 return NULL;
8278 }
8279 }
8280 else
8281 have_fpa_registers = 0;
8282
8283 feature = tdesc_find_feature (tdesc,
8284 "org.gnu.gdb.xscale.iwmmxt");
8285 if (feature != NULL)
8286 {
8287 static const char *const iwmmxt_names[] = {
8288 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
8289 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
8290 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
8291 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
8292 };
8293
8294 valid_p = 1;
8295 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
8296 valid_p
8297 &= tdesc_numbered_register (feature, tdesc_data, i,
8298 iwmmxt_names[i - ARM_WR0_REGNUM]);
8299
8300 /* Check for the control registers, but do not fail if they
8301 are missing. */
8302 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
8303 tdesc_numbered_register (feature, tdesc_data, i,
8304 iwmmxt_names[i - ARM_WR0_REGNUM]);
8305
8306 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
8307 valid_p
8308 &= tdesc_numbered_register (feature, tdesc_data, i,
8309 iwmmxt_names[i - ARM_WR0_REGNUM]);
8310
8311 if (!valid_p)
8312 {
8313 tdesc_data_cleanup (tdesc_data);
8314 return NULL;
8315 }
8316 }
8317
8318 /* If we have a VFP unit, check whether the single precision registers
8319 are present. If not, then we will synthesize them as pseudo
8320 registers. */
8321 feature = tdesc_find_feature (tdesc,
8322 "org.gnu.gdb.arm.vfp");
8323 if (feature != NULL)
8324 {
8325 static const char *const vfp_double_names[] = {
8326 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
8327 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
8328 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
8329 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
8330 };
8331
8332 /* Require the double precision registers. There must be either
8333 16 or 32. */
8334 valid_p = 1;
8335 for (i = 0; i < 32; i++)
8336 {
8337 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8338 ARM_D0_REGNUM + i,
8339 vfp_double_names[i]);
8340 if (!valid_p)
8341 break;
8342 }
8343
8344 if (!valid_p && i != 16)
8345 {
8346 tdesc_data_cleanup (tdesc_data);
8347 return NULL;
8348 }
8349
8350 if (tdesc_unnumbered_register (feature, "s0") == 0)
8351 have_vfp_pseudos = 1;
8352
8353 have_vfp_registers = 1;
8354
8355 /* If we have VFP, also check for NEON. The architecture allows
8356 NEON without VFP (integer vector operations only), but GDB
8357 does not support that. */
8358 feature = tdesc_find_feature (tdesc,
8359 "org.gnu.gdb.arm.neon");
8360 if (feature != NULL)
8361 {
8362 /* NEON requires 32 double-precision registers. */
8363 if (i != 32)
8364 {
8365 tdesc_data_cleanup (tdesc_data);
8366 return NULL;
8367 }
8368
8369 /* If there are quad registers defined by the stub, use
8370 their type; otherwise (normally) provide them with
8371 the default type. */
8372 if (tdesc_unnumbered_register (feature, "q0") == 0)
8373 have_neon_pseudos = 1;
8374
8375 have_neon = 1;
8376 }
8377 }
8378 }
8379
8380 /* If there is already a candidate, use it. */
8381 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
8382 best_arch != NULL;
8383 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
8384 {
8385 if (arm_abi != ARM_ABI_AUTO
8386 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
8387 continue;
8388
8389 if (fp_model != ARM_FLOAT_AUTO
8390 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
8391 continue;
8392
8393 /* There are various other properties in tdep that we do not
8394 need to check here: those derived from a target description,
8395 since gdbarches with a different target description are
8396 automatically disqualified. */
8397
8398 /* Do check is_m, though, since it might come from the binary. */
8399 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
8400 continue;
8401
8402 /* Found a match. */
8403 break;
8404 }
8405
8406 if (best_arch != NULL)
8407 {
8408 if (tdesc_data != NULL)
8409 tdesc_data_cleanup (tdesc_data);
8410 return best_arch->gdbarch;
8411 }
8412
8413 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
8414 gdbarch = gdbarch_alloc (&info, tdep);
8415
8416 /* Record additional information about the architecture we are defining.
8417 These are gdbarch discriminators, like the OSABI. */
8418 tdep->arm_abi = arm_abi;
8419 tdep->fp_model = fp_model;
8420 tdep->is_m = is_m;
8421 tdep->have_fpa_registers = have_fpa_registers;
8422 tdep->have_vfp_registers = have_vfp_registers;
8423 tdep->have_vfp_pseudos = have_vfp_pseudos;
8424 tdep->have_neon_pseudos = have_neon_pseudos;
8425 tdep->have_neon = have_neon;
8426
8427 /* Breakpoints. */
8428 switch (info.byte_order_for_code)
8429 {
8430 case BFD_ENDIAN_BIG:
8431 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
8432 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
8433 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
8434 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
8435
8436 break;
8437
8438 case BFD_ENDIAN_LITTLE:
8439 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
8440 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
8441 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
8442 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
8443
8444 break;
8445
8446 default:
8447 internal_error (__FILE__, __LINE__,
8448 _("arm_gdbarch_init: bad byte order for float format"));
8449 }
8450
8451 /* On ARM targets char defaults to unsigned. */
8452 set_gdbarch_char_signed (gdbarch, 0);
8453
8454 /* Note: for displaced stepping, this includes the breakpoint, and one word
8455 of additional scratch space. This setting isn't used for anything beside
8456 displaced stepping at present. */
8457 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
8458
8459 /* This should be low enough for everything. */
8460 tdep->lowest_pc = 0x20;
8461 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
8462
8463 /* The default, for both APCS and AAPCS, is to return small
8464 structures in registers. */
8465 tdep->struct_return = reg_struct_return;
8466
8467 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
8468 set_gdbarch_frame_align (gdbarch, arm_frame_align);
8469
8470 set_gdbarch_write_pc (gdbarch, arm_write_pc);
8471
8472 /* Frame handling. */
8473 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
8474 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
8475 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
8476
8477 frame_base_set_default (gdbarch, &arm_normal_base);
8478
8479 /* Address manipulation. */
8480 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
8481 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
8482
8483 /* Advance PC across function entry code. */
8484 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
8485
8486 /* Detect whether PC is in function epilogue. */
8487 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
8488
8489 /* Skip trampolines. */
8490 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
8491
8492 /* The stack grows downward. */
8493 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
8494
8495 /* Breakpoint manipulation. */
8496 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
8497 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
8498 arm_remote_breakpoint_from_pc);
8499
8500 /* Information about registers, etc. */
8501 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
8502 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
8503 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
8504 set_gdbarch_register_type (gdbarch, arm_register_type);
8505 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
8506
8507 /* This "info float" is FPA-specific. Use the generic version if we
8508 do not have FPA. */
8509 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
8510 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
8511
8512 /* Internal <-> external register number maps. */
8513 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
8514 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
8515
8516 set_gdbarch_register_name (gdbarch, arm_register_name);
8517
8518 /* Returning results. */
8519 set_gdbarch_return_value (gdbarch, arm_return_value);
8520
8521 /* Disassembly. */
8522 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
8523
8524 /* Minsymbol frobbing. */
8525 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
8526 set_gdbarch_coff_make_msymbol_special (gdbarch,
8527 arm_coff_make_msymbol_special);
8528 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
8529
8530 /* Thumb-2 IT block support. */
8531 set_gdbarch_adjust_breakpoint_address (gdbarch,
8532 arm_adjust_breakpoint_address);
8533
8534 /* Virtual tables. */
8535 set_gdbarch_vbit_in_delta (gdbarch, 1);
8536
8537 /* Hook in the ABI-specific overrides, if they have been registered. */
8538 gdbarch_init_osabi (info, gdbarch);
8539
8540 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
8541
8542 /* Add some default predicates. */
8543 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
8544 dwarf2_append_unwinders (gdbarch);
8545 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
8546 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
8547
8548 /* Now we have tuned the configuration, set a few final things,
8549 based on what the OS ABI has told us. */
8550
8551 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
8552 binaries are always marked. */
8553 if (tdep->arm_abi == ARM_ABI_AUTO)
8554 tdep->arm_abi = ARM_ABI_APCS;
8555
8556 /* Watchpoints are not steppable. */
8557 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
8558
8559 /* We used to default to FPA for generic ARM, but almost nobody
8560 uses that now, and we now provide a way for the user to force
8561 the model. So default to the most useful variant. */
8562 if (tdep->fp_model == ARM_FLOAT_AUTO)
8563 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
8564
8565 if (tdep->jb_pc >= 0)
8566 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
8567
8568 /* Floating point sizes and format. */
8569 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
8570 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
8571 {
8572 set_gdbarch_double_format
8573 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8574 set_gdbarch_long_double_format
8575 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8576 }
8577 else
8578 {
8579 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
8580 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
8581 }
8582
8583 if (have_vfp_pseudos)
8584 {
8585 /* NOTE: These are the only pseudo registers used by
8586 the ARM target at the moment. If more are added, a
8587 little more care in numbering will be needed. */
8588
8589 int num_pseudos = 32;
8590 if (have_neon_pseudos)
8591 num_pseudos += 16;
8592 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
8593 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
8594 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
8595 }
8596
8597 if (tdesc_data)
8598 {
8599 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
8600
8601 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
8602
8603 /* Override tdesc_register_type to adjust the types of VFP
8604 registers for NEON. */
8605 set_gdbarch_register_type (gdbarch, arm_register_type);
8606 }
8607
8608 /* Add standard register aliases. We add aliases even for those
8609 nanes which are used by the current architecture - it's simpler,
8610 and does no harm, since nothing ever lists user registers. */
8611 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
8612 user_reg_add (gdbarch, arm_register_aliases[i].name,
8613 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
8614
8615 return gdbarch;
8616 }
8617
8618 static void
8619 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
8620 {
8621 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8622
8623 if (tdep == NULL)
8624 return;
8625
8626 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
8627 (unsigned long) tdep->lowest_pc);
8628 }
8629
8630 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
8631
8632 void
8633 _initialize_arm_tdep (void)
8634 {
8635 struct ui_file *stb;
8636 long length;
8637 struct cmd_list_element *new_set, *new_show;
8638 const char *setname;
8639 const char *setdesc;
8640 const char *const *regnames;
8641 int numregs, i, j;
8642 static char *helptext;
8643 char regdesc[1024], *rdptr = regdesc;
8644 size_t rest = sizeof (regdesc);
8645
8646 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
8647
8648 arm_objfile_data_key
8649 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
8650
8651 /* Add ourselves to objfile event chain. */
8652 observer_attach_new_objfile (arm_exidx_new_objfile);
8653 arm_exidx_data_key
8654 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
8655
8656 /* Register an ELF OS ABI sniffer for ARM binaries. */
8657 gdbarch_register_osabi_sniffer (bfd_arch_arm,
8658 bfd_target_elf_flavour,
8659 arm_elf_osabi_sniffer);
8660
8661 /* Initialize the standard target descriptions. */
8662 initialize_tdesc_arm_with_m ();
8663
8664 /* Get the number of possible sets of register names defined in opcodes. */
8665 num_disassembly_options = get_arm_regname_num_options ();
8666
8667 /* Add root prefix command for all "set arm"/"show arm" commands. */
8668 add_prefix_cmd ("arm", no_class, set_arm_command,
8669 _("Various ARM-specific commands."),
8670 &setarmcmdlist, "set arm ", 0, &setlist);
8671
8672 add_prefix_cmd ("arm", no_class, show_arm_command,
8673 _("Various ARM-specific commands."),
8674 &showarmcmdlist, "show arm ", 0, &showlist);
8675
8676 /* Sync the opcode insn printer with our register viewer. */
8677 parse_arm_disassembler_option ("reg-names-std");
8678
8679 /* Initialize the array that will be passed to
8680 add_setshow_enum_cmd(). */
8681 valid_disassembly_styles
8682 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
8683 for (i = 0; i < num_disassembly_options; i++)
8684 {
8685 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
8686 valid_disassembly_styles[i] = setname;
8687 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
8688 rdptr += length;
8689 rest -= length;
8690 /* When we find the default names, tell the disassembler to use
8691 them. */
8692 if (!strcmp (setname, "std"))
8693 {
8694 disassembly_style = setname;
8695 set_arm_regname_option (i);
8696 }
8697 }
8698 /* Mark the end of valid options. */
8699 valid_disassembly_styles[num_disassembly_options] = NULL;
8700
8701 /* Create the help text. */
8702 stb = mem_fileopen ();
8703 fprintf_unfiltered (stb, "%s%s%s",
8704 _("The valid values are:\n"),
8705 regdesc,
8706 _("The default is \"std\"."));
8707 helptext = ui_file_xstrdup (stb, NULL);
8708 ui_file_delete (stb);
8709
8710 add_setshow_enum_cmd("disassembler", no_class,
8711 valid_disassembly_styles, &disassembly_style,
8712 _("Set the disassembly style."),
8713 _("Show the disassembly style."),
8714 helptext,
8715 set_disassembly_style_sfunc,
8716 NULL, /* FIXME: i18n: The disassembly style is
8717 \"%s\". */
8718 &setarmcmdlist, &showarmcmdlist);
8719
8720 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
8721 _("Set usage of ARM 32-bit mode."),
8722 _("Show usage of ARM 32-bit mode."),
8723 _("When off, a 26-bit PC will be used."),
8724 NULL,
8725 NULL, /* FIXME: i18n: Usage of ARM 32-bit
8726 mode is %s. */
8727 &setarmcmdlist, &showarmcmdlist);
8728
8729 /* Add a command to allow the user to force the FPU model. */
8730 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
8731 _("Set the floating point type."),
8732 _("Show the floating point type."),
8733 _("auto - Determine the FP typefrom the OS-ABI.\n\
8734 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
8735 fpa - FPA co-processor (GCC compiled).\n\
8736 softvfp - Software FP with pure-endian doubles.\n\
8737 vfp - VFP co-processor."),
8738 set_fp_model_sfunc, show_fp_model,
8739 &setarmcmdlist, &showarmcmdlist);
8740
8741 /* Add a command to allow the user to force the ABI. */
8742 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
8743 _("Set the ABI."),
8744 _("Show the ABI."),
8745 NULL, arm_set_abi, arm_show_abi,
8746 &setarmcmdlist, &showarmcmdlist);
8747
8748 /* Add two commands to allow the user to force the assumed
8749 execution mode. */
8750 add_setshow_enum_cmd ("fallback-mode", class_support,
8751 arm_mode_strings, &arm_fallback_mode_string,
8752 _("Set the mode assumed when symbols are unavailable."),
8753 _("Show the mode assumed when symbols are unavailable."),
8754 NULL, NULL, arm_show_fallback_mode,
8755 &setarmcmdlist, &showarmcmdlist);
8756 add_setshow_enum_cmd ("force-mode", class_support,
8757 arm_mode_strings, &arm_force_mode_string,
8758 _("Set the mode assumed even when symbols are available."),
8759 _("Show the mode assumed even when symbols are available."),
8760 NULL, NULL, arm_show_force_mode,
8761 &setarmcmdlist, &showarmcmdlist);
8762
8763 /* Debugging flag. */
8764 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
8765 _("Set ARM debugging."),
8766 _("Show ARM debugging."),
8767 _("When on, arm-specific debugging is enabled."),
8768 NULL,
8769 NULL, /* FIXME: i18n: "ARM debugging is %s. */
8770 &setdebuglist, &showdebuglist);
8771 }
This page took 0.211593 seconds and 5 git commands to generate.