gdb/arm-tdep.c (binutils-gdb.git)
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "features/arm-with-m.c"
59 #include "features/arm-with-iwmmxt.c"
60 #include "features/arm-with-vfpv2.c"
61 #include "features/arm-with-vfpv3.c"
62 #include "features/arm-with-neon.c"
63
64 static int arm_debug;
65
66 /* Macros for setting and testing a bit in a minimal symbol that marks
67 it as a Thumb function. The MSB of the minimal symbol's "info" field
68 is used for this purpose.
69
70 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
71 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
72
73 #define MSYMBOL_SET_SPECIAL(msym) \
74 MSYMBOL_TARGET_FLAG_1 (msym) = 1
75
76 #define MSYMBOL_IS_SPECIAL(msym) \
77 MSYMBOL_TARGET_FLAG_1 (msym)
78
79 /* Per-objfile data used for mapping symbols. */
80 static const struct objfile_data *arm_objfile_data_key;
81
82 struct arm_mapping_symbol
83 {
84 bfd_vma value;
85 char type;
86 };
87 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
88 DEF_VEC_O(arm_mapping_symbol_s);
89
90 struct arm_per_objfile
91 {
92 VEC(arm_mapping_symbol_s) **section_maps;
93 };
94
95 /* The list of available "set arm ..." and "show arm ..." commands. */
96 static struct cmd_list_element *setarmcmdlist = NULL;
97 static struct cmd_list_element *showarmcmdlist = NULL;
98
99 /* The type of floating-point to use. Keep this in sync with enum
100 arm_float_model, and the help string in _initialize_arm_tdep. */
101 static const char *fp_model_strings[] =
102 {
103 "auto",
104 "softfpa",
105 "fpa",
106 "softvfp",
107 "vfp",
108 NULL
109 };
110
111 /* A variable that can be configured by the user. */
112 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
113 static const char *current_fp_model = "auto";
114
115 /* The ABI to use. Keep this in sync with arm_abi_kind. */
116 static const char *arm_abi_strings[] =
117 {
118 "auto",
119 "APCS",
120 "AAPCS",
121 NULL
122 };
123
124 /* A variable that can be configured by the user. */
125 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
126 static const char *arm_abi_string = "auto";
127
128 /* The execution mode to assume. */
129 static const char *arm_mode_strings[] =
130 {
131 "auto",
132 "arm",
133 "thumb",
134 NULL
135 };
136
137 static const char *arm_fallback_mode_string = "auto";
138 static const char *arm_force_mode_string = "auto";
139
140 /* Internal override of the execution mode. -1 means no override,
141 0 means override to ARM mode, 1 means override to Thumb mode.
142 The effect is the same as if arm_force_mode had been set by the
143 user (except the internal override has precedence over a user's
144 arm_force_mode override). */
145 static int arm_override_mode = -1;
146
147 /* Number of different reg name sets (options). */
148 static int num_disassembly_options;
149
150 /* The standard register names, and all the valid aliases for them. Note
151 that `fp', `sp' and `pc' are not added to this alias list, because they
152 have been added as builtin user registers in
153 std-regs.c:_initialize_frame_reg. */
154 static const struct
155 {
156 const char *name;
157 int regnum;
158 } arm_register_aliases[] = {
159 /* Basic register numbers. */
160 { "r0", 0 },
161 { "r1", 1 },
162 { "r2", 2 },
163 { "r3", 3 },
164 { "r4", 4 },
165 { "r5", 5 },
166 { "r6", 6 },
167 { "r7", 7 },
168 { "r8", 8 },
169 { "r9", 9 },
170 { "r10", 10 },
171 { "r11", 11 },
172 { "r12", 12 },
173 { "r13", 13 },
174 { "r14", 14 },
175 { "r15", 15 },
176 /* Synonyms (argument and variable registers). */
177 { "a1", 0 },
178 { "a2", 1 },
179 { "a3", 2 },
180 { "a4", 3 },
181 { "v1", 4 },
182 { "v2", 5 },
183 { "v3", 6 },
184 { "v4", 7 },
185 { "v5", 8 },
186 { "v6", 9 },
187 { "v7", 10 },
188 { "v8", 11 },
189 /* Other platform-specific names for r9. */
190 { "sb", 9 },
191 { "tr", 9 },
192 /* Special names. */
193 { "ip", 12 },
194 { "lr", 14 },
195 /* Names used by GCC (not listed in the ARM EABI). */
196 { "sl", 10 },
197 /* A special name from the older ATPCS. */
198 { "wr", 7 },
199 };
200
201 static const char *const arm_register_names[] =
202 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
203 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
204 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
205 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
206 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
207 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
208 "fps", "cpsr" }; /* 24 25 */
209
210 /* Valid register name styles. */
211 static const char **valid_disassembly_styles;
212
213 /* Disassembly style to use. Default to "std" register names. */
214 static const char *disassembly_style;
215
216 /* This is used to keep the bfd arch_info in sync with the disassembly
217 style. */
218 static void set_disassembly_style_sfunc(char *, int,
219 struct cmd_list_element *);
220 static void set_disassembly_style (void);
221
222 static void convert_from_extended (const struct floatformat *, const void *,
223 void *, int);
224 static void convert_to_extended (const struct floatformat *, void *,
225 const void *, int);
226
227 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
228 struct regcache *regcache,
229 int regnum, gdb_byte *buf);
230 static void arm_neon_quad_write (struct gdbarch *gdbarch,
231 struct regcache *regcache,
232 int regnum, const gdb_byte *buf);
233
234 static int thumb_insn_size (unsigned short inst1);
235
236 struct arm_prologue_cache
237 {
238 /* The stack pointer at the time this frame was created; i.e. the
239 caller's stack pointer when this function was called. It is used
240 to identify this frame. */
241 CORE_ADDR prev_sp;
242
243 /* The frame base for this frame is just prev_sp - frame size.
244 FRAMESIZE is the distance from the frame pointer to the
245 initial stack pointer. */
246
247 int framesize;
248
249 /* The register used to hold the frame pointer for this frame. */
250 int framereg;
251
252 /* Saved register offsets. */
253 struct trad_frame_saved_reg *saved_regs;
254 };
255
256 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
257 CORE_ADDR prologue_start,
258 CORE_ADDR prologue_end,
259 struct arm_prologue_cache *cache);
260
261 /* Architecture version for displaced stepping. This effects the behaviour of
262 certain instructions, and really should not be hard-wired. */
263
264 #define DISPLACED_STEPPING_ARCH_VERSION 5
265
266 /* Addresses for calling Thumb functions have the bit 0 set.
267 Here are some macros to test, set, or clear bit 0 of addresses. */
268 #define IS_THUMB_ADDR(addr) ((addr) & 1)
269 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
270 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
271
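/* As an illustration of the macros above (example address chosen
   arbitrarily): MAKE_THUMB_ADDR (0x80a4) yields 0x80a5, IS_THUMB_ADDR (0x80a5)
   is nonzero, and UNMAKE_THUMB_ADDR (0x80a5) recovers 0x80a4.  */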
272 /* Set to true if the 32-bit mode is in use. */
273
274 int arm_apcs_32 = 1;
275
276 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
277
278 int
279 arm_psr_thumb_bit (struct gdbarch *gdbarch)
280 {
281 if (gdbarch_tdep (gdbarch)->is_m)
282 return XPSR_T;
283 else
284 return CPSR_T;
285 }
286
287 /* Determine if FRAME is executing in Thumb mode. */
288
289 int
290 arm_frame_is_thumb (struct frame_info *frame)
291 {
292 CORE_ADDR cpsr;
293 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
294
295 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
296 directly (from a signal frame or dummy frame) or by interpreting
297 the saved LR (from a prologue or DWARF frame). So consult it and
298 trust the unwinders. */
299 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
300
301 return (cpsr & t_bit) != 0;
302 }
303
304 /* Callback for VEC_lower_bound. */
305
306 static inline int
307 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
308 const struct arm_mapping_symbol *rhs)
309 {
310 return lhs->value < rhs->value;
311 }
312
313 /* Search for the mapping symbol covering MEMADDR. If one is found,
314 return its type. Otherwise, return 0. If START is non-NULL,
315 set *START to the location of the mapping symbol. */
316
317 static char
318 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
319 {
320 struct obj_section *sec;
321
322 /* If there are mapping symbols, consult them. */
323 sec = find_pc_section (memaddr);
324 if (sec != NULL)
325 {
326 struct arm_per_objfile *data;
327 VEC(arm_mapping_symbol_s) *map;
328 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
329 0 };
330 unsigned int idx;
331
332 data = objfile_data (sec->objfile, arm_objfile_data_key);
333 if (data != NULL)
334 {
335 map = data->section_maps[sec->the_bfd_section->index];
336 if (!VEC_empty (arm_mapping_symbol_s, map))
337 {
338 struct arm_mapping_symbol *map_sym;
339
340 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
341 arm_compare_mapping_symbols);
342
343 /* VEC_lower_bound finds the earliest ordered insertion
344 point. If the following symbol starts at this exact
345 address, we use that; otherwise, the preceding
346 mapping symbol covers this address. */
347 if (idx < VEC_length (arm_mapping_symbol_s, map))
348 {
349 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
350 if (map_sym->value == map_key.value)
351 {
352 if (start)
353 *start = map_sym->value + obj_section_addr (sec);
354 return map_sym->type;
355 }
356 }
357
358 if (idx > 0)
359 {
360 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
361 if (start)
362 *start = map_sym->value + obj_section_addr (sec);
363 return map_sym->type;
364 }
365 }
366 }
367 }
368
369 return 0;
370 }
371
372 /* Determine if the program counter specified in MEMADDR is in a Thumb
373 function. This function should be called for addresses unrelated to
374 any executing frame; otherwise, prefer arm_frame_is_thumb. */
375
376 int
377 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
378 {
379 struct obj_section *sec;
380 struct minimal_symbol *sym;
381 char type;
382 struct displaced_step_closure* dsc
383 = get_displaced_step_closure_by_addr(memaddr);
384
385 /* If checking the mode of a displaced instruction in the copy area, the
386 mode should be determined by the instruction at the original address. */
387 if (dsc)
388 {
389 if (debug_displaced)
390 fprintf_unfiltered (gdb_stdlog,
391 "displaced: check mode of %.8lx instead of %.8lx\n",
392 (unsigned long) dsc->insn_addr,
393 (unsigned long) memaddr);
394 memaddr = dsc->insn_addr;
395 }
396
397 /* If bit 0 of the address is set, assume this is a Thumb address. */
398 if (IS_THUMB_ADDR (memaddr))
399 return 1;
400
401 /* Respect internal mode override if active. */
402 if (arm_override_mode != -1)
403 return arm_override_mode;
404
405 /* If the user wants to override the symbol table, let him. */
406 if (strcmp (arm_force_mode_string, "arm") == 0)
407 return 0;
408 if (strcmp (arm_force_mode_string, "thumb") == 0)
409 return 1;
410
411 /* ARM v6-M and v7-M are always in Thumb mode. */
412 if (gdbarch_tdep (gdbarch)->is_m)
413 return 1;
414
415 /* If there are mapping symbols, consult them. */
416 type = arm_find_mapping_symbol (memaddr, NULL);
417 if (type)
418 return type == 't';
419
420 /* Thumb functions have a "special" bit set in minimal symbols. */
421 sym = lookup_minimal_symbol_by_pc (memaddr);
422 if (sym)
423 return (MSYMBOL_IS_SPECIAL (sym));
424
425 /* If the user wants to override the fallback mode, let them. */
426 if (strcmp (arm_fallback_mode_string, "arm") == 0)
427 return 0;
428 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
429 return 1;
430
431 /* If we couldn't find any symbol, but we're talking to a running
432 target, then trust the current value of $cpsr. This lets
433 "display/i $pc" always show the correct mode (though if there is
434 a symbol table we will not reach here, so it still may not be
435 displayed in the mode it will be executed). */
436 if (target_has_registers)
437 return arm_frame_is_thumb (get_current_frame ());
438
439 /* Otherwise we're out of luck; we assume ARM. */
440 return 0;
441 }
442
443 /* Remove useless bits from addresses in a running program. */
444 static CORE_ADDR
445 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
446 {
447 if (arm_apcs_32)
448 return UNMAKE_THUMB_ADDR (val);
449 else
450 return (val & 0x03fffffc);
451 }
452
453 /* When reading symbols, we need to zap the low bit of the address,
454 which may be set to 1 for Thumb functions. */
455 static CORE_ADDR
456 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
457 {
458 return val & ~1;
459 }
460
461 /* Return 1 if PC is the start of a compiler helper function which
462 can be safely ignored during prologue skipping. IS_THUMB is true
463 if the function is known to be a Thumb function due to the way it
464 is being called. */
465 static int
466 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
467 {
468 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
469 struct minimal_symbol *msym;
470
471 msym = lookup_minimal_symbol_by_pc (pc);
472 if (msym != NULL
473 && SYMBOL_VALUE_ADDRESS (msym) == pc
474 && SYMBOL_LINKAGE_NAME (msym) != NULL)
475 {
476 const char *name = SYMBOL_LINKAGE_NAME (msym);
477
478 /* The GNU linker's Thumb call stub to foo is named
479 __foo_from_thumb. */
480 if (strstr (name, "_from_thumb") != NULL)
481 name += 2;
482
483 /* On soft-float targets, __truncdfsf2 is called to convert promoted
484 arguments to their argument types in non-prototyped
485 functions. */
486 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
487 return 1;
488 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
489 return 1;
490
491 /* Internal functions related to thread-local storage. */
492 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
493 return 1;
494 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
495 return 1;
496 }
497 else
498 {
499 /* If we run against a stripped glibc, we may be unable to identify
500 special functions by name. Check for one important case,
501 __aeabi_read_tp, by comparing the *code* against the default
502 implementation (this is hand-written ARM assembler in glibc). */
503
504 if (!is_thumb
505 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
506 == 0xe3e00a0f /* mov r0, #0xffff0fff */
507 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
508 == 0xe240f01f) /* sub pc, r0, #31 */
509 return 1;
510 }
511
512 return 0;
513 }
514
515 /* Support routines for instruction parsing. */
516 #define submask(x) ((1L << ((x) + 1)) - 1)
517 #define bit(obj,st) (((obj) >> (st)) & 1)
518 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
519 #define sbits(obj,st,fn) \
520 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
521 #define BranchDest(addr,instr) \
522 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
523
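/* As a worked example of these helpers (addresses and encodings chosen for
   illustration): for the ARM instruction word 0xe92d4800 (stmfd sp!, {fp, lr}),
   bits (0xe92d4800, 16, 19) extracts the base register field and yields 13
   (sp).  For a branch-to-self, 0xeafffffe, sbits (0xeafffffe, 0, 23)
   sign-extends the 24-bit offset field to -2, so
   BranchDest (0x8000, 0xeafffffe) evaluates to 0x8000 + 8 + (-2 << 2) = 0x8000,
   reflecting the 8-byte pipeline offset of ARM branch encodings.  */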
524 /* Extract the immediate from a movw/movt instruction of encoding T. INSN1
525 is the first 16 bits of the instruction, and INSN2 is the second 16 bits
526 of the instruction. */
527 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
528 ((bits ((insn1), 0, 3) << 12) \
529 | (bits ((insn1), 10, 10) << 11) \
530 | (bits ((insn2), 12, 14) << 8) \
531 | bits ((insn2), 0, 7))
532
533 /* Extract the immediate from a movw/movt instruction of encoding A. INSN is
534 the 32-bit instruction. */
535 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
536 ((bits ((insn), 16, 19) << 12) \
537 | bits ((insn), 0, 11))
538
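/* As a worked example (encodings chosen for illustration): "movw r0, #0x1234"
   assembles in Thumb encoding T3 to the halfwords 0xf241 and 0x2034, and
   EXTRACT_MOVW_MOVT_IMM_T (0xf241, 0x2034) reassembles imm4:i:imm3:imm8 into
   0x1234.  The same constant in ARM encoding A2 is 0xe3010234, and
   EXTRACT_MOVW_MOVT_IMM_A (0xe3010234) likewise yields 0x1234.  */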
539 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
540
541 static unsigned int
542 thumb_expand_immediate (unsigned int imm)
543 {
544 unsigned int count = imm >> 7;
545
546 if (count < 8)
547 switch (count / 2)
548 {
549 case 0:
550 return imm & 0xff;
551 case 1:
552 return (imm & 0xff) | ((imm & 0xff) << 16);
553 case 2:
554 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
555 case 3:
556 return (imm & 0xff) | ((imm & 0xff) << 8)
557 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
558 }
559
560 return (0x80 | (imm & 0x7f)) << (32 - count);
561 }
562
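/* Illustrative values for thumb_expand_immediate: 0x0ab (top bits clear) is
   returned unchanged as 0xab; 0x1ab selects the 0x00XY00XY replication
   pattern and expands to 0x00ab00ab; 0x4ff falls into the rotated case and
   expands to (0x80 | 0x7f) rotated right by 9, i.e. 0x7f800000.  */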
563 /* Return 1 if the 16-bit Thumb instruction INST might change
564 control flow, 0 otherwise. */
565
566 static int
567 thumb_instruction_changes_pc (unsigned short inst)
568 {
569 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
570 return 1;
571
572 if ((inst & 0xf000) == 0xd000) /* conditional branch */
573 return 1;
574
575 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
576 return 1;
577
578 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
579 return 1;
580
581 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
582 return 1;
583
584 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
585 return 1;
586
587 return 0;
588 }
589
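/* For example, thumb_instruction_changes_pc returns 1 for 0xbd10
   (pop {r4, pc}) and 0x4770 (bx lr), but 0 for 0xb510 (push {r4, lr}),
   which cannot alter the PC.  */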
590 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
591 might change control flow, 0 otherwise. */
592
593 static int
594 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
595 {
596 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
597 {
598 /* Branches and miscellaneous control instructions. */
599
600 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
601 {
602 /* B, BL, BLX. */
603 return 1;
604 }
605 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
606 {
607 /* SUBS PC, LR, #imm8. */
608 return 1;
609 }
610 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
611 {
612 /* Conditional branch. */
613 return 1;
614 }
615
616 return 0;
617 }
618
619 if ((inst1 & 0xfe50) == 0xe810)
620 {
621 /* Load multiple or RFE. */
622
623 if (bit (inst1, 7) && !bit (inst1, 8))
624 {
625 /* LDMIA or POP */
626 if (bit (inst2, 15))
627 return 1;
628 }
629 else if (!bit (inst1, 7) && bit (inst1, 8))
630 {
631 /* LDMDB */
632 if (bit (inst2, 15))
633 return 1;
634 }
635 else if (bit (inst1, 7) && bit (inst1, 8))
636 {
637 /* RFEIA */
638 return 1;
639 }
640 else if (!bit (inst1, 7) && !bit (inst1, 8))
641 {
642 /* RFEDB */
643 return 1;
644 }
645
646 return 0;
647 }
648
649 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
650 {
651 /* MOV PC or MOVS PC. */
652 return 1;
653 }
654
655 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
656 {
657 /* LDR PC. */
658 if (bits (inst1, 0, 3) == 15)
659 return 1;
660 if (bit (inst1, 7))
661 return 1;
662 if (bit (inst2, 11))
663 return 1;
664 if ((inst2 & 0x0fc0) == 0x0000)
665 return 1;
666
667 return 0;
668 }
669
670 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
671 {
672 /* TBB. */
673 return 1;
674 }
675
676 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
677 {
678 /* TBH. */
679 return 1;
680 }
681
682 return 0;
683 }
684
685 /* Analyze a Thumb prologue, looking for a recognizable stack frame
686 and frame pointer. Scan until we encounter a store that could
687 clobber the stack frame unexpectedly, or an unknown instruction.
688 Return the last address which is definitely safe to skip for an
689 initial breakpoint. */
690
691 static CORE_ADDR
692 thumb_analyze_prologue (struct gdbarch *gdbarch,
693 CORE_ADDR start, CORE_ADDR limit,
694 struct arm_prologue_cache *cache)
695 {
696 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
697 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
698 int i;
699 pv_t regs[16];
700 struct pv_area *stack;
701 struct cleanup *back_to;
702 CORE_ADDR offset;
703 CORE_ADDR unrecognized_pc = 0;
704
705 for (i = 0; i < 16; i++)
706 regs[i] = pv_register (i, 0);
707 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
708 back_to = make_cleanup_free_pv_area (stack);
709
710 while (start < limit)
711 {
712 unsigned short insn;
713
714 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
715
716 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
717 {
718 int regno;
719 int mask;
720
721 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
722 break;
723
724 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
725 whether to save LR (R14). */
726 mask = (insn & 0xff) | ((insn & 0x100) << 6);
727
728 /* Calculate offsets of saved R0-R7 and LR. */
729 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
730 if (mask & (1 << regno))
731 {
732 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
733 -4);
734 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
735 }
736 }
737 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
738 sub sp, #simm */
739 {
740 offset = (insn & 0x7f) << 2; /* get scaled offset */
741 if (insn & 0x80) /* Check for SUB. */
742 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
743 -offset);
744 else
745 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
746 offset);
747 }
748 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
749 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
750 (insn & 0xff) << 2);
751 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
752 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
753 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
754 bits (insn, 6, 8));
755 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
756 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
757 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
758 bits (insn, 0, 7));
759 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
760 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
761 && pv_is_constant (regs[bits (insn, 3, 5)]))
762 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
763 regs[bits (insn, 6, 8)]);
764 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
765 && pv_is_constant (regs[bits (insn, 3, 6)]))
766 {
767 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
768 int rm = bits (insn, 3, 6);
769 regs[rd] = pv_add (regs[rd], regs[rm]);
770 }
771 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
772 {
773 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
774 int src_reg = (insn & 0x78) >> 3;
775 regs[dst_reg] = regs[src_reg];
776 }
777 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
778 {
779 /* Handle stores to the stack. Normally pushes are used,
780 but with GCC -mtpcs-frame, there may be other stores
781 in the prologue to create the frame. */
782 int regno = (insn >> 8) & 0x7;
783 pv_t addr;
784
785 offset = (insn & 0xff) << 2;
786 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
787
788 if (pv_area_store_would_trash (stack, addr))
789 break;
790
791 pv_area_store (stack, addr, 4, regs[regno]);
792 }
793 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
794 {
795 int rd = bits (insn, 0, 2);
796 int rn = bits (insn, 3, 5);
797 pv_t addr;
798
799 offset = bits (insn, 6, 10) << 2;
800 addr = pv_add_constant (regs[rn], offset);
801
802 if (pv_area_store_would_trash (stack, addr))
803 break;
804
805 pv_area_store (stack, addr, 4, regs[rd]);
806 }
807 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
808 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
809 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
810 /* Ignore stores of argument registers to the stack. */
811 ;
812 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
813 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
814 /* Ignore block loads from the stack, potentially copying
815 parameters from memory. */
816 ;
817 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [sp, #immed] */
818 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [Rn, #immed] */
819 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
820 /* Similarly ignore single loads from the stack. */
821 ;
822 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
823 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
824 /* Skip register copies, i.e. saves to another register
825 instead of the stack. */
826 ;
827 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
828 /* Recognize constant loads; even with small stacks these are necessary
829 on Thumb. */
830 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
831 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
832 {
833 /* Constant pool loads, for the same reason. */
834 unsigned int constant;
835 CORE_ADDR loc;
836
837 loc = start + 4 + bits (insn, 0, 7) * 4;
838 constant = read_memory_unsigned_integer (loc, 4, byte_order);
839 regs[bits (insn, 8, 10)] = pv_constant (constant);
840 }
841 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
842 {
843 unsigned short inst2;
844
845 inst2 = read_memory_unsigned_integer (start + 2, 2,
846 byte_order_for_code);
847
848 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
849 {
850 /* BL, BLX. Allow some special function calls when
851 skipping the prologue; GCC generates these before
852 storing arguments to the stack. */
853 CORE_ADDR nextpc;
854 int j1, j2, imm1, imm2;
855
856 imm1 = sbits (insn, 0, 10);
857 imm2 = bits (inst2, 0, 10);
858 j1 = bit (inst2, 13);
859 j2 = bit (inst2, 11);
860
861 offset = ((imm1 << 12) + (imm2 << 1));
862 offset ^= ((!j2) << 22) | ((!j1) << 23);
863
864 nextpc = start + 4 + offset;
865 /* For BLX make sure to clear the low bits. */
866 if (bit (inst2, 12) == 0)
867 nextpc = nextpc & 0xfffffffc;
868
869 if (!skip_prologue_function (gdbarch, nextpc,
870 bit (inst2, 12) != 0))
871 break;
872 }
873
874 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
875 { registers } */
876 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
877 {
878 pv_t addr = regs[bits (insn, 0, 3)];
879 int regno;
880
881 if (pv_area_store_would_trash (stack, addr))
882 break;
883
884 /* Calculate offsets of saved registers. */
885 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
886 if (inst2 & (1 << regno))
887 {
888 addr = pv_add_constant (addr, -4);
889 pv_area_store (stack, addr, 4, regs[regno]);
890 }
891
892 if (insn & 0x0020)
893 regs[bits (insn, 0, 3)] = addr;
894 }
895
896 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
897 [Rn, #+/-imm]{!} */
898 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
899 {
900 int regno1 = bits (inst2, 12, 15);
901 int regno2 = bits (inst2, 8, 11);
902 pv_t addr = regs[bits (insn, 0, 3)];
903
904 offset = inst2 & 0xff;
905 if (insn & 0x0080)
906 addr = pv_add_constant (addr, offset);
907 else
908 addr = pv_add_constant (addr, -offset);
909
910 if (pv_area_store_would_trash (stack, addr))
911 break;
912
913 pv_area_store (stack, addr, 4, regs[regno1]);
914 pv_area_store (stack, pv_add_constant (addr, 4),
915 4, regs[regno2]);
916
917 if (insn & 0x0020)
918 regs[bits (insn, 0, 3)] = addr;
919 }
920
921 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
922 && (inst2 & 0x0c00) == 0x0c00
923 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
924 {
925 int regno = bits (inst2, 12, 15);
926 pv_t addr = regs[bits (insn, 0, 3)];
927
928 offset = inst2 & 0xff;
929 if (inst2 & 0x0200)
930 addr = pv_add_constant (addr, offset);
931 else
932 addr = pv_add_constant (addr, -offset);
933
934 if (pv_area_store_would_trash (stack, addr))
935 break;
936
937 pv_area_store (stack, addr, 4, regs[regno]);
938
939 if (inst2 & 0x0100)
940 regs[bits (insn, 0, 3)] = addr;
941 }
942
943 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
944 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
945 {
946 int regno = bits (inst2, 12, 15);
947 pv_t addr;
948
949 offset = inst2 & 0xfff;
950 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
951
952 if (pv_area_store_would_trash (stack, addr))
953 break;
954
955 pv_area_store (stack, addr, 4, regs[regno]);
956 }
957
958 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
959 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
960 /* Ignore stores of argument registers to the stack. */
961 ;
962
963 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
964 && (inst2 & 0x0d00) == 0x0c00
965 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
966 /* Ignore stores of argument registers to the stack. */
967 ;
968
969 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
970 { registers } */
971 && (inst2 & 0x8000) == 0x0000
972 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
973 /* Ignore block loads from the stack, potentially copying
974 parameters from memory. */
975 ;
976
977 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
978 [Rn, #+/-imm] */
979 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
980 /* Similarly ignore dual loads from the stack. */
981 ;
982
983 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
984 && (inst2 & 0x0d00) == 0x0c00
985 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
986 /* Similarly ignore single loads from the stack. */
987 ;
988
989 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
990 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
991 /* Similarly ignore single loads from the stack. */
992 ;
993
994 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
995 && (inst2 & 0x8000) == 0x0000)
996 {
997 unsigned int imm = ((bits (insn, 10, 10) << 11)
998 | (bits (inst2, 12, 14) << 8)
999 | bits (inst2, 0, 7));
1000
1001 regs[bits (inst2, 8, 11)]
1002 = pv_add_constant (regs[bits (insn, 0, 3)],
1003 thumb_expand_immediate (imm));
1004 }
1005
1006 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1007 && (inst2 & 0x8000) == 0x0000)
1008 {
1009 unsigned int imm = ((bits (insn, 10, 10) << 11)
1010 | (bits (inst2, 12, 14) << 8)
1011 | bits (inst2, 0, 7));
1012
1013 regs[bits (inst2, 8, 11)]
1014 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1015 }
1016
1017 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1018 && (inst2 & 0x8000) == 0x0000)
1019 {
1020 unsigned int imm = ((bits (insn, 10, 10) << 11)
1021 | (bits (inst2, 12, 14) << 8)
1022 | bits (inst2, 0, 7));
1023
1024 regs[bits (inst2, 8, 11)]
1025 = pv_add_constant (regs[bits (insn, 0, 3)],
1026 - (CORE_ADDR) thumb_expand_immediate (imm));
1027 }
1028
1029 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1030 && (inst2 & 0x8000) == 0x0000)
1031 {
1032 unsigned int imm = ((bits (insn, 10, 10) << 11)
1033 | (bits (inst2, 12, 14) << 8)
1034 | bits (inst2, 0, 7));
1035
1036 regs[bits (inst2, 8, 11)]
1037 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1038 }
1039
1040 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1041 {
1042 unsigned int imm = ((bits (insn, 10, 10) << 11)
1043 | (bits (inst2, 12, 14) << 8)
1044 | bits (inst2, 0, 7));
1045
1046 regs[bits (inst2, 8, 11)]
1047 = pv_constant (thumb_expand_immediate (imm));
1048 }
1049
1050 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1051 {
1052 unsigned int imm
1053 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1054
1055 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1056 }
1057
1058 else if (insn == 0xea5f /* mov.w Rd,Rm */
1059 && (inst2 & 0xf0f0) == 0)
1060 {
1061 int dst_reg = (inst2 & 0x0f00) >> 8;
1062 int src_reg = inst2 & 0xf;
1063 regs[dst_reg] = regs[src_reg];
1064 }
1065
1066 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1067 {
1068 /* Constant pool loads. */
1069 unsigned int constant;
1070 CORE_ADDR loc;
1071
1072 offset = bits (insn, 0, 11);
1073 if (insn & 0x0080)
1074 loc = start + 4 + offset;
1075 else
1076 loc = start + 4 - offset;
1077
1078 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1079 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1080 }
1081
1082 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1083 {
1084 /* Constant pool loads. */
1085 unsigned int constant;
1086 CORE_ADDR loc;
1087
1088 offset = bits (insn, 0, 7) << 2;
1089 if (insn & 0x0080)
1090 loc = start + 4 + offset;
1091 else
1092 loc = start + 4 - offset;
1093
1094 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1095 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1096
1097 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1098 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1099 }
1100
1101 else if (thumb2_instruction_changes_pc (insn, inst2))
1102 {
1103 /* Don't scan past anything that might change control flow. */
1104 break;
1105 }
1106 else
1107 {
1108 /* The optimizer might shove anything into the prologue,
1109 so we just skip what we don't recognize. */
1110 unrecognized_pc = start;
1111 }
1112
1113 start += 2;
1114 }
1115 else if (thumb_instruction_changes_pc (insn))
1116 {
1117 /* Don't scan past anything that might change control flow. */
1118 break;
1119 }
1120 else
1121 {
1122 /* The optimizer might shove anything into the prologue,
1123 so we just skip what we don't recognize. */
1124 unrecognized_pc = start;
1125 }
1126
1127 start += 2;
1128 }
1129
1130 if (arm_debug)
1131 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1132 paddress (gdbarch, start));
1133
1134 if (unrecognized_pc == 0)
1135 unrecognized_pc = start;
1136
1137 if (cache == NULL)
1138 {
1139 do_cleanups (back_to);
1140 return unrecognized_pc;
1141 }
1142
1143 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1144 {
1145 /* Frame pointer is fp. Frame size is constant. */
1146 cache->framereg = ARM_FP_REGNUM;
1147 cache->framesize = -regs[ARM_FP_REGNUM].k;
1148 }
1149 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1150 {
1151 /* Frame pointer is r7. Frame size is constant. */
1152 cache->framereg = THUMB_FP_REGNUM;
1153 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1154 }
1155 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1156 {
1157 /* Try the stack pointer... this is a bit desperate. */
1158 cache->framereg = ARM_SP_REGNUM;
1159 cache->framesize = -regs[ARM_SP_REGNUM].k;
1160 }
1161 else
1162 {
1163 /* We're just out of luck. We don't know where the frame is. */
1164 cache->framereg = -1;
1165 cache->framesize = 0;
1166 }
1167
1168 for (i = 0; i < 16; i++)
1169 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1170 cache->saved_regs[i].addr = offset;
1171
1172 do_cleanups (back_to);
1173 return unrecognized_pc;
1174 }
1175
1176
1177 /* Try to analyze the instructions starting from PC, which load the symbol
1178 __stack_chk_guard. Return the address computed by those instructions,
1179 set the destination register number in *DESTREG, and set the size in
1180 bytes of the loading instructions in *OFFSET. Return 0 if the
1181 instructions are not recognized. */
1182
1183 static CORE_ADDR
1184 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1185 unsigned int *destreg, int *offset)
1186 {
1187 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1188 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1189 unsigned int low, high, address;
1190
1191 address = 0;
1192 if (is_thumb)
1193 {
1194 unsigned short insn1
1195 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1196
1197 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1198 {
1199 *destreg = bits (insn1, 8, 10);
1200 *offset = 2;
1201 address = bits (insn1, 0, 7);
1202 }
1203 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1204 {
1205 unsigned short insn2
1206 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1207
1208 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1209
1210 insn1
1211 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1212 insn2
1213 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1214
1215 /* movt Rd, #const */
1216 if ((insn1 & 0xfbc0) == 0xf2c0)
1217 {
1218 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1219 *destreg = bits (insn2, 8, 11);
1220 *offset = 8;
1221 address = (high << 16 | low);
1222 }
1223 }
1224 }
1225 else
1226 {
1227 unsigned int insn
1228 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1229
1230 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1231 {
1232 address = bits (insn, 0, 11);
1233 *destreg = bits (insn, 12, 15);
1234 *offset = 4;
1235 }
1236 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1237 {
1238 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1239
1240 insn
1241 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1242
1243 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1244 {
1245 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1246 *destreg = bits (insn, 12, 15);
1247 *offset = 8;
1248 address = (high << 16 | low);
1249 }
1250 }
1251 }
1252
1253 return address;
1254 }
1255
1256 /* Try to skip a sequence of instructions used by the stack protector. If PC
1257 points to the first instruction of this sequence, return the address of the
1258 first instruction after this sequence; otherwise, return the original PC.
1259
1260 On ARM, this sequence of instructions is composed of three main steps:
1261 Step 1: load symbol __stack_chk_guard,
1262 Step 2: load from address of __stack_chk_guard,
1263 Step 3: store it to somewhere else.
1264
1265 Usually, instructions on step 2 and step 3 are the same on various ARM
1266 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1267 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1268 instructions in step 1 vary from different ARM architectures. On ARMv7,
1269 they are,
1270
1271 movw Rn, #:lower16:__stack_chk_guard
1272 movt Rn, #:upper16:__stack_chk_guard
1273
1274 On ARMv5t, it is,
1275
1276 ldr Rn, .Label
1277 ....
1278 .Label:
1279 .word __stack_chk_guard
1280
1281 Since ldr/str are very common instructions, we can't use them alone as
1282 a 'fingerprint' of a stack protector sequence. Here we use the sequence
1283 {movw/movt, ldr}/ldr/str plus the symbol __stack_chk_guard, if it is not
1284 stripped, as the fingerprint of a stack protector code sequence. */
1285
1286 static CORE_ADDR
1287 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1288 {
1289 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1290 unsigned int address, basereg;
1291 struct minimal_symbol *stack_chk_guard;
1292 int offset;
1293 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1294 CORE_ADDR addr;
1295
1296 /* Try to parse the instructions in Step 1. */
1297 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1298 &basereg, &offset);
1299 if (!addr)
1300 return pc;
1301
1302 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1303 /* If the name of the symbol doesn't start with '__stack_chk_guard', this
1304 instruction sequence is not for the stack protector. If the symbol has
1305 been removed, we conservatively assume it is for the stack protector. */
1306 if (stack_chk_guard
1307 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1308 strlen ("__stack_chk_guard")) != 0)
1309 return pc;
1310
1311 if (is_thumb)
1312 {
1313 unsigned int destreg;
1314 unsigned short insn
1315 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1316
1317 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1318 if ((insn & 0xf800) != 0x6800)
1319 return pc;
1320 if (bits (insn, 3, 5) != basereg)
1321 return pc;
1322 destreg = bits (insn, 0, 2);
1323
1324 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1325 byte_order_for_code);
1326 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1327 if ((insn & 0xf800) != 0x6000)
1328 return pc;
1329 if (destreg != bits (insn, 0, 2))
1330 return pc;
1331 }
1332 else
1333 {
1334 unsigned int destreg;
1335 unsigned int insn
1336 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1337
1338 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1339 if ((insn & 0x0e500000) != 0x04100000)
1340 return pc;
1341 if (bits (insn, 16, 19) != basereg)
1342 return pc;
1343 destreg = bits (insn, 12, 15);
1344 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1345 insn = read_memory_unsigned_integer (pc + offset + 4,
1346 4, byte_order_for_code);
1347 if ((insn & 0x0e500000) != 0x04000000)
1348 return pc;
1349 if (bits (insn, 12, 15) != destreg)
1350 return pc;
1351 }
1352 /* The total size of the two ldr/str instructions is 4 bytes on Thumb-2,
1353 while it is 8 bytes on ARM. */
1354 if (is_thumb)
1355 return pc + offset + 4;
1356 else
1357 return pc + offset + 8;
1358 }
1359
1360 /* Advance the PC across any function entry prologue instructions to
1361 reach some "real" code.
1362
1363 The APCS (ARM Procedure Call Standard) defines the following
1364 prologue:
1365
1366 mov ip, sp
1367 [stmfd sp!, {a1,a2,a3,a4}]
1368 stmfd sp!, {...,fp,ip,lr,pc}
1369 [stfe f7, [sp, #-12]!]
1370 [stfe f6, [sp, #-12]!]
1371 [stfe f5, [sp, #-12]!]
1372 [stfe f4, [sp, #-12]!]
1373 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1374
1375 static CORE_ADDR
1376 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1377 {
1378 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1379 unsigned long inst;
1380 CORE_ADDR skip_pc;
1381 CORE_ADDR func_addr, limit_pc;
1382 struct symtab_and_line sal;
1383
1384 /* See if we can determine the end of the prologue via the symbol table.
1385 If so, then return either PC, or the PC after the prologue, whichever
1386 is greater. */
1387 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1388 {
1389 CORE_ADDR post_prologue_pc
1390 = skip_prologue_using_sal (gdbarch, func_addr);
1391 struct symtab *s = find_pc_symtab (func_addr);
1392
1393 if (post_prologue_pc)
1394 post_prologue_pc
1395 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1396
1397
1398 /* GCC always emits a line note before the prologue and another
1399 one after, even if the two are at the same address or on the
1400 same line. Take advantage of this so that we do not need to
1401 know every instruction that might appear in the prologue. We
1402 will have producer information for most binaries; if it is
1403 missing (e.g. for -gstabs), assume the GNU tools. */
1404 if (post_prologue_pc
1405 && (s == NULL
1406 || s->producer == NULL
1407 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1408 return post_prologue_pc;
1409
1410 if (post_prologue_pc != 0)
1411 {
1412 CORE_ADDR analyzed_limit;
1413
1414 /* For non-GCC compilers, make sure the entire line is an
1415 acceptable prologue; GDB will round this function's
1416 return value up to the end of the following line so we
1417 can not skip just part of a line (and we do not want to).
1418
1419 RealView does not treat the prologue specially, but does
1420 associate prologue code with the opening brace; so this
1421 lets us skip the first line if we think it is the opening
1422 brace. */
1423 if (arm_pc_is_thumb (gdbarch, func_addr))
1424 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1425 post_prologue_pc, NULL);
1426 else
1427 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1428 post_prologue_pc, NULL);
1429
1430 if (analyzed_limit != post_prologue_pc)
1431 return func_addr;
1432
1433 return post_prologue_pc;
1434 }
1435 }
1436
1437 /* Can't determine prologue from the symbol table, need to examine
1438 instructions. */
1439
1440 /* Find an upper limit on the function prologue using the debug
1441 information. If the debug information could not be used to provide
1442 that bound, then use an arbitrary large number as the upper bound. */
1443 /* Like arm_scan_prologue, stop no later than pc + 64. */
1444 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1445 if (limit_pc == 0)
1446 limit_pc = pc + 64; /* Magic. */
1447
1448
1449 /* Check if this is Thumb code. */
1450 if (arm_pc_is_thumb (gdbarch, pc))
1451 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1452
1453 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1454 {
1455 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1456
1457 /* "mov ip, sp" is no longer a required part of the prologue. */
1458 if (inst == 0xe1a0c00d) /* mov ip, sp */
1459 continue;
1460
1461 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1462 continue;
1463
1464 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1465 continue;
1466
1467 /* Some prologues begin with "str lr, [sp, #-4]!". */
1468 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1469 continue;
1470
1471 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1472 continue;
1473
1474 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1475 continue;
1476
1477 /* Any insns after this point may float into the code, if it makes
1478 for better instruction scheduling, so we skip them only if we
1479 find them, but still consider the function to be frame-ful. */
1480
1481 /* We may have either one sfmfd instruction here, or several stfe
1482 insns, depending on the version of floating point code we
1483 support. */
1484 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1485 continue;
1486
1487 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1488 continue;
1489
1490 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1491 continue;
1492
1493 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1494 continue;
1495
1496 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1497 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1498 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1499 continue;
1500
1501 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1502 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1503 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1504 continue;
1505
1506 /* Un-recognized instruction; stop scanning. */
1507 break;
1508 }
1509
1510 return skip_pc; /* End of prologue. */
1511 }
1512
1513 /* *INDENT-OFF* */
1514 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1515 This function decodes a Thumb function prologue to determine:
1516 1) the size of the stack frame
1517 2) which registers are saved on it
1518 3) the offsets of saved regs
1519 4) the offset from the stack pointer to the frame pointer
1520
1521 A typical Thumb function prologue would create this stack frame
1522 (offsets relative to FP)
1523 old SP -> 24 stack parameters
1524 20 LR
1525 16 R7
1526 R7 -> 0 local variables (16 bytes)
1527 SP -> -12 additional stack space (12 bytes)
1528 The frame size would thus be 36 bytes, and the frame offset would be
1529 12 bytes. The frame register is R7.
1530
1531 The comments for thumb_skip_prolog() describe the algorithm we use
1532 to detect the end of the prolog. */
1533 /* *INDENT-ON* */
1534
1535 static void
1536 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1537 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1538 {
1539 CORE_ADDR prologue_start;
1540 CORE_ADDR prologue_end;
1541 CORE_ADDR current_pc;
1542
1543 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1544 &prologue_end))
1545 {
1546 /* See comment in arm_scan_prologue for an explanation of
1547 this heuristic. */
1548 if (prologue_end > prologue_start + 64)
1549 {
1550 prologue_end = prologue_start + 64;
1551 }
1552 }
1553 else
1554 /* We're in the boondocks: we have no idea where the start of the
1555 function is. */
1556 return;
1557
1558 prologue_end = min (prologue_end, prev_pc);
1559
1560 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1561 }
1562
1563 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1564
1565 static int
1566 arm_instruction_changes_pc (uint32_t this_instr)
1567 {
1568 if (bits (this_instr, 28, 31) == INST_NV)
1569 /* Unconditional instructions. */
1570 switch (bits (this_instr, 24, 27))
1571 {
1572 case 0xa:
1573 case 0xb:
1574 /* Branch with Link and change to Thumb. */
1575 return 1;
1576 case 0xc:
1577 case 0xd:
1578 case 0xe:
1579 /* Coprocessor register transfer. */
1580 if (bits (this_instr, 12, 15) == 15)
1581 error (_("Invalid update to pc in instruction"));
1582 return 0;
1583 default:
1584 return 0;
1585 }
1586 else
1587 switch (bits (this_instr, 25, 27))
1588 {
1589 case 0x0:
1590 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1591 {
1592 /* Multiplies and extra load/stores. */
1593 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1594 /* Neither multiplies nor extension load/stores are allowed
1595 to modify PC. */
1596 return 0;
1597
1598 /* Otherwise, miscellaneous instructions. */
1599
1600 /* BX <reg>, BXJ <reg>, BLX <reg> */
1601 if (bits (this_instr, 4, 27) == 0x12fff1
1602 || bits (this_instr, 4, 27) == 0x12fff2
1603 || bits (this_instr, 4, 27) == 0x12fff3)
1604 return 1;
1605
1606 /* Other miscellaneous instructions are unpredictable if they
1607 modify PC. */
1608 return 0;
1609 }
1610 /* Data processing instruction. Fall through. */
1611
1612 case 0x1:
1613 if (bits (this_instr, 12, 15) == 15)
1614 return 1;
1615 else
1616 return 0;
1617
1618 case 0x2:
1619 case 0x3:
1620 /* Media instructions and architecturally undefined instructions. */
1621 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1622 return 0;
1623
1624 /* Stores. */
1625 if (bit (this_instr, 20) == 0)
1626 return 0;
1627
1628 /* Loads. */
1629 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1630 return 1;
1631 else
1632 return 0;
1633
1634 case 0x4:
1635 /* Load/store multiple. */
1636 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1637 return 1;
1638 else
1639 return 0;
1640
1641 case 0x5:
1642 /* Branch and branch with link. */
1643 return 1;
1644
1645 case 0x6:
1646 case 0x7:
1647 /* Coprocessor transfers or SWIs can not affect PC. */
1648 return 0;
1649
1650 default:
1651 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1652 }
1653 }
1654
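/* For example, arm_instruction_changes_pc returns 1 for 0xe12fff1e (bx lr)
   and 0xe8bd8800 (ldmfd sp!, {fp, pc}), but 0 for 0xe92d4800
   (stmfd sp!, {fp, lr}), since a store multiple cannot write the PC.  */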
1655 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1656 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1657 fill it in. Return the first address not recognized as a prologue
1658 instruction.
1659
1660 We recognize all the instructions typically found in ARM prologues,
1661 plus harmless instructions which can be skipped (either for analysis
1662 purposes, or a more restrictive set that can be skipped when finding
1663 the end of the prologue). */
1664
1665 static CORE_ADDR
1666 arm_analyze_prologue (struct gdbarch *gdbarch,
1667 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1668 struct arm_prologue_cache *cache)
1669 {
1670 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1671 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1672 int regno;
1673 CORE_ADDR offset, current_pc;
1674 pv_t regs[ARM_FPS_REGNUM];
1675 struct pv_area *stack;
1676 struct cleanup *back_to;
1677 int framereg, framesize;
1678 CORE_ADDR unrecognized_pc = 0;
1679
1680 /* Search the prologue looking for instructions that set up the
1681 frame pointer, adjust the stack pointer, and save registers.
1682
1683 Be careful, however, and if it doesn't look like a prologue,
1684 don't try to scan it. If, for instance, a frameless function
1685 begins with stmfd sp!, then we will tell ourselves there is
1686 a frame, which will confuse stack traceback, as well as "finish"
1687 and other operations that rely on a knowledge of the stack
1688 traceback. */
1689
1690 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1691 regs[regno] = pv_register (regno, 0);
1692 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1693 back_to = make_cleanup_free_pv_area (stack);
1694
1695 for (current_pc = prologue_start;
1696 current_pc < prologue_end;
1697 current_pc += 4)
1698 {
1699 unsigned int insn
1700 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1701
1702 if (insn == 0xe1a0c00d) /* mov ip, sp */
1703 {
1704 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1705 continue;
1706 }
1707 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1708 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1709 {
1710 unsigned imm = insn & 0xff; /* immediate value */
1711 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1712 int rd = bits (insn, 12, 15);
1713 imm = (imm >> rot) | (imm << (32 - rot));
1714 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1715 continue;
1716 }
1717 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1718 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1719 {
1720 unsigned imm = insn & 0xff; /* immediate value */
1721 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1722 int rd = bits (insn, 12, 15);
1723 imm = (imm >> rot) | (imm << (32 - rot));
1724 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1725 continue;
1726 }
1727 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1728 [sp, #-4]! */
1729 {
1730 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1731 break;
1732 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1733 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1734 regs[bits (insn, 12, 15)]);
1735 continue;
1736 }
1737 else if ((insn & 0xffff0000) == 0xe92d0000)
1738 /* stmfd sp!, {..., fp, ip, lr, pc}
1739 or
1740 stmfd sp!, {a1, a2, a3, a4} */
1741 {
1742 int mask = insn & 0xffff;
1743
1744 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1745 break;
1746
1747 /* Calculate offsets of saved registers. */
1748 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1749 if (mask & (1 << regno))
1750 {
1751 regs[ARM_SP_REGNUM]
1752 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1753 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1754 }
1755 }
1756 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1757 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1758 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1759 {
1760 /* No need to add this to saved_regs -- it's just an arg reg. */
1761 continue;
1762 }
1763 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1764 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1765 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1766 {
1767 /* No need to add this to saved_regs -- it's just an arg reg. */
1768 continue;
1769 }
1770 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1771 { registers } */
1772 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1773 {
1774 /* No need to add this to saved_regs -- it's just arg regs. */
1775 continue;
1776 }
1777 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1778 {
1779 unsigned imm = insn & 0xff; /* immediate value */
1780 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1781 imm = (imm >> rot) | (imm << (32 - rot));
1782 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1783 }
1784 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1785 {
1786 unsigned imm = insn & 0xff; /* immediate value */
1787 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1788 imm = (imm >> rot) | (imm << (32 - rot));
1789 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1790 }
1791 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1792 [sp, -#c]! */
1793 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1794 {
1795 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1796 break;
1797
1798 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1799 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1800 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1801 }
1802 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1803 [sp!] */
1804 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1805 {
1806 int n_saved_fp_regs;
1807 unsigned int fp_start_reg, fp_bound_reg;
1808
1809 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1810 break;
1811
1812 if ((insn & 0x800) == 0x800) /* N0 is set */
1813 {
1814 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1815 n_saved_fp_regs = 3;
1816 else
1817 n_saved_fp_regs = 1;
1818 }
1819 else
1820 {
1821 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1822 n_saved_fp_regs = 2;
1823 else
1824 n_saved_fp_regs = 4;
1825 }
1826
1827 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1828 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1829 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1830 {
1831 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1832 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1833 regs[fp_start_reg]);
1834 }
1835 }
1836 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1837 {
1838 /* Allow some special function calls when skipping the
1839 prologue; GCC generates these before storing arguments to
1840 the stack. */
1841 CORE_ADDR dest = BranchDest (current_pc, insn);
1842
1843 if (skip_prologue_function (gdbarch, dest, 0))
1844 continue;
1845 else
1846 break;
1847 }
1848 else if ((insn & 0xf0000000) != 0xe0000000)
1849 break; /* Condition not true, exit early. */
1850 else if (arm_instruction_changes_pc (insn))
1851 /* Don't scan past anything that might change control flow. */
1852 break;
1853 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1854 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1855 /* Ignore block loads from the stack, potentially copying
1856 parameters from memory. */
1857 continue;
1858 else if ((insn & 0xfc500000) == 0xe4100000
1859 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1860 /* Similarly ignore single loads from the stack. */
1861 continue;
1862 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1863 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1864 register instead of the stack. */
1865 continue;
1866 else
1867 {
1868 /* The optimizer might shove anything into the prologue,
1869 so we just skip what we don't recognize. */
1870 unrecognized_pc = current_pc;
1871 continue;
1872 }
1873 }
1874
1875 if (unrecognized_pc == 0)
1876 unrecognized_pc = current_pc;
1877
1878 /* The frame size is just the distance from the frame register
1879 to the original stack pointer. */
1880 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1881 {
1882 /* Frame pointer is fp. */
1883 framereg = ARM_FP_REGNUM;
1884 framesize = -regs[ARM_FP_REGNUM].k;
1885 }
1886 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1887 {
1888 /* Try the stack pointer... this is a bit desperate. */
1889 framereg = ARM_SP_REGNUM;
1890 framesize = -regs[ARM_SP_REGNUM].k;
1891 }
1892 else
1893 {
1894 /* We're just out of luck. We don't know where the frame is. */
1895 framereg = -1;
1896 framesize = 0;
1897 }
1898
1899 if (cache)
1900 {
1901 cache->framereg = framereg;
1902 cache->framesize = framesize;
1903
1904 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1905 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1906 cache->saved_regs[regno].addr = offset;
1907 }
1908
1909 if (arm_debug)
1910 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1911 paddress (gdbarch, unrecognized_pc));
1912
1913 do_cleanups (back_to);
1914 return unrecognized_pc;
1915 }
1916
1917 static void
1918 arm_scan_prologue (struct frame_info *this_frame,
1919 struct arm_prologue_cache *cache)
1920 {
1921 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1922 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1923 int regno;
1924 CORE_ADDR prologue_start, prologue_end, current_pc;
1925 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1926 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1927 pv_t regs[ARM_FPS_REGNUM];
1928 struct pv_area *stack;
1929 struct cleanup *back_to;
1930 CORE_ADDR offset;
1931
1932 /* Assume there is no frame until proven otherwise. */
1933 cache->framereg = ARM_SP_REGNUM;
1934 cache->framesize = 0;
1935
1936 /* Check for Thumb prologue. */
1937 if (arm_frame_is_thumb (this_frame))
1938 {
1939 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1940 return;
1941 }
1942
1943 /* Find the function prologue. If we can't find the function in
1944 the symbol table, peek in the stack frame to find the PC. */
1945 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1946 &prologue_end))
1947 {
1948 /* One way to find the end of the prologue (which works well
1949 for unoptimized code) is to do the following:
1950
1951 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1952
1953 if (sal.line == 0)
1954 prologue_end = prev_pc;
1955 else if (sal.end < prologue_end)
1956 prologue_end = sal.end;
1957
1958 This mechanism is very accurate so long as the optimizer
1959 doesn't move any instructions from the function body into the
1960 prologue. If this happens, sal.end will be the last
1961 instruction in the first hunk of prologue code just before
1962 the first instruction that the scheduler has moved from
1963 the body to the prologue.
1964
1965 In order to make sure that we scan all of the prologue
1966 instructions, we use a slightly less accurate mechanism which
1967 may scan more than necessary. To help compensate for this
1968 lack of accuracy, the prologue scanning loop below contains
1969 several clauses which will cause the loop to terminate early if
1970 an implausible prologue instruction is encountered.
1971
1972 The expression
1973
1974 prologue_start + 64
1975
1976 is a suitable endpoint since it accounts for the largest
1977 possible prologue plus up to five instructions inserted by
1978 the scheduler. */
1979
1980 if (prologue_end > prologue_start + 64)
1981 {
1982 prologue_end = prologue_start + 64; /* See above. */
1983 }
1984 }
1985 else
1986 {
1987 /* We have no symbol information. Our only option is to assume this
1988 function has a standard stack frame and the normal frame register.
1989 Then, we can find the value of our frame pointer on entrance to
1990 the callee (or at the present moment if this is the innermost frame).
1991 The value stored there should be the address of the stmfd + 8. */
1992 CORE_ADDR frame_loc;
1993 LONGEST return_value;
1994
1995 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1996 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1997 return;
1998 else
1999 {
2000 prologue_start = gdbarch_addr_bits_remove
2001 (gdbarch, return_value) - 8;
2002 prologue_end = prologue_start + 64; /* See above. */
2003 }
2004 }
2005
2006 if (prev_pc < prologue_end)
2007 prologue_end = prev_pc;
2008
2009 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2010 }
2011
2012 static struct arm_prologue_cache *
2013 arm_make_prologue_cache (struct frame_info *this_frame)
2014 {
2015 int reg;
2016 struct arm_prologue_cache *cache;
2017 CORE_ADDR unwound_fp;
2018
2019 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2020 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2021
2022 arm_scan_prologue (this_frame, cache);
2023
2024 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2025 if (unwound_fp == 0)
2026 return cache;
2027
2028 cache->prev_sp = unwound_fp + cache->framesize;
2029
2030 /* Calculate actual addresses of saved registers using offsets
2031 determined by arm_scan_prologue. */
2032 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2033 if (trad_frame_addr_p (cache->saved_regs, reg))
2034 cache->saved_regs[reg].addr += cache->prev_sp;
2035
2036 return cache;
2037 }
2038
2039 /* Our frame ID for a normal frame is the current function's starting PC
2040 and the caller's SP when we were called. */
2041
2042 static void
2043 arm_prologue_this_id (struct frame_info *this_frame,
2044 void **this_cache,
2045 struct frame_id *this_id)
2046 {
2047 struct arm_prologue_cache *cache;
2048 struct frame_id id;
2049 CORE_ADDR pc, func;
2050
2051 if (*this_cache == NULL)
2052 *this_cache = arm_make_prologue_cache (this_frame);
2053 cache = *this_cache;
2054
2055 /* This is meant to halt the backtrace at "_start". */
2056 pc = get_frame_pc (this_frame);
2057 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2058 return;
2059
2060 /* If we've hit a wall, stop. */
2061 if (cache->prev_sp == 0)
2062 return;
2063
2064 /* Use function start address as part of the frame ID. If we cannot
2065 identify the start address (due to missing symbol information),
2066 fall back to just using the current PC. */
2067 func = get_frame_func (this_frame);
2068 if (!func)
2069 func = pc;
2070
2071 id = frame_id_build (cache->prev_sp, func);
2072 *this_id = id;
2073 }
2074
2075 static struct value *
2076 arm_prologue_prev_register (struct frame_info *this_frame,
2077 void **this_cache,
2078 int prev_regnum)
2079 {
2080 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2081 struct arm_prologue_cache *cache;
2082
2083 if (*this_cache == NULL)
2084 *this_cache = arm_make_prologue_cache (this_frame);
2085 cache = *this_cache;
2086
2087 /* If we are asked to unwind the PC, then we need to return the LR
2088 instead. The prologue may save PC, but it will point into this
2089 frame's prologue, not the next frame's resume location. Also
2090 strip the saved T bit. A valid LR may have the low bit set, but
2091 a valid PC never does. */
2092 if (prev_regnum == ARM_PC_REGNUM)
2093 {
2094 CORE_ADDR lr;
2095
2096 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2097 return frame_unwind_got_constant (this_frame, prev_regnum,
2098 arm_addr_bits_remove (gdbarch, lr));
2099 }
2100
2101 /* SP is generally not saved to the stack, but this frame is
2102 identified by the next frame's stack pointer at the time of the call.
2103 The value was already reconstructed into PREV_SP. */
2104 if (prev_regnum == ARM_SP_REGNUM)
2105 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2106
2107 /* The CPSR may have been changed by the call instruction and by the
2108 called function. The only bit we can reconstruct is the T bit,
2109 by checking the low bit of LR as of the call. This is a reliable
2110 indicator of Thumb-ness except for some ARM v4T pre-interworking
2111 Thumb code, which could get away with a clear low bit as long as
2112 the called function did not use bx. Guess that all other
2113 bits are unchanged; the condition flags are presumably lost,
2114 but the processor status is likely valid. */
2115 if (prev_regnum == ARM_PS_REGNUM)
2116 {
2117 CORE_ADDR lr, cpsr;
2118 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2119
2120 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2121 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2122 if (IS_THUMB_ADDR (lr))
2123 cpsr |= t_bit;
2124 else
2125 cpsr &= ~t_bit;
2126 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2127 }
2128
2129 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2130 prev_regnum);
2131 }
2132
2133 struct frame_unwind arm_prologue_unwind = {
2134 NORMAL_FRAME,
2135 default_frame_unwind_stop_reason,
2136 arm_prologue_this_id,
2137 arm_prologue_prev_register,
2138 NULL,
2139 default_frame_sniffer
2140 };
2141
2142 /* Maintain a list of ARM exception table entries per objfile, similar to the
2143 list of mapping symbols. We only cache entries for standard ARM-defined
2144 personality routines; the cache will contain only the frame unwinding
2145 instructions associated with the entry (not the descriptors). */
2146
2147 static const struct objfile_data *arm_exidx_data_key;
2148
2149 struct arm_exidx_entry
2150 {
2151 bfd_vma addr;
2152 gdb_byte *entry;
2153 };
2154 typedef struct arm_exidx_entry arm_exidx_entry_s;
2155 DEF_VEC_O(arm_exidx_entry_s);
2156
2157 struct arm_exidx_data
2158 {
2159 VEC(arm_exidx_entry_s) **section_maps;
2160 };
2161
2162 static void
2163 arm_exidx_data_free (struct objfile *objfile, void *arg)
2164 {
2165 struct arm_exidx_data *data = arg;
2166 unsigned int i;
2167
2168 for (i = 0; i < objfile->obfd->section_count; i++)
2169 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2170 }
2171
2172 static inline int
2173 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2174 const struct arm_exidx_entry *rhs)
2175 {
2176 return lhs->addr < rhs->addr;
2177 }
2178
2179 static struct obj_section *
2180 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2181 {
2182 struct obj_section *osect;
2183
2184 ALL_OBJFILE_OSECTIONS (objfile, osect)
2185 if (bfd_get_section_flags (objfile->obfd,
2186 osect->the_bfd_section) & SEC_ALLOC)
2187 {
2188 bfd_vma start, size;
2189 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2190 size = bfd_get_section_size (osect->the_bfd_section);
2191
2192 if (start <= vma && vma < start + size)
2193 return osect;
2194 }
2195
2196 return NULL;
2197 }
2198
2199 /* Parse contents of exception table and exception index sections
2200 of OBJFILE, and fill in the exception table entry cache.
2201
2202 For each entry that refers to a standard ARM-defined personality
2203 routine, extract the frame unwinding instructions (from either
2204 the index or the table section). The unwinding instructions
2205 are normalized by:
2206 - extracting them from the rest of the table data
2207 - converting to host endianness
2208 - appending the implicit 0xb0 ("Finish") code
2209
2210 The extracted and normalized instructions are stored for later
2211 retrieval by the arm_find_exidx_entry routine. */
2212
2213 static void
2214 arm_exidx_new_objfile (struct objfile *objfile)
2215 {
2216 struct cleanup *cleanups;
2217 struct arm_exidx_data *data;
2218 asection *exidx, *extab;
2219 bfd_vma exidx_vma = 0, extab_vma = 0;
2220 bfd_size_type exidx_size = 0, extab_size = 0;
2221 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2222 LONGEST i;
2223
2224 /* If we've already touched this file, do nothing. */
2225 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2226 return;
2227 cleanups = make_cleanup (null_cleanup, NULL);
2228
2229 /* Read contents of exception table and index. */
2230 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2231 if (exidx)
2232 {
2233 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2234 exidx_size = bfd_get_section_size (exidx);
2235 exidx_data = xmalloc (exidx_size);
2236 make_cleanup (xfree, exidx_data);
2237
2238 if (!bfd_get_section_contents (objfile->obfd, exidx,
2239 exidx_data, 0, exidx_size))
2240 {
2241 do_cleanups (cleanups);
2242 return;
2243 }
2244 }
2245
2246 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2247 if (extab)
2248 {
2249 extab_vma = bfd_section_vma (objfile->obfd, extab);
2250 extab_size = bfd_get_section_size (extab);
2251 extab_data = xmalloc (extab_size);
2252 make_cleanup (xfree, extab_data);
2253
2254 if (!bfd_get_section_contents (objfile->obfd, extab,
2255 extab_data, 0, extab_size))
2256 {
2257 do_cleanups (cleanups);
2258 return;
2259 }
2260 }
2261
2262 /* Allocate exception table data structure. */
2263 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2264 set_objfile_data (objfile, arm_exidx_data_key, data);
2265 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2266 objfile->obfd->section_count,
2267 VEC(arm_exidx_entry_s) *);
2268
2269 /* Fill in exception table. */
2270 for (i = 0; i < exidx_size / 8; i++)
2271 {
2272 struct arm_exidx_entry new_exidx_entry;
2273 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2274 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2275 bfd_vma addr = 0, word = 0;
2276 int n_bytes = 0, n_words = 0;
2277 struct obj_section *sec;
2278 gdb_byte *entry = NULL;
2279
2280 /* Extract address of start of function. */
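/* The first word holds a prel31 (31-bit place-relative) offset; the XOR/subtract below sign-extends it from bit 30. */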
2281 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2282 idx += exidx_vma + i * 8;
2283
2284 /* Find section containing function and compute section offset. */
2285 sec = arm_obj_section_from_vma (objfile, idx);
2286 if (sec == NULL)
2287 continue;
2288 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2289
2290 /* Determine address of exception table entry. */
2291 if (val == 1)
2292 {
2293 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2294 }
2295 else if ((val & 0xff000000) == 0x80000000)
2296 {
2297 /* Exception table entry embedded in .ARM.exidx
2298 -- must be short form. */
2299 word = val;
2300 n_bytes = 3;
2301 }
2302 else if (!(val & 0x80000000))
2303 {
2304 /* Exception table entry in .ARM.extab. */
2305 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2306 addr += exidx_vma + i * 8 + 4;
2307
2308 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2309 {
2310 word = bfd_h_get_32 (objfile->obfd,
2311 extab_data + addr - extab_vma);
2312 addr += 4;
2313
2314 if ((word & 0xff000000) == 0x80000000)
2315 {
2316 /* Short form. */
2317 n_bytes = 3;
2318 }
2319 else if ((word & 0xff000000) == 0x81000000
2320 || (word & 0xff000000) == 0x82000000)
2321 {
2322 /* Long form. */
2323 n_bytes = 2;
2324 n_words = ((word >> 16) & 0xff);
2325 }
2326 else if (!(word & 0x80000000))
2327 {
2328 bfd_vma pers;
2329 struct obj_section *pers_sec;
2330 int gnu_personality = 0;
2331
2332 /* Custom personality routine. */
2333 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2334 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2335
2336 /* Check whether we've got one of the variants of the
2337 GNU personality routines. */
2338 pers_sec = arm_obj_section_from_vma (objfile, pers);
2339 if (pers_sec)
2340 {
2341 static const char *personality[] =
2342 {
2343 "__gcc_personality_v0",
2344 "__gxx_personality_v0",
2345 "__gcj_personality_v0",
2346 "__gnu_objc_personality_v0",
2347 NULL
2348 };
2349
2350 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2351 int k;
2352
2353 for (k = 0; personality[k]; k++)
2354 if (lookup_minimal_symbol_by_pc_name
2355 (pc, personality[k], objfile))
2356 {
2357 gnu_personality = 1;
2358 break;
2359 }
2360 }
2361
2362 /* If so, the next word contains a word count in the high
2363 byte, followed by the same unwind instructions as the
2364 pre-defined forms. */
2365 if (gnu_personality
2366 && addr + 4 <= extab_vma + extab_size)
2367 {
2368 word = bfd_h_get_32 (objfile->obfd,
2369 extab_data + addr - extab_vma);
2370 addr += 4;
2371 n_bytes = 3;
2372 n_words = ((word >> 24) & 0xff);
2373 }
2374 }
2375 }
2376 }
2377
2378 /* Sanity check address. */
2379 if (n_words)
2380 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2381 n_words = n_bytes = 0;
2382
2383 /* The unwind instructions reside in WORD (only the N_BYTES least
2384 significant bytes are valid), followed by N_WORDS words in the
2385 extab section starting at ADDR. */
2386 if (n_bytes || n_words)
2387 {
2388 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2389 n_bytes + n_words * 4 + 1);
2390
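/* Copy the valid bytes of WORD, most significant byte first. */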
2391 while (n_bytes--)
2392 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2393
2394 while (n_words--)
2395 {
2396 word = bfd_h_get_32 (objfile->obfd,
2397 extab_data + addr - extab_vma);
2398 addr += 4;
2399
2400 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2401 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2402 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2403 *p++ = (gdb_byte) (word & 0xff);
2404 }
2405
2406 /* Implied "Finish" to terminate the list. */
2407 *p++ = 0xb0;
2408 }
2409
2410 /* Push the entry onto the vector. Entries are guaranteed to
2411 appear in order of increasing addresses. */
2412 new_exidx_entry.addr = idx;
2413 new_exidx_entry.entry = entry;
2414 VEC_safe_push (arm_exidx_entry_s,
2415 data->section_maps[sec->the_bfd_section->index],
2416 &new_exidx_entry);
2417 }
2418
2419 do_cleanups (cleanups);
2420 }
2421
2422 /* Search for the exception table entry covering MEMADDR. If one is found,
2423 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2424 set *START to the start of the region covered by this entry. */
2425
2426 static gdb_byte *
2427 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2428 {
2429 struct obj_section *sec;
2430
2431 sec = find_pc_section (memaddr);
2432 if (sec != NULL)
2433 {
2434 struct arm_exidx_data *data;
2435 VEC(arm_exidx_entry_s) *map;
2436 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2437 unsigned int idx;
2438
2439 data = objfile_data (sec->objfile, arm_exidx_data_key);
2440 if (data != NULL)
2441 {
2442 map = data->section_maps[sec->the_bfd_section->index];
2443 if (!VEC_empty (arm_exidx_entry_s, map))
2444 {
2445 struct arm_exidx_entry *map_sym;
2446
2447 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2448 arm_compare_exidx_entries);
2449
2450 /* VEC_lower_bound finds the earliest ordered insertion
2451 point. If the following symbol starts at this exact
2452 address, we use that; otherwise, the preceding
2453 exception table entry covers this address. */
2454 if (idx < VEC_length (arm_exidx_entry_s, map))
2455 {
2456 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2457 if (map_sym->addr == map_key.addr)
2458 {
2459 if (start)
2460 *start = map_sym->addr + obj_section_addr (sec);
2461 return map_sym->entry;
2462 }
2463 }
2464
2465 if (idx > 0)
2466 {
2467 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2468 if (start)
2469 *start = map_sym->addr + obj_section_addr (sec);
2470 return map_sym->entry;
2471 }
2472 }
2473 }
2474 }
2475
2476 return NULL;
2477 }
2478
2479 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2480 instruction list from the ARM exception table entry ENTRY, allocate and
2481 return a prologue cache structure describing how to unwind this frame.
2482
2483 Return NULL if the unwinding instruction list contains a "spare",
2484 "reserved" or "refuse to unwind" instruction as defined in section
2485 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2486 for the ARM Architecture" document. */
2487
2488 static struct arm_prologue_cache *
2489 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2490 {
2491 CORE_ADDR vsp = 0;
2492 int vsp_valid = 0;
2493
2494 struct arm_prologue_cache *cache;
2495 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2496 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2497
2498 for (;;)
2499 {
2500 gdb_byte insn;
2501
2502 /* Whenever we reload SP, we have to retrieve its actual
2503 value in the current frame. */
2504 if (!vsp_valid)
2505 {
2506 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2507 {
2508 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2509 vsp = get_frame_register_unsigned (this_frame, reg);
2510 }
2511 else
2512 {
2513 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2514 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2515 }
2516
2517 vsp_valid = 1;
2518 }
2519
2520 /* Decode next unwind instruction. */
2521 insn = *entry++;
2522
2523 if ((insn & 0xc0) == 0)
2524 {
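/* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4. */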
2525 int offset = insn & 0x3f;
2526 vsp += (offset << 2) + 4;
2527 }
2528 else if ((insn & 0xc0) == 0x40)
2529 {
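/* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4. */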
2530 int offset = insn & 0x3f;
2531 vsp -= (offset << 2) + 4;
2532 }
2533 else if ((insn & 0xf0) == 0x80)
2534 {
2535 int mask = ((insn & 0xf) << 8) | *entry++;
2536 int i;
2537
2538 /* The special case of an all-zero mask identifies
2539 "Refuse to unwind". We return NULL to fall back
2540 to the prologue analyzer. */
2541 if (mask == 0)
2542 return NULL;
2543
2544 /* Pop registers r4..r15 under mask. */
2545 for (i = 0; i < 12; i++)
2546 if (mask & (1 << i))
2547 {
2548 cache->saved_regs[4 + i].addr = vsp;
2549 vsp += 4;
2550 }
2551
2552 /* Special-case popping SP -- we need to reload vsp. */
2553 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2554 vsp_valid = 0;
2555 }
2556 else if ((insn & 0xf0) == 0x90)
2557 {
2558 int reg = insn & 0xf;
2559
2560 /* Reserved cases. */
2561 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2562 return NULL;
2563
2564 /* Set SP from another register and mark VSP for reload. */
2565 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2566 vsp_valid = 0;
2567 }
2568 else if ((insn & 0xf0) == 0xa0)
2569 {
2570 int count = insn & 0x7;
2571 int pop_lr = (insn & 0x8) != 0;
2572 int i;
2573
2574 /* Pop r4..r[4+count]. */
2575 for (i = 0; i <= count; i++)
2576 {
2577 cache->saved_regs[4 + i].addr = vsp;
2578 vsp += 4;
2579 }
2580
2581 /* If indicated by flag, pop LR as well. */
2582 if (pop_lr)
2583 {
2584 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2585 vsp += 4;
2586 }
2587 }
2588 else if (insn == 0xb0)
2589 {
2590 /* We could only have updated PC by popping into it; if so, it
2591 will show up as an address. Otherwise, copy LR into PC. */
2592 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2593 cache->saved_regs[ARM_PC_REGNUM]
2594 = cache->saved_regs[ARM_LR_REGNUM];
2595
2596 /* We're done. */
2597 break;
2598 }
2599 else if (insn == 0xb1)
2600 {
2601 int mask = *entry++;
2602 int i;
2603
2604 /* An all-zero mask or a mask >= 16 is "spare". */
2605 if (mask == 0 || mask >= 16)
2606 return NULL;
2607
2608 /* Pop r0..r3 under mask. */
2609 for (i = 0; i < 4; i++)
2610 if (mask & (1 << i))
2611 {
2612 cache->saved_regs[i].addr = vsp;
2613 vsp += 4;
2614 }
2615 }
2616 else if (insn == 0xb2)
2617 {
2618 ULONGEST offset = 0;
2619 unsigned shift = 0;
2620
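/* 10110010: vsp = vsp + 0x204 + (uleb128 << 2). Decode the ULEB128 operand first. */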
2621 do
2622 {
2623 offset |= (*entry & 0x7f) << shift;
2624 shift += 7;
2625 }
2626 while (*entry++ & 0x80);
2627
2628 vsp += 0x204 + (offset << 2);
2629 }
2630 else if (insn == 0xb3)
2631 {
2632 int start = *entry >> 4;
2633 int count = (*entry++) & 0xf;
2634 int i;
2635
2636 /* Only registers D0..D15 are valid here. */
2637 if (start + count >= 16)
2638 return NULL;
2639
2640 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2641 for (i = 0; i <= count; i++)
2642 {
2643 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2644 vsp += 8;
2645 }
2646
2647 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2648 vsp += 4;
2649 }
2650 else if ((insn & 0xf8) == 0xb8)
2651 {
2652 int count = insn & 0x7;
2653 int i;
2654
2655 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2656 for (i = 0; i <= count; i++)
2657 {
2658 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2659 vsp += 8;
2660 }
2661
2662 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2663 vsp += 4;
2664 }
2665 else if (insn == 0xc6)
2666 {
2667 int start = *entry >> 4;
2668 int count = (*entry++) & 0xf;
2669 int i;
2670
2671 /* Only registers WR0..WR15 are valid. */
2672 if (start + count >= 16)
2673 return NULL;
2674
2675 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2676 for (i = 0; i <= count; i++)
2677 {
2678 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2679 vsp += 8;
2680 }
2681 }
2682 else if (insn == 0xc7)
2683 {
2684 int mask = *entry++;
2685 int i;
2686
2687 /* An all-zero mask or a mask >= 16 is "spare". */
2688 if (mask == 0 || mask >= 16)
2689 return NULL;
2690
2691 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2692 for (i = 0; i < 4; i++)
2693 if (mask & (1 << i))
2694 {
2695 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2696 vsp += 4;
2697 }
2698 }
2699 else if ((insn & 0xf8) == 0xc0)
2700 {
2701 int count = insn & 0x7;
2702 int i;
2703
2704 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2705 for (i = 0; i <= count; i++)
2706 {
2707 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2708 vsp += 8;
2709 }
2710 }
2711 else if (insn == 0xc8)
2712 {
2713 int start = *entry >> 4;
2714 int count = (*entry++) & 0xf;
2715 int i;
2716
2717 /* Only registers D16..D31 are valid here, so START + COUNT must stay below 16. */
2718 if (start + count >= 16)
2719 return NULL;
2720
2721 /* Pop VFP double-precision registers
2722 D[16+start]..D[16+start+count]. */
2723 for (i = 0; i <= count; i++)
2724 {
2725 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2726 vsp += 8;
2727 }
2728 }
2729 else if (insn == 0xc9)
2730 {
2731 int start = *entry >> 4;
2732 int count = (*entry++) & 0xf;
2733 int i;
2734
2735 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2736 for (i = 0; i <= count; i++)
2737 {
2738 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2739 vsp += 8;
2740 }
2741 }
2742 else if ((insn & 0xf8) == 0xd0)
2743 {
2744 int count = insn & 0x7;
2745 int i;
2746
2747 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2748 for (i = 0; i <= count; i++)
2749 {
2750 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2751 vsp += 8;
2752 }
2753 }
2754 else
2755 {
2756 /* Everything else is "spare". */
2757 return NULL;
2758 }
2759 }
2760
2761 /* If we restore SP from a register, assume this was the frame register.
2762 Otherwise just fall back to SP as frame register. */
2763 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2764 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2765 else
2766 cache->framereg = ARM_SP_REGNUM;
2767
2768 /* Determine offset to previous frame. */
2769 cache->framesize
2770 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2771
2772 /* We already got the previous SP. */
2773 cache->prev_sp = vsp;
2774
2775 return cache;
2776 }
2777
2778 /* Unwinding via ARM exception table entries. Note that the sniffer
2779 already computes a filled-in prologue cache, which is then used
2780 with the same arm_prologue_this_id and arm_prologue_prev_register
2781 routines also used for prologue-parsing based unwinding. */
2782
2783 static int
2784 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2785 struct frame_info *this_frame,
2786 void **this_prologue_cache)
2787 {
2788 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2789 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2790 CORE_ADDR addr_in_block, exidx_region, func_start;
2791 struct arm_prologue_cache *cache;
2792 gdb_byte *entry;
2793
2794 /* See if we have an ARM exception table entry covering this address. */
2795 addr_in_block = get_frame_address_in_block (this_frame);
2796 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2797 if (!entry)
2798 return 0;
2799
2800 /* The ARM exception table does not describe unwind information
2801 for arbitrary PC values, but is guaranteed to be correct only
2802 at call sites. We have to decide here whether we want to use
2803 ARM exception table information for this frame, or fall back
2804 to using prologue parsing. (Note that if we have DWARF CFI,
2805 this sniffer isn't even called -- CFI is always preferred.)
2806
2807 Before we make this decision, however, we check whether we
2808 actually have *symbol* information for the current frame.
2809 If not, prologue parsing would not work anyway, so we might
2810 as well use the exception table and hope for the best. */
2811 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2812 {
2813 int exc_valid = 0;
2814
2815 /* If the next frame is "normal", we are at a call site in this
2816 frame, so exception information is guaranteed to be valid. */
2817 if (get_next_frame (this_frame)
2818 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2819 exc_valid = 1;
2820
2821 /* We also assume exception information is valid if we're currently
2822 blocked in a system call. The system library is supposed to
2823 ensure this, so that e.g. pthread cancellation works. */
2824 if (arm_frame_is_thumb (this_frame))
2825 {
2826 LONGEST insn;
2827
2828 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2829 byte_order_for_code, &insn)
2830 && (insn & 0xff00) == 0xdf00 /* svc */)
2831 exc_valid = 1;
2832 }
2833 else
2834 {
2835 LONGEST insn;
2836
2837 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2838 byte_order_for_code, &insn)
2839 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2840 exc_valid = 1;
2841 }
2842
2843 /* Bail out if we don't know that exception information is valid. */
2844 if (!exc_valid)
2845 return 0;
2846
2847 /* The ARM exception index does not mark the *end* of the region
2848 covered by the entry, and some functions will not have any entry.
2849 To correctly recognize the end of the covered region, the linker
2850 should have inserted dummy records with a CANTUNWIND marker.
2851
2852 Unfortunately, current versions of GNU ld do not reliably do
2853 this, and thus we may have found an incorrect entry above.
2854 As a (temporary) sanity check, we only use the entry if it
2855 lies *within* the bounds of the function. Note that this check
2856 might reject perfectly valid entries that just happen to cover
2857 multiple functions; therefore this check ought to be removed
2858 once the linker is fixed. */
2859 if (func_start > exidx_region)
2860 return 0;
2861 }
2862
2863 /* Decode the list of unwinding instructions into a prologue cache.
2864 Note that this may fail due to e.g. a "refuse to unwind" code. */
2865 cache = arm_exidx_fill_cache (this_frame, entry);
2866 if (!cache)
2867 return 0;
2868
2869 *this_prologue_cache = cache;
2870 return 1;
2871 }
2872
2873 struct frame_unwind arm_exidx_unwind = {
2874 NORMAL_FRAME,
2875 default_frame_unwind_stop_reason,
2876 arm_prologue_this_id,
2877 arm_prologue_prev_register,
2878 NULL,
2879 arm_exidx_unwind_sniffer
2880 };
2881
2882 static struct arm_prologue_cache *
2883 arm_make_stub_cache (struct frame_info *this_frame)
2884 {
2885 struct arm_prologue_cache *cache;
2886
2887 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2888 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2889
2890 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2891
2892 return cache;
2893 }
2894
2895 /* Our frame ID for a stub frame is the current SP and LR. */
2896
2897 static void
2898 arm_stub_this_id (struct frame_info *this_frame,
2899 void **this_cache,
2900 struct frame_id *this_id)
2901 {
2902 struct arm_prologue_cache *cache;
2903
2904 if (*this_cache == NULL)
2905 *this_cache = arm_make_stub_cache (this_frame);
2906 cache = *this_cache;
2907
2908 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2909 }
2910
2911 static int
2912 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2913 struct frame_info *this_frame,
2914 void **this_prologue_cache)
2915 {
2916 CORE_ADDR addr_in_block;
2917 char dummy[4];
2918
2919 addr_in_block = get_frame_address_in_block (this_frame);
2920 if (in_plt_section (addr_in_block, NULL)
2921 /* We also use the stub unwinder if the target memory is unreadable
2922 to avoid having the prologue unwinder try to read it. */
2923 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2924 return 1;
2925
2926 return 0;
2927 }
2928
2929 struct frame_unwind arm_stub_unwind = {
2930 NORMAL_FRAME,
2931 default_frame_unwind_stop_reason,
2932 arm_stub_this_id,
2933 arm_prologue_prev_register,
2934 NULL,
2935 arm_stub_unwind_sniffer
2936 };
2937
2938 static CORE_ADDR
2939 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2940 {
2941 struct arm_prologue_cache *cache;
2942
2943 if (*this_cache == NULL)
2944 *this_cache = arm_make_prologue_cache (this_frame);
2945 cache = *this_cache;
2946
2947 return cache->prev_sp - cache->framesize;
2948 }
2949
2950 struct frame_base arm_normal_base = {
2951 &arm_prologue_unwind,
2952 arm_normal_frame_base,
2953 arm_normal_frame_base,
2954 arm_normal_frame_base
2955 };
2956
2957 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2958 dummy frame. The frame ID's base needs to match the TOS value
2959 saved by save_dummy_frame_tos() and returned from
2960 arm_push_dummy_call, and the PC needs to match the dummy frame's
2961 breakpoint. */
2962
2963 static struct frame_id
2964 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2965 {
2966 return frame_id_build (get_frame_register_unsigned (this_frame,
2967 ARM_SP_REGNUM),
2968 get_frame_pc (this_frame));
2969 }
2970
2971 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2972 be used to construct the previous frame's ID, after looking up the
2973 containing function). */
2974
2975 static CORE_ADDR
2976 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2977 {
2978 CORE_ADDR pc;
2979 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2980 return arm_addr_bits_remove (gdbarch, pc);
2981 }
2982
2983 static CORE_ADDR
2984 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2985 {
2986 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2987 }
2988
2989 static struct value *
2990 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2991 int regnum)
2992 {
2993 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2994 CORE_ADDR lr, cpsr;
2995 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2996
2997 switch (regnum)
2998 {
2999 case ARM_PC_REGNUM:
3000 /* The PC is normally copied from the return column, which
3001 describes saves of LR. However, that version may have an
3002 extra bit set to indicate Thumb state. The bit is not
3003 part of the PC. */
3004 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3005 return frame_unwind_got_constant (this_frame, regnum,
3006 arm_addr_bits_remove (gdbarch, lr));
3007
3008 case ARM_PS_REGNUM:
3009 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3010 cpsr = get_frame_register_unsigned (this_frame, regnum);
3011 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3012 if (IS_THUMB_ADDR (lr))
3013 cpsr |= t_bit;
3014 else
3015 cpsr &= ~t_bit;
3016 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3017
3018 default:
3019 internal_error (__FILE__, __LINE__,
3020 _("Unexpected register %d"), regnum);
3021 }
3022 }
3023
3024 static void
3025 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3026 struct dwarf2_frame_state_reg *reg,
3027 struct frame_info *this_frame)
3028 {
3029 switch (regnum)
3030 {
3031 case ARM_PC_REGNUM:
3032 case ARM_PS_REGNUM:
3033 reg->how = DWARF2_FRAME_REG_FN;
3034 reg->loc.fn = arm_dwarf2_prev_register;
3035 break;
3036 case ARM_SP_REGNUM:
3037 reg->how = DWARF2_FRAME_REG_CFA;
3038 break;
3039 }
3040 }
3041
3042 /* Return true if we are in the function's epilogue, i.e. after the
3043 instruction that destroyed the function's stack frame. */
3044
3045 static int
3046 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3047 {
3048 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3049 unsigned int insn, insn2;
3050 int found_return = 0, found_stack_adjust = 0;
3051 CORE_ADDR func_start, func_end;
3052 CORE_ADDR scan_pc;
3053 gdb_byte buf[4];
3054
3055 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3056 return 0;
3057
3058 /* The epilogue is a sequence of instructions along the following lines:
3059
3060 - add stack frame size to SP or FP
3061 - [if frame pointer used] restore SP from FP
3062 - restore registers from SP [may include PC]
3063 - a return-type instruction [if PC wasn't already restored]
3064
3065 In a first pass, we scan forward from the current PC and verify the
3066 instructions we find as compatible with this sequence, ending in a
3067 return instruction.
3068
3069 However, this is not sufficient to distinguish indirect function calls
3070 within a function from indirect tail calls in the epilogue in some cases.
3071 Therefore, if we didn't already find any SP-changing instruction during
3072 forward scan, we add a backward scanning heuristic to ensure we actually
3073 are in the epilogue. */
3074
3075 scan_pc = pc;
3076 while (scan_pc < func_end && !found_return)
3077 {
3078 if (target_read_memory (scan_pc, buf, 2))
3079 break;
3080
3081 scan_pc += 2;
3082 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3083
3084 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3085 found_return = 1;
3086 else if (insn == 0x46f7) /* mov pc, lr */
3087 found_return = 1;
3088 else if (insn == 0x46bd) /* mov sp, r7 */
3089 found_stack_adjust = 1;
3090 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3091 found_stack_adjust = 1;
3092 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3093 {
3094 found_stack_adjust = 1;
3095 if (insn & 0x0100) /* <registers> include PC. */
3096 found_return = 1;
3097 }
3098 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instruction */
3099 {
3100 if (target_read_memory (scan_pc, buf, 2))
3101 break;
3102
3103 scan_pc += 2;
3104 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3105
3106 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3107 {
3108 found_stack_adjust = 1;
3109 if (insn2 & 0x8000) /* <registers> include PC. */
3110 found_return = 1;
3111 }
3112 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3113 && (insn2 & 0x0fff) == 0x0b04)
3114 {
3115 found_stack_adjust = 1;
3116 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3117 found_return = 1;
3118 }
3119 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3120 && (insn2 & 0x0e00) == 0x0a00)
3121 found_stack_adjust = 1;
3122 else
3123 break;
3124 }
3125 else
3126 break;
3127 }
3128
3129 if (!found_return)
3130 return 0;
3131
3132 /* Since any instruction in the epilogue sequence, with the possible
3133 exception of return itself, updates the stack pointer, we need to
3134 scan backwards for at most one instruction. Try either a 16-bit or
3135 a 32-bit instruction. This is just a heuristic, so we do not worry
3136 too much about false positives. */
3137
3138 if (!found_stack_adjust)
3139 {
3140 if (pc - 4 < func_start)
3141 return 0;
3142 if (target_read_memory (pc - 4, buf, 4))
3143 return 0;
3144
3145 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3146 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3147
3148 if (insn2 == 0x46bd) /* mov sp, r7 */
3149 found_stack_adjust = 1;
3150 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3151 found_stack_adjust = 1;
3152 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3153 found_stack_adjust = 1;
3154 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3155 found_stack_adjust = 1;
3156 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3157 && (insn2 & 0x0fff) == 0x0b04)
3158 found_stack_adjust = 1;
3159 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3160 && (insn2 & 0x0e00) == 0x0a00)
3161 found_stack_adjust = 1;
3162 }
3163
3164 return found_stack_adjust;
3165 }
3166
3167 /* Return true if we are in the function's epilogue, i.e. after the
3168 instruction that destroyed the function's stack frame. */
3169
3170 static int
3171 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3172 {
3173 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3174 unsigned int insn;
3175 int found_return, found_stack_adjust;
3176 CORE_ADDR func_start, func_end;
3177
3178 if (arm_pc_is_thumb (gdbarch, pc))
3179 return thumb_in_function_epilogue_p (gdbarch, pc);
3180
3181 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3182 return 0;
3183
3184 /* We are in the epilogue if the previous instruction was a stack
3185 adjustment and the next instruction is a possible return (bx, mov
3186 pc, or pop). We could have to scan backwards to find the stack
3187 adjustment, or forwards to find the return, but this is a decent
3188 approximation. First scan forwards. */
3189
3190 found_return = 0;
3191 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3192 if (bits (insn, 28, 31) != INST_NV)
3193 {
3194 if ((insn & 0x0ffffff0) == 0x012fff10)
3195 /* BX. */
3196 found_return = 1;
3197 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3198 /* MOV PC. */
3199 found_return = 1;
3200 else if ((insn & 0x0fff0000) == 0x08bd0000
3201 && (insn & 0x0000c000) != 0)
3202 /* POP (LDMIA), including PC or LR. */
3203 found_return = 1;
3204 }
3205
3206 if (!found_return)
3207 return 0;
3208
3209 /* Scan backwards. This is just a heuristic, so do not worry about
3210 false positives from mode changes. */
3211
3212 if (pc < func_start + 4)
3213 return 0;
3214
3215 found_stack_adjust = 0;
3216 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3217 if (bits (insn, 28, 31) != INST_NV)
3218 {
3219 if ((insn & 0x0df0f000) == 0x0080d000)
3220 /* ADD SP (register or immediate). */
3221 found_stack_adjust = 1;
3222 else if ((insn & 0x0df0f000) == 0x0040d000)
3223 /* SUB SP (register or immediate). */
3224 found_stack_adjust = 1;
3225 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3226 /* MOV SP. */
3227 found_stack_adjust = 1;
3228 else if ((insn & 0x0fff0000) == 0x08bd0000)
3229 /* POP (LDMIA). */
3230 found_stack_adjust = 1;
3231 }
3232
3233 if (found_stack_adjust)
3234 return 1;
3235
3236 return 0;
3237 }
3238
3239
3240 /* When arguments must be pushed onto the stack, they go on in reverse
3241 order. The code below implements a FILO (stack) to do this. */
3242
3243 struct stack_item
3244 {
3245 int len;
3246 struct stack_item *prev;
3247 void *data;
3248 };
3249
3250 static struct stack_item *
3251 push_stack_item (struct stack_item *prev, const void *contents, int len)
3252 {
3253 struct stack_item *si;
3254 si = xmalloc (sizeof (struct stack_item));
3255 si->data = xmalloc (len);
3256 si->len = len;
3257 si->prev = prev;
3258 memcpy (si->data, contents, len);
3259 return si;
3260 }
3261
3262 static struct stack_item *
3263 pop_stack_item (struct stack_item *si)
3264 {
3265 struct stack_item *dead = si;
3266 si = si->prev;
3267 xfree (dead->data);
3268 xfree (dead);
3269 return si;
3270 }
3271
3272
3273 /* Return the alignment (in bytes) of the given type. */
3274
3275 static int
3276 arm_type_align (struct type *t)
3277 {
3278 int n;
3279 int align;
3280 int falign;
3281
3282 t = check_typedef (t);
3283 switch (TYPE_CODE (t))
3284 {
3285 default:
3286 /* Should never happen. */
3287 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3288 return 4;
3289
3290 case TYPE_CODE_PTR:
3291 case TYPE_CODE_ENUM:
3292 case TYPE_CODE_INT:
3293 case TYPE_CODE_FLT:
3294 case TYPE_CODE_SET:
3295 case TYPE_CODE_RANGE:
3296 case TYPE_CODE_BITSTRING:
3297 case TYPE_CODE_REF:
3298 case TYPE_CODE_CHAR:
3299 case TYPE_CODE_BOOL:
3300 return TYPE_LENGTH (t);
3301
3302 case TYPE_CODE_ARRAY:
3303 case TYPE_CODE_COMPLEX:
3304 /* TODO: What about vector types? */
3305 return arm_type_align (TYPE_TARGET_TYPE (t));
3306
3307 case TYPE_CODE_STRUCT:
3308 case TYPE_CODE_UNION:
3309 align = 1;
3310 for (n = 0; n < TYPE_NFIELDS (t); n++)
3311 {
3312 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3313 if (falign > align)
3314 align = falign;
3315 }
3316 return align;
3317 }
3318 }
3319
3320 /* Possible base types for a candidate for passing and returning in
3321 VFP registers. */
3322
3323 enum arm_vfp_cprc_base_type
3324 {
3325 VFP_CPRC_UNKNOWN,
3326 VFP_CPRC_SINGLE,
3327 VFP_CPRC_DOUBLE,
3328 VFP_CPRC_VEC64,
3329 VFP_CPRC_VEC128
3330 };
3331
3332 /* The length of one element of base type B. */
3333
3334 static unsigned
3335 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3336 {
3337 switch (b)
3338 {
3339 case VFP_CPRC_SINGLE:
3340 return 4;
3341 case VFP_CPRC_DOUBLE:
3342 return 8;
3343 case VFP_CPRC_VEC64:
3344 return 8;
3345 case VFP_CPRC_VEC128:
3346 return 16;
3347 default:
3348 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3349 (int) b);
3350 }
3351 }
3352
3353 /* The character ('s', 'd' or 'q') for the type of VFP register used
3354 for passing base type B. */
3355
3356 static int
3357 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3358 {
3359 switch (b)
3360 {
3361 case VFP_CPRC_SINGLE:
3362 return 's';
3363 case VFP_CPRC_DOUBLE:
3364 return 'd';
3365 case VFP_CPRC_VEC64:
3366 return 'd';
3367 case VFP_CPRC_VEC128:
3368 return 'q';
3369 default:
3370 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3371 (int) b);
3372 }
3373 }
3374
3375 /* Determine whether T may be part of a candidate for passing and
3376 returning in VFP registers, ignoring the limit on the total number
3377 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3378 classification of the first valid component found; if it is not
3379 VFP_CPRC_UNKNOWN, all components must have the same classification
3380 as *BASE_TYPE. If it is found that T contains a type not permitted
3381 for passing and returning in VFP registers, a type differently
3382 classified from *BASE_TYPE, or two types differently classified
3383 from each other, return -1, otherwise return the total number of
3384 base-type elements found (possibly 0 in an empty structure or
3385 array). Vectors and complex types are not currently supported,
3386 matching the generic AAPCS support. */
3387
3388 static int
3389 arm_vfp_cprc_sub_candidate (struct type *t,
3390 enum arm_vfp_cprc_base_type *base_type)
3391 {
3392 t = check_typedef (t);
3393 switch (TYPE_CODE (t))
3394 {
3395 case TYPE_CODE_FLT:
3396 switch (TYPE_LENGTH (t))
3397 {
3398 case 4:
3399 if (*base_type == VFP_CPRC_UNKNOWN)
3400 *base_type = VFP_CPRC_SINGLE;
3401 else if (*base_type != VFP_CPRC_SINGLE)
3402 return -1;
3403 return 1;
3404
3405 case 8:
3406 if (*base_type == VFP_CPRC_UNKNOWN)
3407 *base_type = VFP_CPRC_DOUBLE;
3408 else if (*base_type != VFP_CPRC_DOUBLE)
3409 return -1;
3410 return 1;
3411
3412 default:
3413 return -1;
3414 }
3415 break;
3416
3417 case TYPE_CODE_ARRAY:
3418 {
3419 int count;
3420 unsigned unitlen;
3421 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3422 if (count == -1)
3423 return -1;
3424 if (TYPE_LENGTH (t) == 0)
3425 {
3426 gdb_assert (count == 0);
3427 return 0;
3428 }
3429 else if (count == 0)
3430 return -1;
3431 unitlen = arm_vfp_cprc_unit_length (*base_type);
3432 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3433 return TYPE_LENGTH (t) / unitlen;
3434 }
3435 break;
3436
3437 case TYPE_CODE_STRUCT:
3438 {
3439 int count = 0;
3440 unsigned unitlen;
3441 int i;
3442 for (i = 0; i < TYPE_NFIELDS (t); i++)
3443 {
3444 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3445 base_type);
3446 if (sub_count == -1)
3447 return -1;
3448 count += sub_count;
3449 }
3450 if (TYPE_LENGTH (t) == 0)
3451 {
3452 gdb_assert (count == 0);
3453 return 0;
3454 }
3455 else if (count == 0)
3456 return -1;
3457 unitlen = arm_vfp_cprc_unit_length (*base_type);
3458 if (TYPE_LENGTH (t) != unitlen * count)
3459 return -1;
3460 return count;
3461 }
3462
3463 case TYPE_CODE_UNION:
3464 {
3465 int count = 0;
3466 unsigned unitlen;
3467 int i;
3468 for (i = 0; i < TYPE_NFIELDS (t); i++)
3469 {
3470 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3471 base_type);
3472 if (sub_count == -1)
3473 return -1;
3474 count = (count > sub_count ? count : sub_count);
3475 }
3476 if (TYPE_LENGTH (t) == 0)
3477 {
3478 gdb_assert (count == 0);
3479 return 0;
3480 }
3481 else if (count == 0)
3482 return -1;
3483 unitlen = arm_vfp_cprc_unit_length (*base_type);
3484 if (TYPE_LENGTH (t) != unitlen * count)
3485 return -1;
3486 return count;
3487 }
3488
3489 default:
3490 break;
3491 }
3492
3493 return -1;
3494 }
3495
3496 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3497 if passed to or returned from a non-variadic function with the VFP
3498 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3499 *BASE_TYPE to the base type for T and *COUNT to the number of
3500 elements of that base type before returning. */
3501
3502 static int
3503 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3504 int *count)
3505 {
3506 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3507 int c = arm_vfp_cprc_sub_candidate (t, &b);
3508 if (c <= 0 || c > 4)
3509 return 0;
3510 *base_type = b;
3511 *count = c;
3512 return 1;
3513 }
3514
3515 /* Return 1 if the VFP ABI should be used for passing arguments to and
3516 returning values from a function of type FUNC_TYPE, 0
3517 otherwise. */
3518
3519 static int
3520 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3521 {
3522 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3523 /* Variadic functions always use the base ABI. Assume that functions
3524 without debug info are not variadic. */
3525 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3526 return 0;
3527 /* The VFP ABI is only supported as a variant of AAPCS. */
3528 if (tdep->arm_abi != ARM_ABI_AAPCS)
3529 return 0;
3530 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3531 }
3532
3533 /* We currently only support passing parameters in integer registers, which
3534 conforms with GCC's default model, and VFP argument passing following
3535 the VFP variant of AAPCS. Several other variants exist and
3536 we should probably support some of them based on the selected ABI. */
3537
3538 static CORE_ADDR
3539 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3540 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3541 struct value **args, CORE_ADDR sp, int struct_return,
3542 CORE_ADDR struct_addr)
3543 {
3544 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3545 int argnum;
3546 int argreg;
3547 int nstack;
3548 struct stack_item *si = NULL;
3549 int use_vfp_abi;
3550 struct type *ftype;
3551 unsigned vfp_regs_free = (1 << 16) - 1;
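/* Bitmask of free 4-byte VFP register slots (s0..s15); a set bit means that slot is still available for argument passing. */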
3552
3553 /* Determine the type of this function and whether the VFP ABI
3554 applies. */
3555 ftype = check_typedef (value_type (function));
3556 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3557 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3558 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3559
3560 /* Set the return address. For the ARM, the return breakpoint is
3561 always at BP_ADDR. */
3562 if (arm_pc_is_thumb (gdbarch, bp_addr))
3563 bp_addr |= 1;
3564 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3565
3566 /* Walk through the list of args and determine how large a temporary
3567 stack is required. Need to take care here as structs may be
3568 passed on the stack, and we have to push them. */
3569 nstack = 0;
3570
3571 argreg = ARM_A1_REGNUM;
3572 nstack = 0;
3573
3574 /* The struct_return pointer occupies the first parameter
3575 passing register. */
3576 if (struct_return)
3577 {
3578 if (arm_debug)
3579 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3580 gdbarch_register_name (gdbarch, argreg),
3581 paddress (gdbarch, struct_addr));
3582 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3583 argreg++;
3584 }
3585
3586 for (argnum = 0; argnum < nargs; argnum++)
3587 {
3588 int len;
3589 struct type *arg_type;
3590 struct type *target_type;
3591 enum type_code typecode;
3592 const bfd_byte *val;
3593 int align;
3594 enum arm_vfp_cprc_base_type vfp_base_type;
3595 int vfp_base_count;
3596 int may_use_core_reg = 1;
3597
3598 arg_type = check_typedef (value_type (args[argnum]));
3599 len = TYPE_LENGTH (arg_type);
3600 target_type = TYPE_TARGET_TYPE (arg_type);
3601 typecode = TYPE_CODE (arg_type);
3602 val = value_contents (args[argnum]);
3603
3604 align = arm_type_align (arg_type);
3605 /* Round alignment up to a whole number of words. */
3606 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3607 /* Different ABIs have different maximum alignments. */
3608 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3609 {
3610 /* The APCS ABI only requires word alignment. */
3611 align = INT_REGISTER_SIZE;
3612 }
3613 else
3614 {
3615 /* The AAPCS requires at most doubleword alignment. */
3616 if (align > INT_REGISTER_SIZE * 2)
3617 align = INT_REGISTER_SIZE * 2;
3618 }
3619
3620 if (use_vfp_abi
3621 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3622 &vfp_base_count))
3623 {
3624 int regno;
3625 int unit_length;
3626 int shift;
3627 unsigned mask;
3628
3629 /* Because this is a CPRC it cannot go in a core register or
3630 cause a core register to be skipped for alignment.
3631 Either it goes in VFP registers and the rest of this loop
3632 iteration is skipped for this argument, or it goes on the
3633 stack (and the stack alignment code is correct for this
3634 case). */
3635 may_use_core_reg = 0;
3636
3637 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3638 shift = unit_length / 4;
3639 mask = (1 << (shift * vfp_base_count)) - 1;
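/* SHIFT is the number of single-precision slots per element; look for a naturally aligned run of free slots large enough for the whole candidate. */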
3640 for (regno = 0; regno < 16; regno += shift)
3641 if (((vfp_regs_free >> regno) & mask) == mask)
3642 break;
3643
3644 if (regno < 16)
3645 {
3646 int reg_char;
3647 int reg_scaled;
3648 int i;
3649
3650 vfp_regs_free &= ~(mask << regno);
3651 reg_scaled = regno / shift;
3652 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3653 for (i = 0; i < vfp_base_count; i++)
3654 {
3655 char name_buf[4];
3656 int regnum;
3657 if (reg_char == 'q')
3658 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3659 val + i * unit_length);
3660 else
3661 {
3662 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3663 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3664 strlen (name_buf));
3665 regcache_cooked_write (regcache, regnum,
3666 val + i * unit_length);
3667 }
3668 }
3669 continue;
3670 }
3671 else
3672 {
3673 /* This CPRC could not go in VFP registers, so all VFP
3674 registers are now marked as used. */
3675 vfp_regs_free = 0;
3676 }
3677 }
3678
3679 /* Push stack padding for doubleword alignment. */
3680 if (nstack & (align - 1))
3681 {
3682 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3683 nstack += INT_REGISTER_SIZE;
3684 }
3685
3686 /* Doubleword aligned quantities must go in even register pairs. */
3687 if (may_use_core_reg
3688 && argreg <= ARM_LAST_ARG_REGNUM
3689 && align > INT_REGISTER_SIZE
3690 && argreg & 1)
3691 argreg++;
3692
3693 /* If the argument is a pointer to a function, and it is a
3694 Thumb function, create a LOCAL copy of the value and set
3695 the THUMB bit in it. */
3696 if (TYPE_CODE_PTR == typecode
3697 && target_type != NULL
3698 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3699 {
3700 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3701 if (arm_pc_is_thumb (gdbarch, regval))
3702 {
3703 bfd_byte *copy = alloca (len);
3704 store_unsigned_integer (copy, len, byte_order,
3705 MAKE_THUMB_ADDR (regval));
3706 val = copy;
3707 }
3708 }
3709
3710 /* Copy the argument to general registers or the stack in
3711 register-sized pieces. Large arguments are split between
3712 registers and stack. */
3713 while (len > 0)
3714 {
3715 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3716
3717 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3718 {
3719 /* The argument is being passed in a general purpose
3720 register. */
3721 CORE_ADDR regval
3722 = extract_unsigned_integer (val, partial_len, byte_order);
3723 if (byte_order == BFD_ENDIAN_BIG)
3724 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3725 if (arm_debug)
3726 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3727 argnum,
3728 gdbarch_register_name
3729 (gdbarch, argreg),
3730 phex (regval, INT_REGISTER_SIZE));
3731 regcache_cooked_write_unsigned (regcache, argreg, regval);
3732 argreg++;
3733 }
3734 else
3735 {
3736 /* Push the arguments onto the stack. */
3737 if (arm_debug)
3738 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3739 argnum, nstack);
3740 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3741 nstack += INT_REGISTER_SIZE;
3742 }
3743
3744 len -= partial_len;
3745 val += partial_len;
3746 }
3747 }
3748 /* If we have an odd number of words to push, then decrement the stack
3749 by one word now, so that the first stack argument will be doubleword aligned. */
3750 if (nstack & 4)
3751 sp -= 4;
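  /* For example, with three words (12 bytes) of stack arguments the extra
     4-byte adjustment here keeps SP 8-byte aligned once the queued items
     below are pushed.  */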
3752
3753 while (si)
3754 {
3755 sp -= si->len;
3756 write_memory (sp, si->data, si->len);
3757 si = pop_stack_item (si);
3758 }
3759
3760 /* Finally, update the SP register. */
3761 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3762
3763 return sp;
3764 }
3765
3766
3767 /* Always align the frame to an 8-byte boundary. This is required on
3768 some platforms and harmless on the rest. */
3769
3770 static CORE_ADDR
3771 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3772 {
3773 /* Align the stack to eight bytes. */
3774 return sp & ~ (CORE_ADDR) 7;
3775 }
3776
3777 static void
3778 print_fpu_flags (int flags)
3779 {
3780 if (flags & (1 << 0))
3781 fputs ("IVO ", stdout);
3782 if (flags & (1 << 1))
3783 fputs ("DVZ ", stdout);
3784 if (flags & (1 << 2))
3785 fputs ("OFL ", stdout);
3786 if (flags & (1 << 3))
3787 fputs ("UFL ", stdout);
3788 if (flags & (1 << 4))
3789 fputs ("INX ", stdout);
3790 putchar ('\n');
3791 }
3792
3793 /* Print interesting information about the floating point processor
3794 (if present) or emulator. */
3795 static void
3796 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3797 struct frame_info *frame, const char *args)
3798 {
3799 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3800 int type;
3801
3802 type = (status >> 24) & 127;
3803 if (status & (1 << 31))
3804 printf (_("Hardware FPU type %d\n"), type);
3805 else
3806 printf (_("Software FPU type %d\n"), type);
3807 /* i18n: [floating point unit] mask */
3808 fputs (_("mask: "), stdout);
3809 print_fpu_flags (status >> 16);
3810 /* i18n: [floating point unit] flags */
3811 fputs (_("flags: "), stdout);
3812 print_fpu_flags (status);
3813 }
3814
3815 /* Construct the ARM extended floating point type. */
3816 static struct type *
3817 arm_ext_type (struct gdbarch *gdbarch)
3818 {
3819 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3820
3821 if (!tdep->arm_ext_type)
3822 tdep->arm_ext_type
3823 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3824 floatformats_arm_ext);
3825
3826 return tdep->arm_ext_type;
3827 }
3828
3829 static struct type *
3830 arm_neon_double_type (struct gdbarch *gdbarch)
3831 {
3832 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3833
3834 if (tdep->neon_double_type == NULL)
3835 {
3836 struct type *t, *elem;
3837
3838 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3839 TYPE_CODE_UNION);
3840 elem = builtin_type (gdbarch)->builtin_uint8;
3841 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3842 elem = builtin_type (gdbarch)->builtin_uint16;
3843 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3844 elem = builtin_type (gdbarch)->builtin_uint32;
3845 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3846 elem = builtin_type (gdbarch)->builtin_uint64;
3847 append_composite_type_field (t, "u64", elem);
3848 elem = builtin_type (gdbarch)->builtin_float;
3849 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3850 elem = builtin_type (gdbarch)->builtin_double;
3851 append_composite_type_field (t, "f64", elem);
3852
3853 TYPE_VECTOR (t) = 1;
3854 TYPE_NAME (t) = "neon_d";
3855 tdep->neon_double_type = t;
3856 }
3857
3858 return tdep->neon_double_type;
3859 }
3860
3861 /* FIXME: The vector types are not correctly ordered on big-endian
3862 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3863 bits of d0 - regardless of what unit size is being held in d0. So
3864 the offset of the first uint8 in d0 is 7, but the offset of the
3865 first float is 4. This code works as-is for little-endian
3866 targets. */
3867
3868 static struct type *
3869 arm_neon_quad_type (struct gdbarch *gdbarch)
3870 {
3871 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3872
3873 if (tdep->neon_quad_type == NULL)
3874 {
3875 struct type *t, *elem;
3876
3877 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3878 TYPE_CODE_UNION);
3879 elem = builtin_type (gdbarch)->builtin_uint8;
3880 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3881 elem = builtin_type (gdbarch)->builtin_uint16;
3882 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3883 elem = builtin_type (gdbarch)->builtin_uint32;
3884 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3885 elem = builtin_type (gdbarch)->builtin_uint64;
3886 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3887 elem = builtin_type (gdbarch)->builtin_float;
3888 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3889 elem = builtin_type (gdbarch)->builtin_double;
3890 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3891
3892 TYPE_VECTOR (t) = 1;
3893 TYPE_NAME (t) = "neon_q";
3894 tdep->neon_quad_type = t;
3895 }
3896
3897 return tdep->neon_quad_type;
3898 }
3899
3900 /* Return the GDB type object for the "standard" data type of data in
3901 register N. */
3902
3903 static struct type *
3904 arm_register_type (struct gdbarch *gdbarch, int regnum)
3905 {
3906 int num_regs = gdbarch_num_regs (gdbarch);
3907
3908 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3909 && regnum >= num_regs && regnum < num_regs + 32)
3910 return builtin_type (gdbarch)->builtin_float;
3911
3912 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3913 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3914 return arm_neon_quad_type (gdbarch);
3915
3916 /* If the target description has register information, we are only
3917 in this function so that we can override the types of
3918 double-precision registers for NEON. */
3919 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3920 {
3921 struct type *t = tdesc_register_type (gdbarch, regnum);
3922
3923 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3924 && TYPE_CODE (t) == TYPE_CODE_FLT
3925 && gdbarch_tdep (gdbarch)->have_neon)
3926 return arm_neon_double_type (gdbarch);
3927 else
3928 return t;
3929 }
3930
3931 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3932 {
3933 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3934 return builtin_type (gdbarch)->builtin_void;
3935
3936 return arm_ext_type (gdbarch);
3937 }
3938 else if (regnum == ARM_SP_REGNUM)
3939 return builtin_type (gdbarch)->builtin_data_ptr;
3940 else if (regnum == ARM_PC_REGNUM)
3941 return builtin_type (gdbarch)->builtin_func_ptr;
3942 else if (regnum >= ARRAY_SIZE (arm_register_names))
3943 /* These registers are only supported on targets which supply
3944 an XML description. */
3945 return builtin_type (gdbarch)->builtin_int0;
3946 else
3947 return builtin_type (gdbarch)->builtin_uint32;
3948 }
3949
3950 /* Map a DWARF register REGNUM onto the appropriate GDB register
3951 number. */
3952
3953 static int
3954 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3955 {
3956 /* Core integer regs. */
3957 if (reg >= 0 && reg <= 15)
3958 return reg;
3959
3960 /* Legacy FPA encoding. These were once used in a way which
3961 overlapped with VFP register numbering, so their use is
3962 discouraged, but GDB doesn't support the ARM toolchain
3963 which used them for VFP. */
3964 if (reg >= 16 && reg <= 23)
3965 return ARM_F0_REGNUM + reg - 16;
3966
3967 /* New assignments for the FPA registers. */
3968 if (reg >= 96 && reg <= 103)
3969 return ARM_F0_REGNUM + reg - 96;
3970
3971 /* WMMX register assignments. */
3972 if (reg >= 104 && reg <= 111)
3973 return ARM_WCGR0_REGNUM + reg - 104;
3974
3975 if (reg >= 112 && reg <= 127)
3976 return ARM_WR0_REGNUM + reg - 112;
3977
3978 if (reg >= 192 && reg <= 199)
3979 return ARM_WC0_REGNUM + reg - 192;
3980
3981 /* VFP v2 registers. A double precision value is actually
3982 in d1 rather than s2, but the ABI only defines numbering
3983 for the single precision registers. This will "just work"
3984 in GDB for little endian targets (we'll read eight bytes,
3985 starting in s0 and then progressing to s1), but will be
3986 reversed on big endian targets with VFP. This won't
3987 be a problem for the new Neon quad registers; you're supposed
3988 to use DW_OP_piece for those. */
3989 if (reg >= 64 && reg <= 95)
3990 {
3991 char name_buf[4];
3992
3993 sprintf (name_buf, "s%d", reg - 64);
3994 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3995 strlen (name_buf));
3996 }
3997
3998 /* VFP v3 / Neon registers. This range is also used for VFP v2
3999 registers, except that it now describes d0 instead of s0. */
4000 if (reg >= 256 && reg <= 287)
4001 {
4002 char name_buf[4];
4003
4004 sprintf (name_buf, "d%d", reg - 256);
4005 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4006 strlen (name_buf));
4007 }
4008
4009 return -1;
4010 }
4011
4012 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4013 static int
4014 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4015 {
4016 int reg = regnum;
4017 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4018
4019 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4020 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4021
4022 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4023 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4024
4025 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4026 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4027
4028 if (reg < NUM_GREGS)
4029 return SIM_ARM_R0_REGNUM + reg;
4030 reg -= NUM_GREGS;
4031
4032 if (reg < NUM_FREGS)
4033 return SIM_ARM_FP0_REGNUM + reg;
4034 reg -= NUM_FREGS;
4035
4036 if (reg < NUM_SREGS)
4037 return SIM_ARM_FPS_REGNUM + reg;
4038 reg -= NUM_SREGS;
4039
4040 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4041 }
4042
4043 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4044 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4045 It is thought that this is the floating-point register format on
4046 little-endian systems. */
4047
4048 static void
4049 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4050 void *dbl, int endianess)
4051 {
4052 DOUBLEST d;
4053
4054 if (endianess == BFD_ENDIAN_BIG)
4055 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4056 else
4057 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4058 ptr, &d);
4059 floatformat_from_doublest (fmt, &d, dbl);
4060 }
4061
4062 static void
4063 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4064 int endianess)
4065 {
4066 DOUBLEST d;
4067
4068 floatformat_to_doublest (fmt, ptr, &d);
4069 if (endianess == BFD_ENDIAN_BIG)
4070 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4071 else
4072 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4073 &d, dbl);
4074 }
4075
4076 static int
4077 condition_true (unsigned long cond, unsigned long status_reg)
4078 {
4079 if (cond == INST_AL || cond == INST_NV)
4080 return 1;
4081
4082 switch (cond)
4083 {
4084 case INST_EQ:
4085 return ((status_reg & FLAG_Z) != 0);
4086 case INST_NE:
4087 return ((status_reg & FLAG_Z) == 0);
4088 case INST_CS:
4089 return ((status_reg & FLAG_C) != 0);
4090 case INST_CC:
4091 return ((status_reg & FLAG_C) == 0);
4092 case INST_MI:
4093 return ((status_reg & FLAG_N) != 0);
4094 case INST_PL:
4095 return ((status_reg & FLAG_N) == 0);
4096 case INST_VS:
4097 return ((status_reg & FLAG_V) != 0);
4098 case INST_VC:
4099 return ((status_reg & FLAG_V) == 0);
4100 case INST_HI:
4101 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4102 case INST_LS:
4103 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4104 case INST_GE:
4105 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4106 case INST_LT:
4107 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4108 case INST_GT:
4109 return (((status_reg & FLAG_Z) == 0)
4110 && (((status_reg & FLAG_N) == 0)
4111 == ((status_reg & FLAG_V) == 0)));
4112 case INST_LE:
4113 return (((status_reg & FLAG_Z) != 0)
4114 || (((status_reg & FLAG_N) == 0)
4115 != ((status_reg & FLAG_V) == 0)));
4116 }
4117 return 1;
4118 }
4119
4120 static unsigned long
4121 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4122 unsigned long pc_val, unsigned long status_reg)
4123 {
4124 unsigned long res, shift;
4125 int rm = bits (inst, 0, 3);
4126 unsigned long shifttype = bits (inst, 5, 6);
4127
4128 if (bit (inst, 4))
4129 {
4130 int rs = bits (inst, 8, 11);
4131 shift = (rs == 15 ? pc_val + 8
4132 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4133 }
4134 else
4135 shift = bits (inst, 7, 11);
4136
4137 res = (rm == ARM_PC_REGNUM
4138 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4139 : get_frame_register_unsigned (frame, rm));
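  /* In the expression above, an RM of PC reads as the instruction's address
     plus 8, or plus 12 when the shift amount comes from a register
     (bit 4 of the instruction set).  */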
4140
4141 switch (shifttype)
4142 {
4143 case 0: /* LSL */
4144 res = shift >= 32 ? 0 : res << shift;
4145 break;
4146
4147 case 1: /* LSR */
4148 res = shift >= 32 ? 0 : res >> shift;
4149 break;
4150
4151 case 2: /* ASR */
4152 if (shift >= 32)
4153 shift = 31;
4154 res = ((res & 0x80000000L)
4155 ? ~((~res) >> shift) : res >> shift);
4156 break;
4157
4158 case 3: /* ROR/RRX */
4159 shift &= 31;
4160 if (shift == 0)
4161 res = (res >> 1) | (carry ? 0x80000000L : 0);
4162 else
4163 res = (res >> shift) | (res << (32 - shift));
4164 break;
4165 }
4166
4167 return res & 0xffffffff;
4168 }
4169
4170 /* Return number of 1-bits in VAL. */
4171
4172 static int
4173 bitcount (unsigned long val)
4174 {
4175 int nbits;
4176 for (nbits = 0; val != 0; nbits++)
4177 val &= val - 1; /* Delete rightmost 1-bit in val. */
4178 return nbits;
4179 }
4180
4181 /* Return the size in bytes of the complete Thumb instruction whose
4182 first halfword is INST1. */
4183
4184 static int
4185 thumb_insn_size (unsigned short inst1)
4186 {
4187 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4188 return 4;
4189 else
4190 return 2;
4191 }
4192
4193 static int
4194 thumb_advance_itstate (unsigned int itstate)
4195 {
4196 /* Preserve IT[7:5], the first three bits of the condition. Shift
4197 the upcoming condition flags left by one bit. */
4198 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
4199
4200 /* If we have finished the IT block, clear the state. */
4201 if ((itstate & 0x0f) == 0)
4202 itstate = 0;
4203
4204 return itstate;
4205 }
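/* For example, "IT EQ" leaves ITSTATE == 0x08 (base condition EQ, one
   instruction remaining); one advance gives 0x10, whose low four bits are
   zero, so the state is cleared and the block ends.  */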
4206
4207 /* Find the next PC after the current instruction executes. In some
4208 cases we can not statically determine the answer (see the IT state
4209 handling in this function); in that case, a breakpoint may be
4210 inserted in addition to the returned PC, which will be used to set
4211 another breakpoint by our caller. */
4212
4213 static CORE_ADDR
4214 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4215 {
4216 struct gdbarch *gdbarch = get_frame_arch (frame);
4217 struct address_space *aspace = get_frame_address_space (frame);
4218 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4219 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4220 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4221 unsigned short inst1;
4222 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4223 unsigned long offset;
4224 ULONGEST status, itstate;
4225
4226 nextpc = MAKE_THUMB_ADDR (nextpc);
4227 pc_val = MAKE_THUMB_ADDR (pc_val);
4228
4229 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4230
4231 /* Thumb-2 conditional execution support. There are eight bits in
4232 the CPSR which describe conditional execution state. Once
4233 reconstructed (they're in a funny order), the low five bits
4234 describe the low bit of the condition for each instruction and
4235 how many instructions remain. The high three bits describe the
4236 base condition. One of the low four bits will be set if an IT
4237 block is active. These bits read as zero on earlier
4238 processors. */
4239 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4240 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
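  /* For example, if CPSR[15:10] holds 0b000101 and CPSR[26:25] holds 0b10,
     the line above reassembles ITSTATE as 0b00010110 == 0x16: IT[7:2] comes
     from CPSR[15:10] and IT[1:0] from CPSR[26:25].  */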
4241
4242 /* If-Then handling. On GNU/Linux, where this routine is used, we
4243 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4244 can disable execution of the undefined instruction. So we might
4245 miss the breakpoint if we set it on a skipped conditional
4246 instruction. Because conditional instructions can change the
4247 flags, affecting the execution of further instructions, we may
4248 need to set two breakpoints. */
4249
4250 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4251 {
4252 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4253 {
4254 /* An IT instruction. Because this instruction does not
4255 modify the flags, we can accurately predict the next
4256 executed instruction. */
4257 itstate = inst1 & 0x00ff;
4258 pc += thumb_insn_size (inst1);
4259
4260 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4261 {
4262 inst1 = read_memory_unsigned_integer (pc, 2,
4263 byte_order_for_code);
4264 pc += thumb_insn_size (inst1);
4265 itstate = thumb_advance_itstate (itstate);
4266 }
4267
4268 return MAKE_THUMB_ADDR (pc);
4269 }
4270 else if (itstate != 0)
4271 {
4272 /* We are in a conditional block. Check the condition. */
4273 if (! condition_true (itstate >> 4, status))
4274 {
4275 /* Advance to the next executed instruction. */
4276 pc += thumb_insn_size (inst1);
4277 itstate = thumb_advance_itstate (itstate);
4278
4279 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4280 {
4281 inst1 = read_memory_unsigned_integer (pc, 2,
4282 byte_order_for_code);
4283 pc += thumb_insn_size (inst1);
4284 itstate = thumb_advance_itstate (itstate);
4285 }
4286
4287 return MAKE_THUMB_ADDR (pc);
4288 }
4289 else if ((itstate & 0x0f) == 0x08)
4290 {
4291 /* This is the last instruction of the conditional
4292 block, and it is executed. We can handle it normally
4293 because the following instruction is not conditional,
4294 and we must handle it normally because it is
4295 permitted to branch. Fall through. */
4296 }
4297 else
4298 {
4299 int cond_negated;
4300
4301 /* There are conditional instructions after this one.
4302 If this instruction modifies the flags, then we can
4303 not predict what the next executed instruction will
4304 be. Fortunately, this instruction is architecturally
4305 forbidden to branch; we know it will fall through.
4306 Start by skipping past it. */
4307 pc += thumb_insn_size (inst1);
4308 itstate = thumb_advance_itstate (itstate);
4309
4310 /* Set a breakpoint on the following instruction. */
4311 gdb_assert ((itstate & 0x0f) != 0);
4312 arm_insert_single_step_breakpoint (gdbarch, aspace,
4313 MAKE_THUMB_ADDR (pc));
4314 cond_negated = (itstate >> 4) & 1;
4315
4316 /* Skip all following instructions with the same
4317 condition. If there is a later instruction in the IT
4318 block with the opposite condition, set the other
4319 breakpoint there. If not, then set a breakpoint on
4320 the instruction after the IT block. */
4321 do
4322 {
4323 inst1 = read_memory_unsigned_integer (pc, 2,
4324 byte_order_for_code);
4325 pc += thumb_insn_size (inst1);
4326 itstate = thumb_advance_itstate (itstate);
4327 }
4328 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4329
4330 return MAKE_THUMB_ADDR (pc);
4331 }
4332 }
4333 }
4334 else if (itstate & 0x0f)
4335 {
4336 /* We are in a conditional block. Check the condition. */
4337 int cond = itstate >> 4;
4338
4339 if (! condition_true (cond, status))
4340 /* Advance to the next instruction. All the 32-bit
4341 instructions share a common prefix. */
4342 return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));
4343
4344 /* Otherwise, handle the instruction normally. */
4345 }
4346
4347 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4348 {
4349 CORE_ADDR sp;
4350
4351 /* Fetch the saved PC from the stack. It's stored above
4352 all of the other registers. */
4353 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
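      /* For example, "pop {r4, r5, pc}" has two low registers in the list,
         so the saved PC sits 8 bytes above the current SP.  */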
4354 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4355 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4356 }
4357 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4358 {
4359 unsigned long cond = bits (inst1, 8, 11);
4360 if (cond == 0x0f) /* 0x0f = SWI */
4361 {
4362 struct gdbarch_tdep *tdep;
4363 tdep = gdbarch_tdep (gdbarch);
4364
4365 if (tdep->syscall_next_pc != NULL)
4366 nextpc = tdep->syscall_next_pc (frame);
4367
4368 }
4369 else if (cond != 0x0f && condition_true (cond, status))
4370 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4371 }
4372 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4373 {
4374 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4375 }
4376 else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
4377 {
4378 unsigned short inst2;
4379 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4380
4381 /* Default to the next instruction. */
4382 nextpc = pc + 4;
4383 nextpc = MAKE_THUMB_ADDR (nextpc);
4384
4385 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4386 {
4387 /* Branches and miscellaneous control instructions. */
4388
4389 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4390 {
4391 /* B, BL, BLX. */
4392 int j1, j2, imm1, imm2;
4393
4394 imm1 = sbits (inst1, 0, 10);
4395 imm2 = bits (inst2, 0, 10);
4396 j1 = bit (inst2, 13);
4397 j2 = bit (inst2, 11);
4398
4399 offset = ((imm1 << 12) + (imm2 << 1));
4400 offset ^= ((!j2) << 22) | ((!j1) << 23);
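              /* IMM1 was sign-extended, so bits 22 and 23 of OFFSET
                 currently hold copies of the S bit; XORing them with !J2
                 and !J1 yields I2 = NOT(J2 EOR S) and I1 = NOT(J1 EOR S),
                 as the branch encoding requires.  */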
4401
4402 nextpc = pc_val + offset;
4403 /* For BLX make sure to clear the low bits. */
4404 if (bit (inst2, 12) == 0)
4405 nextpc = nextpc & 0xfffffffc;
4406 }
4407 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4408 {
4409 /* SUBS PC, LR, #imm8. */
4410 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4411 nextpc -= inst2 & 0x00ff;
4412 }
4413 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4414 {
4415 /* Conditional branch. */
4416 if (condition_true (bits (inst1, 6, 9), status))
4417 {
4418 int sign, j1, j2, imm1, imm2;
4419
4420 sign = sbits (inst1, 10, 10);
4421 imm1 = bits (inst1, 0, 5);
4422 imm2 = bits (inst2, 0, 10);
4423 j1 = bit (inst2, 13);
4424 j2 = bit (inst2, 11);
4425
4426 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4427 offset += (imm1 << 12) + (imm2 << 1);
4428
4429 nextpc = pc_val + offset;
4430 }
4431 }
4432 }
4433 else if ((inst1 & 0xfe50) == 0xe810)
4434 {
4435 /* Load multiple or RFE. */
4436 int rn, offset, load_pc = 1;
4437
4438 rn = bits (inst1, 0, 3);
4439 if (bit (inst1, 7) && !bit (inst1, 8))
4440 {
4441 /* LDMIA or POP */
4442 if (!bit (inst2, 15))
4443 load_pc = 0;
4444 offset = bitcount (inst2) * 4 - 4;
4445 }
4446 else if (!bit (inst1, 7) && bit (inst1, 8))
4447 {
4448 /* LDMDB */
4449 if (!bit (inst2, 15))
4450 load_pc = 0;
4451 offset = -4;
4452 }
4453 else if (bit (inst1, 7) && bit (inst1, 8))
4454 {
4455 /* RFEIA */
4456 offset = 0;
4457 }
4458 else if (!bit (inst1, 7) && !bit (inst1, 8))
4459 {
4460 /* RFEDB */
4461 offset = -8;
4462 }
4463 else
4464 load_pc = 0;
4465
4466 if (load_pc)
4467 {
4468 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4469 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4470 }
4471 }
4472 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4473 {
4474 /* MOV PC or MOVS PC. */
4475 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4476 nextpc = MAKE_THUMB_ADDR (nextpc);
4477 }
4478 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
4479 {
4480 /* LDR PC. */
4481 CORE_ADDR base;
4482 int rn, load_pc = 1;
4483
4484 rn = bits (inst1, 0, 3);
4485 base = get_frame_register_unsigned (frame, rn);
4486 if (rn == ARM_PC_REGNUM)
4487 {
4488 base = (base + 4) & ~(CORE_ADDR) 0x3;
4489 if (bit (inst1, 7))
4490 base += bits (inst2, 0, 11);
4491 else
4492 base -= bits (inst2, 0, 11);
4493 }
4494 else if (bit (inst1, 7))
4495 base += bits (inst2, 0, 11);
4496 else if (bit (inst2, 11))
4497 {
4498 if (bit (inst2, 10))
4499 {
4500 if (bit (inst2, 9))
4501 base += bits (inst2, 0, 7);
4502 else
4503 base -= bits (inst2, 0, 7);
4504 }
4505 }
4506 else if ((inst2 & 0x0fc0) == 0x0000)
4507 {
4508 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4509 base += get_frame_register_unsigned (frame, rm) << shift;
4510 }
4511 else
4512 /* Reserved. */
4513 load_pc = 0;
4514
4515 if (load_pc)
4516 nextpc = get_frame_memory_unsigned (frame, base, 4);
4517 }
4518 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
4519 {
4520 /* TBB. */
4521 CORE_ADDR tbl_reg, table, offset, length;
4522
4523 tbl_reg = bits (inst1, 0, 3);
4524 if (tbl_reg == 0x0f)
4525 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4526 else
4527 table = get_frame_register_unsigned (frame, tbl_reg);
4528
4529 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4530 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4531 nextpc = pc_val + length;
4532 }
4533 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
4534 {
4535 /* TBH. */
4536 CORE_ADDR tbl_reg, table, offset, length;
4537
4538 tbl_reg = bits (inst1, 0, 3);
4539 if (tbl_reg == 0x0f)
4540 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4541 else
4542 table = get_frame_register_unsigned (frame, tbl_reg);
4543
4544 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4545 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4546 nextpc = pc_val + length;
4547 }
4548 }
4549 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4550 {
4551 if (bits (inst1, 3, 6) == 0x0f)
4552 nextpc = pc_val;
4553 else
4554 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4555 }
4556 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4557 {
4558 if (bits (inst1, 3, 6) == 0x0f)
4559 nextpc = pc_val;
4560 else
4561 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4562
4563 nextpc = MAKE_THUMB_ADDR (nextpc);
4564 }
4565 else if ((inst1 & 0xf500) == 0xb100)
4566 {
4567 /* CBNZ or CBZ. */
4568 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4569 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4570
4571 if (bit (inst1, 11) && reg != 0)
4572 nextpc = pc_val + imm;
4573 else if (!bit (inst1, 11) && reg == 0)
4574 nextpc = pc_val + imm;
4575 }
4576 return nextpc;
4577 }
4578
4579 /* Get the raw next address. PC is the current program counter, in
4580 FRAME, which is assumed to be executing in ARM mode.
4581
4582 The value returned has the execution state of the next instruction
4583 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4584 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4585 address. */
4586
4587 static CORE_ADDR
4588 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4589 {
4590 struct gdbarch *gdbarch = get_frame_arch (frame);
4591 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4592 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4593 unsigned long pc_val;
4594 unsigned long this_instr;
4595 unsigned long status;
4596 CORE_ADDR nextpc;
4597
4598 pc_val = (unsigned long) pc;
4599 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4600
4601 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4602 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4603
4604 if (bits (this_instr, 28, 31) == INST_NV)
4605 switch (bits (this_instr, 24, 27))
4606 {
4607 case 0xa:
4608 case 0xb:
4609 {
4610 /* Branch with Link and change to Thumb. */
4611 nextpc = BranchDest (pc, this_instr);
4612 nextpc |= bit (this_instr, 24) << 1;
4613 nextpc = MAKE_THUMB_ADDR (nextpc);
4614 break;
4615 }
4616 case 0xc:
4617 case 0xd:
4618 case 0xe:
4619 /* Coprocessor register transfer. */
4620 if (bits (this_instr, 12, 15) == 15)
4621 error (_("Invalid update to pc in instruction"));
4622 break;
4623 }
4624 else if (condition_true (bits (this_instr, 28, 31), status))
4625 {
4626 switch (bits (this_instr, 24, 27))
4627 {
4628 case 0x0:
4629 case 0x1: /* data processing */
4630 case 0x2:
4631 case 0x3:
4632 {
4633 unsigned long operand1, operand2, result = 0;
4634 unsigned long rn;
4635 int c;
4636
4637 if (bits (this_instr, 12, 15) != 15)
4638 break;
4639
4640 if (bits (this_instr, 22, 25) == 0
4641 && bits (this_instr, 4, 7) == 9) /* multiply */
4642 error (_("Invalid update to pc in instruction"));
4643
4644 /* BX <reg>, BLX <reg> */
4645 if (bits (this_instr, 4, 27) == 0x12fff1
4646 || bits (this_instr, 4, 27) == 0x12fff3)
4647 {
4648 rn = bits (this_instr, 0, 3);
4649 nextpc = ((rn == ARM_PC_REGNUM)
4650 ? (pc_val + 8)
4651 : get_frame_register_unsigned (frame, rn));
4652
4653 return nextpc;
4654 }
4655
4656 /* Multiply into PC. */
4657 c = (status & FLAG_C) ? 1 : 0;
4658 rn = bits (this_instr, 16, 19);
4659 operand1 = ((rn == ARM_PC_REGNUM)
4660 ? (pc_val + 8)
4661 : get_frame_register_unsigned (frame, rn));
4662
4663 if (bit (this_instr, 25))
4664 {
4665 unsigned long immval = bits (this_instr, 0, 7);
4666 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4667 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4668 & 0xffffffff;
4669 }
4670 else /* operand 2 is a shifted register. */
4671 operand2 = shifted_reg_val (frame, this_instr, c,
4672 pc_val, status);
4673
4674 switch (bits (this_instr, 21, 24))
4675 {
4676 case 0x0: /*and */
4677 result = operand1 & operand2;
4678 break;
4679
4680 case 0x1: /*eor */
4681 result = operand1 ^ operand2;
4682 break;
4683
4684 case 0x2: /*sub */
4685 result = operand1 - operand2;
4686 break;
4687
4688 case 0x3: /*rsb */
4689 result = operand2 - operand1;
4690 break;
4691
4692 case 0x4: /*add */
4693 result = operand1 + operand2;
4694 break;
4695
4696 case 0x5: /*adc */
4697 result = operand1 + operand2 + c;
4698 break;
4699
4700 case 0x6: /*sbc */
4701 result = operand1 - operand2 + c;
4702 break;
4703
4704 case 0x7: /*rsc */
4705 result = operand2 - operand1 + c;
4706 break;
4707
4708 case 0x8:
4709 case 0x9:
4710 case 0xa:
4711 case 0xb: /* tst, teq, cmp, cmn */
4712 result = (unsigned long) nextpc;
4713 break;
4714
4715 case 0xc: /*orr */
4716 result = operand1 | operand2;
4717 break;
4718
4719 case 0xd: /*mov */
4720 /* Always step into a function. */
4721 result = operand2;
4722 break;
4723
4724 case 0xe: /*bic */
4725 result = operand1 & ~operand2;
4726 break;
4727
4728 case 0xf: /*mvn */
4729 result = ~operand2;
4730 break;
4731 }
4732
4733 /* In 26-bit APCS the bottom two bits of the result are
4734 ignored, and we always end up in ARM state. */
4735 if (!arm_apcs_32)
4736 nextpc = arm_addr_bits_remove (gdbarch, result);
4737 else
4738 nextpc = result;
4739
4740 break;
4741 }
4742
4743 case 0x4:
4744 case 0x5: /* data transfer */
4745 case 0x6:
4746 case 0x7:
4747 if (bit (this_instr, 20))
4748 {
4749 /* load */
4750 if (bits (this_instr, 12, 15) == 15)
4751 {
4752 /* rd == pc */
4753 unsigned long rn;
4754 unsigned long base;
4755
4756 if (bit (this_instr, 22))
4757 error (_("Invalid update to pc in instruction"));
4758
4759 /* byte write to PC */
4760 rn = bits (this_instr, 16, 19);
4761 base = ((rn == ARM_PC_REGNUM)
4762 ? (pc_val + 8)
4763 : get_frame_register_unsigned (frame, rn));
4764
4765 if (bit (this_instr, 24))
4766 {
4767 /* pre-indexed */
4768 int c = (status & FLAG_C) ? 1 : 0;
4769 unsigned long offset =
4770 (bit (this_instr, 25)
4771 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4772 : bits (this_instr, 0, 11));
4773
4774 if (bit (this_instr, 23))
4775 base += offset;
4776 else
4777 base -= offset;
4778 }
4779 nextpc =
4780 (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
4781 4, byte_order);
4782 }
4783 }
4784 break;
4785
4786 case 0x8:
4787 case 0x9: /* block transfer */
4788 if (bit (this_instr, 20))
4789 {
4790 /* LDM */
4791 if (bit (this_instr, 15))
4792 {
4793 /* loading pc */
4794 int offset = 0;
4795 unsigned long rn_val
4796 = get_frame_register_unsigned (frame,
4797 bits (this_instr, 16, 19));
4798
4799 if (bit (this_instr, 23))
4800 {
4801 /* up */
4802 unsigned long reglist = bits (this_instr, 0, 14);
4803 offset = bitcount (reglist) * 4;
4804 if (bit (this_instr, 24)) /* pre */
4805 offset += 4;
4806 }
4807 else if (bit (this_instr, 24))
4808 offset = -4;
4809
4810 nextpc =
4811 (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
4812 (rn_val + offset),
4813 4, byte_order);
4814 }
4815 }
4816 break;
4817
4818 case 0xb: /* branch & link */
4819 case 0xa: /* branch */
4820 {
4821 nextpc = BranchDest (pc, this_instr);
4822 break;
4823 }
4824
4825 case 0xc:
4826 case 0xd:
4827 case 0xe: /* coproc ops */
4828 break;
4829 case 0xf: /* SWI */
4830 {
4831 struct gdbarch_tdep *tdep;
4832 tdep = gdbarch_tdep (gdbarch);
4833
4834 if (tdep->syscall_next_pc != NULL)
4835 nextpc = tdep->syscall_next_pc (frame);
4836
4837 }
4838 break;
4839
4840 default:
4841 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4842 return (pc);
4843 }
4844 }
4845
4846 return nextpc;
4847 }
4848
4849 /* Determine next PC after current instruction executes. Will call either
4850 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4851 loop is detected. */
4852
4853 CORE_ADDR
4854 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4855 {
4856 CORE_ADDR nextpc;
4857
4858 if (arm_frame_is_thumb (frame))
4859 {
4860 nextpc = thumb_get_next_pc_raw (frame, pc);
4861 if (nextpc == MAKE_THUMB_ADDR (pc))
4862 error (_("Infinite loop detected"));
4863 }
4864 else
4865 {
4866 nextpc = arm_get_next_pc_raw (frame, pc);
4867 if (nextpc == pc)
4868 error (_("Infinite loop detected"));
4869 }
4870
4871 return nextpc;
4872 }
4873
4874 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4875 of the appropriate mode (as encoded in the PC value), even if this
4876 differs from what would be expected according to the symbol tables. */
4877
4878 void
4879 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4880 struct address_space *aspace,
4881 CORE_ADDR pc)
4882 {
4883 struct cleanup *old_chain
4884 = make_cleanup_restore_integer (&arm_override_mode);
4885
4886 arm_override_mode = IS_THUMB_ADDR (pc);
4887 pc = gdbarch_addr_bits_remove (gdbarch, pc);
4888
4889 insert_single_step_breakpoint (gdbarch, aspace, pc);
4890
4891 do_cleanups (old_chain);
4892 }
4893
4894 /* single_step() is called just before we want to resume the inferior,
4895 if we want to single-step it but there is no hardware or kernel
4896 single-step support. We find the target of the coming instruction
4897 and breakpoint it. */
4898
4899 int
4900 arm_software_single_step (struct frame_info *frame)
4901 {
4902 struct gdbarch *gdbarch = get_frame_arch (frame);
4903 struct address_space *aspace = get_frame_address_space (frame);
4904 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4905
4906 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4907
4908 return 1;
4909 }
4910
4911 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4912 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4913 NULL if an error occurs. BUF is freed. */
4914
4915 static gdb_byte *
4916 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4917 int old_len, int new_len)
4918 {
4919 gdb_byte *new_buf, *middle;
4920 int bytes_to_read = new_len - old_len;
4921
4922 new_buf = xmalloc (new_len);
4923 memcpy (new_buf + bytes_to_read, buf, old_len);
4924 xfree (buf);
4925 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4926 {
4927 xfree (new_buf);
4928 return NULL;
4929 }
4930 return new_buf;
4931 }
4932
4933 /* An IT block is at most the 2-byte IT instruction followed by
4934 four 4-byte instructions. The furthest back we must search to
4935 find an IT block that affects the current instruction is thus
4936 2 + 3 * 4 == 14 bytes. */
4937 #define MAX_IT_BLOCK_PREFIX 14
4938
4939 /* Use a quick scan if there are more than this many bytes of
4940 code. */
4941 #define IT_SCAN_THRESHOLD 32
4942
4943 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4944 A breakpoint in an IT block may not be hit, depending on the
4945 condition flags. */
4946 static CORE_ADDR
4947 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4948 {
4949 gdb_byte *buf;
4950 char map_type;
4951 CORE_ADDR boundary, func_start;
4952 int buf_len, buf2_len;
4953 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4954 int i, any, last_it, last_it_count;
4955
4956 /* If we are using BKPT breakpoints, none of this is necessary. */
4957 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4958 return bpaddr;
4959
4960 /* ARM mode does not have this problem. */
4961 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4962 return bpaddr;
4963
4964 /* We are setting a breakpoint in Thumb code that could potentially
4965 contain an IT block. The first step is to find how much Thumb
4966 code there is; we do not need to read outside of known Thumb
4967 sequences. */
4968 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4969 if (map_type == 0)
4970 /* Thumb-2 code must have mapping symbols to have a chance. */
4971 return bpaddr;
4972
4973 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4974
4975 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4976 && func_start > boundary)
4977 boundary = func_start;
4978
4979 /* Search for a candidate IT instruction. We have to do some fancy
4980 footwork to distinguish a real IT instruction from the second
4981 half of a 32-bit instruction, but there is no need for that if
4982 there's no candidate. */
4983 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4984 if (buf_len == 0)
4985 /* No room for an IT instruction. */
4986 return bpaddr;
4987
4988 buf = xmalloc (buf_len);
4989 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4990 return bpaddr;
4991 any = 0;
4992 for (i = 0; i < buf_len; i += 2)
4993 {
4994 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4995 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4996 {
4997 any = 1;
4998 break;
4999 }
5000 }
5001 if (any == 0)
5002 {
5003 xfree (buf);
5004 return bpaddr;
5005 }
5006
5007 /* OK, the code bytes before this instruction contain at least one
5008 halfword which resembles an IT instruction. We know that it's
5009 Thumb code, but there are still two possibilities. Either the
5010 halfword really is an IT instruction, or it is the second half of
5011 a 32-bit Thumb instruction. The only way we can tell is to
5012 scan forwards from a known instruction boundary. */
5013 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5014 {
5015 int definite;
5016
5017 /* There's a lot of code before this instruction. Start with an
5018 optimistic search; it's easy to recognize halfwords that can
5019 not be the start of a 32-bit instruction, and use that to
5020 lock on to the instruction boundaries. */
5021 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5022 if (buf == NULL)
5023 return bpaddr;
5024 buf_len = IT_SCAN_THRESHOLD;
5025
5026 definite = 0;
5027 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5028 {
5029 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5030 if (thumb_insn_size (inst1) == 2)
5031 {
5032 definite = 1;
5033 break;
5034 }
5035 }
5036
5037 /* At this point, if DEFINITE, BUF[I] is the first place we
5038 are sure that we know the instruction boundaries, and it is far
5039 enough from BPADDR that we could not miss an IT instruction
5040 affecting BPADDR. If ! DEFINITE, give up - start from a
5041 known boundary. */
5042 if (! definite)
5043 {
5044 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5045 bpaddr - boundary);
5046 if (buf == NULL)
5047 return bpaddr;
5048 buf_len = bpaddr - boundary;
5049 i = 0;
5050 }
5051 }
5052 else
5053 {
5054 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5055 if (buf == NULL)
5056 return bpaddr;
5057 buf_len = bpaddr - boundary;
5058 i = 0;
5059 }
5060
5061 /* Scan forwards. Find the last IT instruction before BPADDR. */
5062 last_it = -1;
5063 last_it_count = 0;
5064 while (i < buf_len)
5065 {
5066 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5067 last_it_count--;
5068 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5069 {
5070 last_it = i;
5071 if (inst1 & 0x0001)
5072 last_it_count = 4;
5073 else if (inst1 & 0x0002)
5074 last_it_count = 3;
5075 else if (inst1 & 0x0004)
5076 last_it_count = 2;
5077 else
5078 last_it_count = 1;
5079 }
5080 i += thumb_insn_size (inst1);
5081 }
5082
5083 xfree (buf);
5084
5085 if (last_it == -1)
5086 /* There wasn't really an IT instruction after all. */
5087 return bpaddr;
5088
5089 if (last_it_count < 1)
5090 /* It was too far away. */
5091 return bpaddr;
5092
5093 /* This really is a trouble spot. Move the breakpoint to the IT
5094 instruction. */
5095 return bpaddr - buf_len + last_it;
5096 }
5097
5098 /* ARM displaced stepping support.
5099
5100 Generally ARM displaced stepping works as follows:
5101
5102 1. When an instruction is to be single-stepped, it is first decoded by
5103 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5104 Depending on the type of instruction, it is then copied to a scratch
5105 location, possibly in a modified form. The copy_* set of functions
5106 performs such modification, as necessary. A breakpoint is placed after
5107 the modified instruction in the scratch space to return control to GDB.
5108 Note in particular that instructions which modify the PC will no longer
5109 do so after modification.
5110
5111 2. The instruction is single-stepped, by setting the PC to the scratch
5112 location address, and resuming. Control returns to GDB when the
5113 breakpoint is hit.
5114
5115 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5116 function used for the current instruction. This function's job is to
5117 put the CPU/memory state back to what it would have been if the
5118 instruction had been executed unmodified in its original location. */
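/* As a rough illustration: for a PC-modifying load such as "ldr pc, [r0]",
   the copy_* routine rewrites the destination to a scratch register, the
   rewritten instruction is stepped in the scratch area, and the cleanup
   routine then transfers the loaded value into the real PC and restores
   any temporary registers it borrowed.  */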
5119
5120 /* NOP instruction (mov r0, r0). */
5121 #define ARM_NOP 0xe1a00000
5122 #define THUMB_NOP 0x4600
5123
5124 /* Helper for register reads for displaced stepping. In particular, this
5125 returns the PC as it would be seen by the instruction at its original
5126 location. */
5127
5128 ULONGEST
5129 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5130 int regno)
5131 {
5132 ULONGEST ret;
5133 CORE_ADDR from = dsc->insn_addr;
5134
5135 if (regno == ARM_PC_REGNUM)
5136 {
5137 /* Compute pipeline offset:
5138 - When executing an ARM instruction, PC reads as the address of the
5139 current instruction plus 8.
5140 - When executing a Thumb instruction, PC reads as the address of the
5141 current instruction plus 4. */
5142
5143 if (!dsc->is_thumb)
5144 from += 8;
5145 else
5146 from += 4;
5147
5148 if (debug_displaced)
5149 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5150 (unsigned long) from);
5151 return (ULONGEST) from;
5152 }
5153 else
5154 {
5155 regcache_cooked_read_unsigned (regs, regno, &ret);
5156 if (debug_displaced)
5157 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5158 regno, (unsigned long) ret);
5159 return ret;
5160 }
5161 }
5162
5163 static int
5164 displaced_in_arm_mode (struct regcache *regs)
5165 {
5166 ULONGEST ps;
5167 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5168
5169 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5170
5171 return (ps & t_bit) == 0;
5172 }
5173
5174 /* Write to the PC as from a branch instruction. */
5175
5176 static void
5177 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5178 ULONGEST val)
5179 {
5180 if (!dsc->is_thumb)
5181 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5182 architecture versions < 6. */
5183 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5184 val & ~(ULONGEST) 0x3);
5185 else
5186 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5187 val & ~(ULONGEST) 0x1);
5188 }
5189
5190 /* Write to the PC as from a branch-exchange instruction. */
5191
5192 static void
5193 bx_write_pc (struct regcache *regs, ULONGEST val)
5194 {
5195 ULONGEST ps;
5196 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5197
5198 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5199
5200 if ((val & 1) == 1)
5201 {
5202 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5203 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5204 }
5205 else if ((val & 2) == 0)
5206 {
5207 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5208 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5209 }
5210 else
5211 {
5212 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5213 mode, align dest to 4 bytes). */
5214 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5215 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5216 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5217 }
5218 }
5219
5220 /* Write to the PC as if from a load instruction. */
5221
5222 static void
5223 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5224 ULONGEST val)
5225 {
5226 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5227 bx_write_pc (regs, val);
5228 else
5229 branch_write_pc (regs, dsc, val);
5230 }
5231
5232 /* Write to the PC as if from an ALU instruction. */
5233
5234 static void
5235 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5236 ULONGEST val)
5237 {
5238 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5239 bx_write_pc (regs, val);
5240 else
5241 branch_write_pc (regs, dsc, val);
5242 }
5243
5244 /* Helper for writing to registers for displaced stepping. Writing to the PC
5245 has varying effects depending on the instruction which does the write:
5246 this is controlled by the WRITE_PC argument. */
5247
5248 void
5249 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5250 int regno, ULONGEST val, enum pc_write_style write_pc)
5251 {
5252 if (regno == ARM_PC_REGNUM)
5253 {
5254 if (debug_displaced)
5255 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5256 (unsigned long) val);
5257 switch (write_pc)
5258 {
5259 case BRANCH_WRITE_PC:
5260 branch_write_pc (regs, dsc, val);
5261 break;
5262
5263 case BX_WRITE_PC:
5264 bx_write_pc (regs, val);
5265 break;
5266
5267 case LOAD_WRITE_PC:
5268 load_write_pc (regs, dsc, val);
5269 break;
5270
5271 case ALU_WRITE_PC:
5272 alu_write_pc (regs, dsc, val);
5273 break;
5274
5275 case CANNOT_WRITE_PC:
5276 warning (_("Instruction wrote to PC in an unexpected way when "
5277 "single-stepping"));
5278 break;
5279
5280 default:
5281 internal_error (__FILE__, __LINE__,
5282 _("Invalid argument to displaced_write_reg"));
5283 }
5284
5285 dsc->wrote_to_pc = 1;
5286 }
5287 else
5288 {
5289 if (debug_displaced)
5290 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5291 regno, (unsigned long) val);
5292 regcache_cooked_write_unsigned (regs, regno, val);
5293 }
5294 }
5295
5296 /* This function is used to concisely determine if an instruction INSN
5297 references PC. Register fields of interest in INSN should have the
5298 corresponding fields of BITMASK set to 0b1111. The function
5299 returns 1 if any of these fields in INSN reference the PC
5300 (also 0b1111, r15), else it returns 0. */
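/* For example, a BITMASK of 0x000f0000 checks only the Rn field in
   bits 16-19, as arm_copy_preload does below.  */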
5301
5302 static int
5303 insn_references_pc (uint32_t insn, uint32_t bitmask)
5304 {
5305 uint32_t lowbit = 1;
5306
5307 while (bitmask != 0)
5308 {
5309 uint32_t mask;
5310
5311 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5312 ;
5313
5314 if (!lowbit)
5315 break;
5316
5317 mask = lowbit * 0xf;
5318
5319 if ((insn & mask) == mask)
5320 return 1;
5321
5322 bitmask &= ~mask;
5323 }
5324
5325 return 0;
5326 }
5327
5328 /* The simplest copy function. Many instructions have the same effect no
5329 matter what address they are executed at: in those cases, use this. */
5330
5331 static int
5332 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5333 const char *iname, struct displaced_step_closure *dsc)
5334 {
5335 if (debug_displaced)
5336 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5337 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5338 iname);
5339
5340 dsc->modinsn[0] = insn;
5341
5342 return 0;
5343 }
5344
5345 static int
5346 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5347 uint16_t insn2, const char *iname,
5348 struct displaced_step_closure *dsc)
5349 {
5350 if (debug_displaced)
5351 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5352 "opcode/class '%s' unmodified\n", insn1, insn2,
5353 iname);
5354
5355 dsc->modinsn[0] = insn1;
5356 dsc->modinsn[1] = insn2;
5357 dsc->numinsns = 2;
5358
5359 return 0;
5360 }
5361
5362 /* Copy a 16-bit Thumb (Thumb and 16-bit Thumb-2) instruction without any
5363 modification. */
5364 static int
5365 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5366 const char *iname,
5367 struct displaced_step_closure *dsc)
5368 {
5369 if (debug_displaced)
5370 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5371 "opcode/class '%s' unmodified\n", insn,
5372 iname);
5373
5374 dsc->modinsn[0] = insn;
5375
5376 return 0;
5377 }
5378
5379 /* Preload instructions with immediate offset. */
5380
5381 static void
5382 cleanup_preload (struct gdbarch *gdbarch,
5383 struct regcache *regs, struct displaced_step_closure *dsc)
5384 {
5385 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5386 if (!dsc->u.preload.immed)
5387 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5388 }
5389
5390 static void
5391 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5392 struct displaced_step_closure *dsc, unsigned int rn)
5393 {
5394 ULONGEST rn_val;
5395 /* Preload instructions:
5396
5397 {pli/pld} [rn, #+/-imm]
5398 ->
5399 {pli/pld} [r0, #+/-imm]. */
5400
5401 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5402 rn_val = displaced_read_reg (regs, dsc, rn);
5403 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5404 dsc->u.preload.immed = 1;
5405
5406 dsc->cleanup = &cleanup_preload;
5407 }
5408
5409 static int
5410 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5411 struct displaced_step_closure *dsc)
5412 {
5413 unsigned int rn = bits (insn, 16, 19);
5414
5415 if (!insn_references_pc (insn, 0x000f0000ul))
5416 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5417
5418 if (debug_displaced)
5419 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5420 (unsigned long) insn);
5421
5422 dsc->modinsn[0] = insn & 0xfff0ffff;
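  /* The masking above rewrites the Rn field (bits 16-19) to r0;
     install_preload points r0 at the original base register's value.  */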
5423
5424 install_preload (gdbarch, regs, dsc, rn);
5425
5426 return 0;
5427 }
5428
5429 static int
5430 thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
5431 struct regcache *regs, struct displaced_step_closure *dsc)
5432 {
5433 unsigned int rn = bits (insn1, 0, 3);
5434 unsigned int u_bit = bit (insn1, 7);
5435 int imm12 = bits (insn2, 0, 11);
5436 ULONGEST pc_val;
5437
5438 if (rn != ARM_PC_REGNUM)
5439 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
5440
5441 /* PC is only allowed to be used in PLI (immediate, literal) Encoding T3, and
5442 PLD (literal) Encoding T1. */
5443 if (debug_displaced)
5444 fprintf_unfiltered (gdb_stdlog,
5445 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
5446 (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
5447 imm12);
5448
5449 if (!u_bit)
5450 imm12 = -1 * imm12;
5451
5452 /* Rewrite instruction {pli/pld} PC imm12 into:
5453 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
5454
5455 {pli/pld} [r0, r1]
5456
5457 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
5458
5459 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5460 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5461
5462 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
5463
5464 displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
5465 displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
5466 dsc->u.preload.immed = 0;
5467
5468 /* {pli/pld} [r0, r1] */
5469 dsc->modinsn[0] = insn1 & 0xfff0;
5470 dsc->modinsn[1] = 0xf001;
5471 dsc->numinsns = 2;
5472
5473 dsc->cleanup = &cleanup_preload;
5474 return 0;
5475 }
5476
5477 /* Preload instructions with register offset. */
5478
5479 static void
5480 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5481 struct displaced_step_closure *dsc, unsigned int rn,
5482 unsigned int rm)
5483 {
5484 ULONGEST rn_val, rm_val;
5485
5486 /* Preload register-offset instructions:
5487
5488 {pli/pld} [rn, rm {, shift}]
5489 ->
5490 {pli/pld} [r0, r1 {, shift}]. */
5491
5492 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5493 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5494 rn_val = displaced_read_reg (regs, dsc, rn);
5495 rm_val = displaced_read_reg (regs, dsc, rm);
5496 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5497 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5498 dsc->u.preload.immed = 0;
5499
5500 dsc->cleanup = &cleanup_preload;
5501 }
5502
5503 static int
5504 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5505 struct regcache *regs,
5506 struct displaced_step_closure *dsc)
5507 {
5508 unsigned int rn = bits (insn, 16, 19);
5509 unsigned int rm = bits (insn, 0, 3);
5510
5511
5512 if (!insn_references_pc (insn, 0x000f000ful))
5513 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5514
5515 if (debug_displaced)
5516 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5517 (unsigned long) insn);
5518
5519 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
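  /* The masking above rewrites both Rn (bits 16-19) and Rm (bits 0-3) to r0,
     and the final OR sets Rm to r1, giving {pli/pld} [r0, r1 {, shift}].  */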
5520
5521 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5522 return 0;
5523 }
5524
5525 /* Copy/cleanup coprocessor load and store instructions. */
5526
5527 static void
5528 cleanup_copro_load_store (struct gdbarch *gdbarch,
5529 struct regcache *regs,
5530 struct displaced_step_closure *dsc)
5531 {
5532 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5533
5534 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5535
5536 if (dsc->u.ldst.writeback)
5537 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5538 }
5539
5540 static void
5541 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5542 struct displaced_step_closure *dsc,
5543 int writeback, unsigned int rn)
5544 {
5545 ULONGEST rn_val;
5546
5547 /* Coprocessor load/store instructions:
5548
5549 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5550 ->
5551 {stc/stc2} [r0, #+/-imm].
5552
5553 ldc/ldc2 are handled identically. */
5554
5555 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5556 rn_val = displaced_read_reg (regs, dsc, rn);
5557 /* PC should be 4-byte aligned. */
5558 rn_val = rn_val & 0xfffffffc;
5559 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5560
5561 dsc->u.ldst.writeback = writeback;
5562 dsc->u.ldst.rn = rn;
5563
5564 dsc->cleanup = &cleanup_copro_load_store;
5565 }
5566
5567 static int
5568 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5569 struct regcache *regs,
5570 struct displaced_step_closure *dsc)
5571 {
5572 unsigned int rn = bits (insn, 16, 19);
5573
5574 if (!insn_references_pc (insn, 0x000f0000ul))
5575 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5576
5577 if (debug_displaced)
5578 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5579 "load/store insn %.8lx\n", (unsigned long) insn);
5580
5581 dsc->modinsn[0] = insn & 0xfff0ffff;
5582
5583 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5584
5585 return 0;
5586 }
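
/* For instance (illustrative), an LDC literal load such as
   "ldc p6, c0, [pc, #16]" has its base register field rewritten to r0, and
   r0 is primed with Align (PC, 4), so the out-of-line copy reads the same
   word the original insn would have read; cleanup_copro_load_store then
   restores r0 and, if writeback was requested, writes the updated base back
   to Rn.  */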
5587
5588 static int
5589 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5590 uint16_t insn2, struct regcache *regs,
5591 struct displaced_step_closure *dsc)
5592 {
5593 unsigned int rn = bits (insn1, 0, 3);
5594
5595 if (rn != ARM_PC_REGNUM)
5596 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5597 "copro load/store", dsc);
5598
5599 if (debug_displaced)
5600 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5601 "load/store insn %.4x%.4x\n", insn1, insn2);
5602
5603 dsc->modinsn[0] = insn1 & 0xfff0;
5604 dsc->modinsn[1] = insn2;
5605 dsc->numinsns = 2;
5606
5607 /* This function is called to copy LDC/LDC2/VLDR insns whose base register
5608 is the PC (the literal form), which does not support writeback, so pass 0. */
5609 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
5610
5611 return 0;
5612 }
5613
5614 /* Clean up branch instructions (actually perform the branch, by setting
5615 PC). */
5616
5617 static void
5618 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5619 struct displaced_step_closure *dsc)
5620 {
5621 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5622 int branch_taken = condition_true (dsc->u.branch.cond, status);
5623 enum pc_write_style write_pc = dsc->u.branch.exchange
5624 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5625
5626 if (!branch_taken)
5627 return;
5628
5629 if (dsc->u.branch.link)
5630 {
5631 /* The value of LR should be the address of the insn following the
5632 current one. So as not to confuse logic handling the later insn `bx lr',
5633 if the current insn is in Thumb mode, bit 0 of the LR value should be set to 1. */
5634 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5635
5636 if (dsc->is_thumb)
5637 next_insn_addr |= 0x1;
5638
5639 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5640 CANNOT_WRITE_PC);
5641 }
5642
5643 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5644 }
5645
5646 /* Copy B/BL/BLX instructions with immediate destinations. */
5647
5648 static void
5649 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5650 struct displaced_step_closure *dsc,
5651 unsigned int cond, int exchange, int link, long offset)
5652 {
5653 /* Implement "BL<cond> <label>" as:
5654
5655 Preparation: cond <- instruction condition
5656 Insn: mov r0, r0 (nop)
5657 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5658
5659 B<cond> similar, but don't set r14 in cleanup. */
5660
5661 dsc->u.branch.cond = cond;
5662 dsc->u.branch.link = link;
5663 dsc->u.branch.exchange = exchange;
5664
5665 dsc->u.branch.dest = dsc->insn_addr;
5666 if (link && exchange)
5667 /* For BLX, offset is computed from the Align (PC, 4). */
5668 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
5669
5670 if (dsc->is_thumb)
5671 dsc->u.branch.dest += 4 + offset;
5672 else
5673 dsc->u.branch.dest += 8 + offset;
5674
5675 dsc->cleanup = &cleanup_branch;
5676 }
5677 static int
5678 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5679 struct regcache *regs, struct displaced_step_closure *dsc)
5680 {
5681 unsigned int cond = bits (insn, 28, 31);
5682 int exchange = (cond == 0xf);
5683 int link = exchange || bit (insn, 24);
5684 long offset;
5685
5686 if (debug_displaced)
5687 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5688 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5689 (unsigned long) insn);
5690 if (exchange)
5691 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5692 then arrange the switch into Thumb mode. */
5693 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5694 else
5695 offset = bits (insn, 0, 23) << 2;
5696
5697 if (bit (offset, 25))
5698 offset = offset | ~0x3ffffff;
5699
5700 dsc->modinsn[0] = ARM_NOP;
5701
5702 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5703 return 0;
5704 }
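
/* Worked example (illustrative values): the ARM insn 0xeb000010 is BL with
   cond = 0xe and imm24 = 0x10, so offset = 0x10 << 2 = 0x40.  The copy is a
   NOP; cleanup_branch then sets LR to the address of the insn following the
   original BL and branches to insn_addr + 8 + 0x40.  */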
5705
5706 static int
5707 thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
5708 uint16_t insn2, struct regcache *regs,
5709 struct displaced_step_closure *dsc)
5710 {
5711 int link = bit (insn2, 14);
5712 int exchange = link && !bit (insn2, 12);
5713 int cond = INST_AL;
5714 long offset = 0;
5715 int j1 = bit (insn2, 13);
5716 int j2 = bit (insn2, 11);
5717 int s = sbits (insn1, 10, 10);
5718 int i1 = !(j1 ^ bit (insn1, 10));
5719 int i2 = !(j2 ^ bit (insn1, 10));
5720
5721 if (!link && !exchange) /* B */
5722 {
5723 offset = (bits (insn2, 0, 10) << 1);
5724 if (bit (insn2, 12)) /* Encoding T4 */
5725 {
5726 offset |= (bits (insn1, 0, 9) << 12)
5727 | (i2 << 22)
5728 | (i1 << 23)
5729 | (s << 24);
5730 cond = INST_AL;
5731 }
5732 else /* Encoding T3 */
5733 {
5734 offset |= (bits (insn1, 0, 5) << 12)
5735 | (j1 << 18)
5736 | (j2 << 19)
5737 | (s << 20);
5738 cond = bits (insn1, 6, 9);
5739 }
5740 }
5741 else
5742 {
5743 offset = (bits (insn1, 0, 9) << 12);
5744 offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
5745 offset |= exchange ?
5746 (bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
5747 }
5748
5749 if (debug_displaced)
5750 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
5751 "%.4x %.4x with offset %.8lx\n",
5752 link ? (exchange) ? "blx" : "bl" : "b",
5753 insn1, insn2, offset);
5754
5755 dsc->modinsn[0] = THUMB_NOP;
5756
5757 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5758 return 0;
5759 }
5760
5761 /* Copy B Thumb instructions. */
5762 static int
5763 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
5764 struct displaced_step_closure *dsc)
5765 {
5766 unsigned int cond = 0;
5767 int offset = 0;
5768 unsigned short bit_12_15 = bits (insn, 12, 15);
5769 CORE_ADDR from = dsc->insn_addr;
5770
5771 if (bit_12_15 == 0xd)
5772 {
5773 /* offset = SignExtend (imm8:0, 32) */
5774 offset = sbits ((insn << 1), 0, 8);
5775 cond = bits (insn, 8, 11);
5776 }
5777 else if (bit_12_15 == 0xe) /* Encoding T2 */
5778 {
5779 offset = sbits ((insn << 1), 0, 11);
5780 cond = INST_AL;
5781 }
5782
5783 if (debug_displaced)
5784 fprintf_unfiltered (gdb_stdlog,
5785 "displaced: copying b immediate insn %.4x "
5786 "with offset %d\n", insn, offset);
5787
5788 dsc->u.branch.cond = cond;
5789 dsc->u.branch.link = 0;
5790 dsc->u.branch.exchange = 0;
5791 dsc->u.branch.dest = from + 4 + offset;
5792
5793 dsc->modinsn[0] = THUMB_NOP;
5794
5795 dsc->cleanup = &cleanup_branch;
5796
5797 return 0;
5798 }
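
/* Worked example (illustrative): the Thumb insn 0xd0fe is "beq" Encoding T1
   with cond = EQ and imm8 = 0xfe, giving offset = SignExtend (0xfe:'0') = -4,
   so dest = from + 4 - 4 = from, i.e. a conditional branch back to itself.  */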
5799
5800 /* Copy BX/BLX with register-specified destinations. */
5801
5802 static void
5803 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5804 struct displaced_step_closure *dsc, int link,
5805 unsigned int cond, unsigned int rm)
5806 {
5807 /* Implement {BX,BLX}<cond> <reg>" as:
5808
5809 Preparation: cond <- instruction condition
5810 Insn: mov r0, r0 (nop)
5811 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5812
5813 Don't set r14 in cleanup for BX. */
5814
5815 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5816
5817 dsc->u.branch.cond = cond;
5818 dsc->u.branch.link = link;
5819
5820 dsc->u.branch.exchange = 1;
5821
5822 dsc->cleanup = &cleanup_branch;
5823 }
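
/* E.g. (illustrative) for "blx r3", the destination is simply the current
   value of r3.  Since exchange is set, cleanup_branch writes it with
   BX_WRITE_PC, so bit 0 of that value selects ARM or Thumb state, and LR
   receives the address of the insn following the original BLX.  */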
5824
5825 static int
5826 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5827 struct regcache *regs, struct displaced_step_closure *dsc)
5828 {
5829 unsigned int cond = bits (insn, 28, 31);
5830 /* BX: x12xxx1x
5831 BLX: x12xxx3x. */
5832 int link = bit (insn, 5);
5833 unsigned int rm = bits (insn, 0, 3);
5834
5835 if (debug_displaced)
5836 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
5837 (unsigned long) insn);
5838
5839 dsc->modinsn[0] = ARM_NOP;
5840
5841 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5842 return 0;
5843 }
5844
5845 static int
5846 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
5847 struct regcache *regs,
5848 struct displaced_step_closure *dsc)
5849 {
5850 int link = bit (insn, 7);
5851 unsigned int rm = bits (insn, 3, 6);
5852
5853 if (debug_displaced)
5854 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
5855 (unsigned short) insn);
5856
5857 dsc->modinsn[0] = THUMB_NOP;
5858
5859 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
5860
5861 return 0;
5862 }
5863
5864
5865 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5866
5867 static void
5868 cleanup_alu_imm (struct gdbarch *gdbarch,
5869 struct regcache *regs, struct displaced_step_closure *dsc)
5870 {
5871 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5872 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5873 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5874 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5875 }
5876
5877 static int
5878 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5879 struct displaced_step_closure *dsc)
5880 {
5881 unsigned int rn = bits (insn, 16, 19);
5882 unsigned int rd = bits (insn, 12, 15);
5883 unsigned int op = bits (insn, 21, 24);
5884 int is_mov = (op == 0xd);
5885 ULONGEST rd_val, rn_val;
5886
5887 if (!insn_references_pc (insn, 0x000ff000ul))
5888 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5889
5890 if (debug_displaced)
5891 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5892 "%.8lx\n", is_mov ? "move" : "ALU",
5893 (unsigned long) insn);
5894
5895 /* Instruction is of form:
5896
5897 <op><cond> rd, [rn,] #imm
5898
5899 Rewrite as:
5900
5901 Preparation: tmp1, tmp2 <- r0, r1;
5902 r0, r1 <- rd, rn
5903 Insn: <op><cond> r0, r1, #imm
5904 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5905 */
5906
5907 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5908 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5909 rn_val = displaced_read_reg (regs, dsc, rn);
5910 rd_val = displaced_read_reg (regs, dsc, rd);
5911 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5912 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5913 dsc->rd = rd;
5914
5915 if (is_mov)
5916 dsc->modinsn[0] = insn & 0xfff00fff;
5917 else
5918 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5919
5920 dsc->cleanup = &cleanup_alu_imm;
5921
5922 return 0;
5923 }
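
/* Worked example (illustrative values): "add r3, pc, #4" (0xe28f3004) has
   rn = 15 and rd = 3.  It is rewritten as "add r0, r1, #4" (0xe2810004) and
   executed with r1 holding the original PC value (from + 8); cleanup_alu_imm
   then restores r0/r1 and copies the result into r3.  */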
5924
5925 static int
5926 thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
5927 uint16_t insn2, struct regcache *regs,
5928 struct displaced_step_closure *dsc)
5929 {
5930 unsigned int op = bits (insn1, 5, 8);
5931 unsigned int rn, rm, rd;
5932 ULONGEST rd_val, rn_val;
5933
5934 rn = bits (insn1, 0, 3); /* Rn */
5935 rm = bits (insn2, 0, 3); /* Rm */
5936 rd = bits (insn2, 8, 11); /* Rd */
5937
5938 /* This routine is only called for the MOV instruction. */
5939 gdb_assert (op == 0x2 && rn == 0xf);
5940
5941 if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
5942 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
5943
5944 if (debug_displaced)
5945 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
5946 "ALU", insn1, insn2);
5947
5948 /* Instruction is of form:
5949
5950 <op><cond> rd, [rn,] #imm
5951
5952 Rewrite as:
5953
5954 Preparation: tmp1, tmp2 <- r0, r1;
5955 r0, r1 <- rd, rn
5956 Insn: <op><cond> r0, r1, #imm
5957 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5958 */
5959
5960 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5961 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5962 rn_val = displaced_read_reg (regs, dsc, rn);
5963 rd_val = displaced_read_reg (regs, dsc, rd);
5964 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5965 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5966 dsc->rd = rd;
5967
5968 dsc->modinsn[0] = insn1;
5969 dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
5970 dsc->numinsns = 2;
5971
5972 dsc->cleanup = &cleanup_alu_imm;
5973
5974 return 0;
5975 }
5976
5977 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5978
5979 static void
5980 cleanup_alu_reg (struct gdbarch *gdbarch,
5981 struct regcache *regs, struct displaced_step_closure *dsc)
5982 {
5983 ULONGEST rd_val;
5984 int i;
5985
5986 rd_val = displaced_read_reg (regs, dsc, 0);
5987
5988 for (i = 0; i < 3; i++)
5989 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5990
5991 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5992 }
5993
5994 static void
5995 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
5996 struct displaced_step_closure *dsc,
5997 unsigned int rd, unsigned int rn, unsigned int rm)
5998 {
5999 ULONGEST rd_val, rn_val, rm_val;
6000
6001 /* Instruction is of form:
6002
6003 <op><cond> rd, [rn,] rm [, <shift>]
6004
6005 Rewrite as:
6006
6007 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
6008 r0, r1, r2 <- rd, rn, rm
6009 Insn: <op><cond> r0, r1, r2 [, <shift>]
6010 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
6011 */
6012
6013 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6014 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6015 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6016 rd_val = displaced_read_reg (regs, dsc, rd);
6017 rn_val = displaced_read_reg (regs, dsc, rn);
6018 rm_val = displaced_read_reg (regs, dsc, rm);
6019 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6020 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6021 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6022 dsc->rd = rd;
6023
6024 dsc->cleanup = &cleanup_alu_reg;
6025 }
6026
6027 static int
6028 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6029 struct displaced_step_closure *dsc)
6030 {
6031 unsigned int op = bits (insn, 21, 24);
6032 int is_mov = (op == 0xd);
6033
6034 if (!insn_references_pc (insn, 0x000ff00ful))
6035 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
6036
6037 if (debug_displaced)
6038 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
6039 is_mov ? "move" : "ALU", (unsigned long) insn);
6040
6041 if (is_mov)
6042 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
6043 else
6044 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
6045
6046 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
6047 bits (insn, 0, 3));
6048 return 0;
6049 }
6050
6051 static int
6052 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6053 struct regcache *regs,
6054 struct displaced_step_closure *dsc)
6055 {
6056 unsigned rn, rm, rd;
6057
6058 rd = bits (insn, 3, 6);
6059 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6060 rm = 2;
6061
6062 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6063 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6064
6065 if (debug_displaced)
6066 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6067 "ALU", (unsigned short) insn);
6068
6069 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6070
6071 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6072
6073 return 0;
6074 }
6075
6076 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6077
6078 static void
6079 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6080 struct regcache *regs,
6081 struct displaced_step_closure *dsc)
6082 {
6083 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6084 int i;
6085
6086 for (i = 0; i < 4; i++)
6087 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6088
6089 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6090 }
6091
6092 static void
6093 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
6094 struct displaced_step_closure *dsc,
6095 unsigned int rd, unsigned int rn, unsigned int rm,
6096 unsigned rs)
6097 {
6098 int i;
6099 ULONGEST rd_val, rn_val, rm_val, rs_val;
6100
6101 /* Instruction is of form:
6102
6103 <op><cond> rd, [rn,] rm, <shift> rs
6104
6105 Rewrite as:
6106
6107 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6108 r0, r1, r2, r3 <- rd, rn, rm, rs
6109 Insn: <op><cond> r0, r1, r2, <shift> r3
6110 Cleanup: tmp5 <- r0
6111 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6112 rd <- tmp5
6113 */
6114
6115 for (i = 0; i < 4; i++)
6116 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6117
6118 rd_val = displaced_read_reg (regs, dsc, rd);
6119 rn_val = displaced_read_reg (regs, dsc, rn);
6120 rm_val = displaced_read_reg (regs, dsc, rm);
6121 rs_val = displaced_read_reg (regs, dsc, rs);
6122 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6123 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6124 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6125 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
6126 dsc->rd = rd;
6127 dsc->cleanup = &cleanup_alu_shifted_reg;
6128 }
6129
6130 static int
6131 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
6132 struct regcache *regs,
6133 struct displaced_step_closure *dsc)
6134 {
6135 unsigned int op = bits (insn, 21, 24);
6136 int is_mov = (op == 0xd);
6137 unsigned int rd, rn, rm, rs;
6138
6139 if (!insn_references_pc (insn, 0x000fff0ful))
6140 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
6141
6142 if (debug_displaced)
6143 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
6144 "%.8lx\n", is_mov ? "move" : "ALU",
6145 (unsigned long) insn);
6146
6147 rn = bits (insn, 16, 19);
6148 rm = bits (insn, 0, 3);
6149 rs = bits (insn, 8, 11);
6150 rd = bits (insn, 12, 15);
6151
6152 if (is_mov)
6153 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
6154 else
6155 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
6156
6157 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
6158
6159 return 0;
6160 }
6161
6162 /* Clean up load instructions. */
6163
6164 static void
6165 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
6166 struct displaced_step_closure *dsc)
6167 {
6168 ULONGEST rt_val, rt_val2 = 0, rn_val;
6169
6170 rt_val = displaced_read_reg (regs, dsc, 0);
6171 if (dsc->u.ldst.xfersize == 8)
6172 rt_val2 = displaced_read_reg (regs, dsc, 1);
6173 rn_val = displaced_read_reg (regs, dsc, 2);
6174
6175 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6176 if (dsc->u.ldst.xfersize > 4)
6177 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6178 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
6179 if (!dsc->u.ldst.immed)
6180 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
6181
6182 /* Handle register writeback. */
6183 if (dsc->u.ldst.writeback)
6184 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
6185 /* Put result in right place. */
6186 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
6187 if (dsc->u.ldst.xfersize == 8)
6188 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
6189 }
6190
6191 /* Clean up store instructions. */
6192
6193 static void
6194 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
6195 struct displaced_step_closure *dsc)
6196 {
6197 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
6198
6199 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6200 if (dsc->u.ldst.xfersize > 4)
6201 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6202 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
6203 if (!dsc->u.ldst.immed)
6204 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
6205 if (!dsc->u.ldst.restore_r4)
6206 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
6207
6208 /* Writeback. */
6209 if (dsc->u.ldst.writeback)
6210 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
6211 }
6212
6213 /* Copy "extra" load/store instructions. These are halfword/doubleword
6214 transfers, which have a different encoding to byte/word transfers. */
6215
6216 static int
6217 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
6218 struct regcache *regs, struct displaced_step_closure *dsc)
6219 {
6220 unsigned int op1 = bits (insn, 20, 24);
6221 unsigned int op2 = bits (insn, 5, 6);
6222 unsigned int rt = bits (insn, 12, 15);
6223 unsigned int rn = bits (insn, 16, 19);
6224 unsigned int rm = bits (insn, 0, 3);
6225 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6226 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6227 int immed = (op1 & 0x4) != 0;
6228 int opcode;
6229 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
6230
6231 if (!insn_references_pc (insn, 0x000ff00ful))
6232 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
6233
6234 if (debug_displaced)
6235 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
6236 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
6237 (unsigned long) insn);
6238
6239 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
6240
6241 if (opcode < 0)
6242 internal_error (__FILE__, __LINE__,
6243 _("copy_extra_ld_st: instruction decode error"));
6244
6245 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6246 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6247 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6248 if (!immed)
6249 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6250
6251 rt_val = displaced_read_reg (regs, dsc, rt);
6252 if (bytesize[opcode] == 8)
6253 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
6254 rn_val = displaced_read_reg (regs, dsc, rn);
6255 if (!immed)
6256 rm_val = displaced_read_reg (regs, dsc, rm);
6257
6258 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6259 if (bytesize[opcode] == 8)
6260 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
6261 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6262 if (!immed)
6263 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6264
6265 dsc->rd = rt;
6266 dsc->u.ldst.xfersize = bytesize[opcode];
6267 dsc->u.ldst.rn = rn;
6268 dsc->u.ldst.immed = immed;
6269 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
6270 dsc->u.ldst.restore_r4 = 0;
6271
6272 if (immed)
6273 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6274 ->
6275 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6276 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6277 else
6278 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6279 ->
6280 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6281 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6282
6283 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
6284
6285 return 0;
6286 }
6287
6288 /* Copy byte/half word/word loads and stores. */
6289
6290 static void
6291 install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
6292 struct displaced_step_closure *dsc, int load,
6293 int immed, int writeback, int size, int usermode,
6294 int rt, int rm, int rn)
6295 {
6296 ULONGEST rt_val, rn_val, rm_val = 0;
6297
6298 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6299 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6300 if (!immed)
6301 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6302 if (!load)
6303 dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
6304
6305 rt_val = displaced_read_reg (regs, dsc, rt);
6306 rn_val = displaced_read_reg (regs, dsc, rn);
6307 if (!immed)
6308 rm_val = displaced_read_reg (regs, dsc, rm);
6309
6310 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6311 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6312 if (!immed)
6313 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6314 dsc->rd = rt;
6315 dsc->u.ldst.xfersize = size;
6316 dsc->u.ldst.rn = rn;
6317 dsc->u.ldst.immed = immed;
6318 dsc->u.ldst.writeback = writeback;
6319
6320 /* To write PC we can do:
6321
6322 Before this sequence of instructions:
6323 r0 is the PC value obtained from displaced_read_reg, so r0 = from + 8;
6324 r2 is the Rn value obtained from displaced_read_reg.
6325
6326 Insn1: push {pc} Write address of STR instruction + offset on stack
6327 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
6328 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
6329 = addr(Insn1) + offset - addr(Insn3) - 8
6330 = offset - 16
6331 Insn4: add r4, r4, #8 r4 = offset - 8
6332 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
6333 = from + offset
6334 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
6335
6336 Otherwise we don't know what value to write for PC, since the offset is
6337 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
6338 of this can be found in Section "Saving from r15" in
6339 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
6340
6341 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6342 }
6343
6344
6345 static int
6346 thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
6347 uint16_t insn2, struct regcache *regs,
6348 struct displaced_step_closure *dsc, int size)
6349 {
6350 unsigned int u_bit = bit (insn1, 7);
6351 unsigned int rt = bits (insn2, 12, 15);
6352 int imm12 = bits (insn2, 0, 11);
6353 ULONGEST pc_val;
6354
6355 if (debug_displaced)
6356 fprintf_unfiltered (gdb_stdlog,
6357 "displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
6358 (unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
6359 imm12);
6360
6361 if (!u_bit)
6362 imm12 = -1 * imm12;
6363
6364 /* Rewrite instruction LDR Rt imm12 into:
6365
6366 Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12
6367
6368 LDR R0, R2, R3,
6369
6370 Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2]. */
6371
6372
6373 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6374 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6375 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6376
6377 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
6378
6379 pc_val = pc_val & 0xfffffffc;
6380
6381 displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
6382 displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);
6383
6384 dsc->rd = rt;
6385
6386 dsc->u.ldst.xfersize = size;
6387 dsc->u.ldst.immed = 0;
6388 dsc->u.ldst.writeback = 0;
6389 dsc->u.ldst.restore_r4 = 0;
6390
6391 /* LDR R0, R2, R3 */
6392 dsc->modinsn[0] = 0xf852;
6393 dsc->modinsn[1] = 0x3;
6394 dsc->numinsns = 2;
6395
6396 dsc->cleanup = &cleanup_load;
6397
6398 return 0;
6399 }
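
/* Worked example (illustrative): "ldr.w r1, [pc, #16]" (0xf8df 0x1010) has
   rt = 1, u_bit set and imm12 = 16.  It becomes "ldr.w r0, [r2, r3]"
   (0xf852 0x0003) with r2 = Align (PC, 4) and r3 = 16; cleanup_load moves
   the loaded value from r0 into r1 and restores r0, r2 and r3.  */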
6400
6401 static int
6402 thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
6403 uint16_t insn2, struct regcache *regs,
6404 struct displaced_step_closure *dsc,
6405 int writeback, int immed)
6406 {
6407 unsigned int rt = bits (insn2, 12, 15);
6408 unsigned int rn = bits (insn1, 0, 3);
6409 unsigned int rm = bits (insn2, 0, 3); /* Only valid if !immed. */
6410 /* In LDR (register), there is also a register Rm, which is not allowed to
6411 be PC, so we don't have to check it. */
6412
6413 if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6414 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
6415 dsc);
6416
6417 if (debug_displaced)
6418 fprintf_unfiltered (gdb_stdlog,
6419 "displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
6420 rt, rn, insn1, insn2);
6421
6422 install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
6423 0, rt, rm, rn);
6424
6425 dsc->u.ldst.restore_r4 = 0;
6426
6427 if (immed)
6428 /* ldr[b]<cond> rt, [rn, #imm], etc.
6429 ->
6430 ldr[b]<cond> r0, [r2, #imm]. */
6431 {
6432 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6433 dsc->modinsn[1] = insn2 & 0x0fff;
6434 }
6435 else
6436 /* ldr[b]<cond> rt, [rn, rm], etc.
6437 ->
6438 ldr[b]<cond> r0, [r2, r3]. */
6439 {
6440 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6441 dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
6442 }
6443
6444 dsc->numinsns = 2;
6445
6446 return 0;
6447 }
6448
6449
6450 static int
6451 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
6452 struct regcache *regs,
6453 struct displaced_step_closure *dsc,
6454 int load, int size, int usermode)
6455 {
6456 int immed = !bit (insn, 25);
6457 int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
6458 unsigned int rt = bits (insn, 12, 15);
6459 unsigned int rn = bits (insn, 16, 19);
6460 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
6461
6462 if (!insn_references_pc (insn, 0x000ff00ful))
6463 return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);
6464
6465 if (debug_displaced)
6466 fprintf_unfiltered (gdb_stdlog,
6467 "displaced: copying %s%s r%d [r%d] insn %.8lx\n",
6468 load ? (size == 1 ? "ldrb" : "ldr")
6469 : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
6470 rt, rn,
6471 (unsigned long) insn);
6472
6473 install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
6474 usermode, rt, rm, rn);
6475
6476 if (load || rt != ARM_PC_REGNUM)
6477 {
6478 dsc->u.ldst.restore_r4 = 0;
6479
6480 if (immed)
6481 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
6482 ->
6483 {ldr,str}[b]<cond> r0, [r2, #imm]. */
6484 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6485 else
6486 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
6487 ->
6488 {ldr,str}[b]<cond> r0, [r2, r3]. */
6489 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6490 }
6491 else
6492 {
6493 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
6494 dsc->u.ldst.restore_r4 = 1;
6495 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
6496 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
6497 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
6498 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
6499 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
6500
6501 /* As above. */
6502 if (immed)
6503 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
6504 else
6505 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
6506
6507 dsc->numinsns = 6;
6508 }
6509
6510 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6511
6512 return 0;
6513 }
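
/* To illustrate the PC-store path above: "str pc, [r3]" (0xe583f000) cannot
   simply store r0, because the value stored for PC is from + 8 or from + 12
   depending on the implementation.  The six-insn sequence computes that
   offset at run time (r4 = offset - 8 after insn 4), adds it to the PC value
   already in r0, and only then performs the store, so memory ends up with
   exactly what the original insn would have written.  */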
6514
6515 /* Cleanup LDM instructions with fully-populated register list. This is an
6516 unfortunate corner case: it's impossible to implement correctly by modifying
6517 the instruction. The issue is as follows: we have an instruction,
6518
6519 ldm rN, {r0-r15}
6520
6521 which we must rewrite to avoid loading PC. A possible solution would be to
6522 do the load in two halves, something like (with suitable cleanup
6523 afterwards):
6524
6525 mov r8, rN
6526 ldm[id][ab] r8!, {r0-r7}
6527 str r7, <temp>
6528 ldm[id][ab] r8, {r7-r14}
6529 <bkpt>
6530
6531 but at present there's no suitable place for <temp>, since the scratch space
6532 is overwritten before the cleanup routine is called. For now, we simply
6533 emulate the instruction. */
6534
6535 static void
6536 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6537 struct displaced_step_closure *dsc)
6538 {
6539 int inc = dsc->u.block.increment;
6540 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6541 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6542 uint32_t regmask = dsc->u.block.regmask;
6543 int regno = inc ? 0 : 15;
6544 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6545 int exception_return = dsc->u.block.load && dsc->u.block.user
6546 && (regmask & 0x8000) != 0;
6547 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6548 int do_transfer = condition_true (dsc->u.block.cond, status);
6549 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6550
6551 if (!do_transfer)
6552 return;
6553
6554 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6555 sensible we can do here. Complain loudly. */
6556 if (exception_return)
6557 error (_("Cannot single-step exception return"));
6558
6559 /* We don't handle any stores here for now. */
6560 gdb_assert (dsc->u.block.load != 0);
6561
6562 if (debug_displaced)
6563 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6564 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6565 dsc->u.block.increment ? "inc" : "dec",
6566 dsc->u.block.before ? "before" : "after");
6567
6568 while (regmask)
6569 {
6570 uint32_t memword;
6571
6572 if (inc)
6573 while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
6574 regno++;
6575 else
6576 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6577 regno--;
6578
6579 xfer_addr += bump_before;
6580
6581 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6582 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6583
6584 xfer_addr += bump_after;
6585
6586 regmask &= ~(1 << regno);
6587 }
6588
6589 if (dsc->u.block.writeback)
6590 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6591 CANNOT_WRITE_PC);
6592 }
6593
6594 /* Clean up an STM which included the PC in the register list. */
6595
6596 static void
6597 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6598 struct displaced_step_closure *dsc)
6599 {
6600 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6601 int store_executed = condition_true (dsc->u.block.cond, status);
6602 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6603 CORE_ADDR stm_insn_addr;
6604 uint32_t pc_val;
6605 long offset;
6606 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6607
6608 /* If condition code fails, there's nothing else to do. */
6609 if (!store_executed)
6610 return;
6611
6612 if (dsc->u.block.increment)
6613 {
6614 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6615
6616 if (dsc->u.block.before)
6617 pc_stored_at += 4;
6618 }
6619 else
6620 {
6621 pc_stored_at = dsc->u.block.xfer_addr;
6622
6623 if (dsc->u.block.before)
6624 pc_stored_at -= 4;
6625 }
6626
6627 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6628 stm_insn_addr = dsc->scratch_base;
6629 offset = pc_val - stm_insn_addr;
6630
6631 if (debug_displaced)
6632 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6633 "STM instruction\n", offset);
6634
6635 /* Rewrite the stored PC to the proper value for the non-displaced original
6636 instruction. */
6637 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6638 dsc->insn_addr + offset);
6639 }
6640
6641 /* Clean up an LDM which includes the PC in the register list. We clumped all
6642 the registers in the transferred list into a contiguous range r0...rX (to
6643 avoid loading PC directly and losing control of the debugged program), so we
6644 must undo that here. */
6645
6646 static void
6647 cleanup_block_load_pc (struct gdbarch *gdbarch,
6648 struct regcache *regs,
6649 struct displaced_step_closure *dsc)
6650 {
6651 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6652 int load_executed = condition_true (dsc->u.block.cond, status), i;
6653 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6654 unsigned int regs_loaded = bitcount (mask);
6655 unsigned int num_to_shuffle = regs_loaded, clobbered;
6656
6657 /* The method employed here will fail if the register list is fully populated
6658 (we need to avoid loading PC directly). */
6659 gdb_assert (num_to_shuffle < 16);
6660
6661 if (!load_executed)
6662 return;
6663
6664 clobbered = (1 << num_to_shuffle) - 1;
6665
6666 while (num_to_shuffle > 0)
6667 {
6668 if ((mask & (1 << write_reg)) != 0)
6669 {
6670 unsigned int read_reg = num_to_shuffle - 1;
6671
6672 if (read_reg != write_reg)
6673 {
6674 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
6675 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6676 if (debug_displaced)
6677 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6678 "loaded register r%d to r%d\n"), read_reg,
6679 write_reg);
6680 }
6681 else if (debug_displaced)
6682 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6683 "r%d already in the right place\n"),
6684 write_reg);
6685
6686 clobbered &= ~(1 << write_reg);
6687
6688 num_to_shuffle--;
6689 }
6690
6691 write_reg--;
6692 }
6693
6694 /* Restore any registers we scribbled over. */
6695 for (write_reg = 0; clobbered != 0; write_reg++)
6696 {
6697 if ((clobbered & (1 << write_reg)) != 0)
6698 {
6699 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6700 CANNOT_WRITE_PC);
6701 if (debug_displaced)
6702 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6703 "clobbered register r%d\n"), write_reg);
6704 clobbered &= ~(1 << write_reg);
6705 }
6706 }
6707
6708 /* Perform register writeback manually. */
6709 if (dsc->u.block.writeback)
6710 {
6711 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6712
6713 if (dsc->u.block.increment)
6714 new_rn_val += regs_loaded * 4;
6715 else
6716 new_rn_val -= regs_loaded * 4;
6717
6718 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6719 CANNOT_WRITE_PC);
6720 }
6721 }
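
/* Example (illustrative values): "ldm r0!, {r1, r2, pc}" has register mask
   0x8006 and is rewritten to load the contiguous range r0-r2 (mask 0x0007)
   with writeback suppressed.  This cleanup then moves the third loaded word
   into PC, the second into r2 and the first into r1, restores the clobbered
   r0 from dsc->tmp[0], and finally performs the writeback by hand, storing
   the incremented base address into r0.  */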
6722
6723 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6724 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6725
6726 static int
6727 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
6728 struct regcache *regs,
6729 struct displaced_step_closure *dsc)
6730 {
6731 int load = bit (insn, 20);
6732 int user = bit (insn, 22);
6733 int increment = bit (insn, 23);
6734 int before = bit (insn, 24);
6735 int writeback = bit (insn, 21);
6736 int rn = bits (insn, 16, 19);
6737
6738 /* Block transfers which don't mention PC can be run directly
6739 out-of-line. */
6740 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6741 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6742
6743 if (rn == ARM_PC_REGNUM)
6744 {
6745 warning (_("displaced: Unpredictable LDM or STM with "
6746 "base register r15"));
6747 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6748 }
6749
6750 if (debug_displaced)
6751 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6752 "%.8lx\n", (unsigned long) insn);
6753
6754 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6755 dsc->u.block.rn = rn;
6756
6757 dsc->u.block.load = load;
6758 dsc->u.block.user = user;
6759 dsc->u.block.increment = increment;
6760 dsc->u.block.before = before;
6761 dsc->u.block.writeback = writeback;
6762 dsc->u.block.cond = bits (insn, 28, 31);
6763
6764 dsc->u.block.regmask = insn & 0xffff;
6765
6766 if (load)
6767 {
6768 if ((insn & 0xffff) == 0xffff)
6769 {
6770 /* LDM with a fully-populated register list. This case is
6771 particularly tricky. Implement for now by fully emulating the
6772 instruction (which might not behave perfectly in all cases, but
6773 these instructions should be rare enough for that not to matter
6774 too much). */
6775 dsc->modinsn[0] = ARM_NOP;
6776
6777 dsc->cleanup = &cleanup_block_load_all;
6778 }
6779 else
6780 {
6781 /* LDM of a list of registers which includes PC. Implement by
6782 rewriting the list of registers to be transferred into a
6783 contiguous chunk r0...rX before doing the transfer, then shuffling
6784 registers into the correct places in the cleanup routine. */
6785 unsigned int regmask = insn & 0xffff;
6786 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6787 unsigned int to = 0, from = 0, i, new_rn;
6788
6789 for (i = 0; i < num_in_list; i++)
6790 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6791
6792 /* Writeback makes things complicated. We need to avoid clobbering
6793 the base register with one of the registers in our modified
6794 register list, but just using a different register can't work in
6795 all cases, e.g.:
6796
6797 ldm r14!, {r0-r13,pc}
6798
6799 which would need to be rewritten as:
6800
6801 ldm rN!, {r0-r14}
6802
6803 but that can't work, because there's no free register for N.
6804
6805 Solve this by turning off the writeback bit, and emulating
6806 writeback manually in the cleanup routine. */
6807
6808 if (writeback)
6809 insn &= ~(1 << 21);
6810
6811 new_regmask = (1 << num_in_list) - 1;
6812
6813 if (debug_displaced)
6814 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6815 "{..., pc}: original reg list %.4x, modified "
6816 "list %.4x\n"), rn, writeback ? "!" : "",
6817 (int) insn & 0xffff, new_regmask);
6818
6819 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6820
6821 dsc->cleanup = &cleanup_block_load_pc;
6822 }
6823 }
6824 else
6825 {
6826 /* STM of a list of registers which includes PC. Run the instruction
6827 as-is, but out of line: this will store the wrong value for the PC,
6828 so we must manually fix up the memory in the cleanup routine.
6829 Doing things this way has the advantage that we can auto-detect
6830 the offset of the PC write (which is architecture-dependent) in
6831 the cleanup routine. */
6832 dsc->modinsn[0] = insn;
6833
6834 dsc->cleanup = &cleanup_block_store_pc;
6835 }
6836
6837 return 0;
6838 }
6839
6840 static int
6841 thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
6842 struct regcache *regs,
6843 struct displaced_step_closure *dsc)
6844 {
6845 int rn = bits (insn1, 0, 3);
6846 int load = bit (insn1, 4);
6847 int writeback = bit (insn1, 5);
6848
6849 /* Block transfers which don't mention PC can be run directly
6850 out-of-line. */
6851 if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
6852 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
6853
6854 if (rn == ARM_PC_REGNUM)
6855 {
6856 warning (_("displaced: Unpredictable LDM or STM with "
6857 "base register r15"));
6858 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
6859 "unpredictable ldm/stm", dsc);
6860 }
6861
6862 if (debug_displaced)
6863 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6864 "%.4x%.4x\n", insn1, insn2);
6865
6866 /* Clear bit 13, since it should always be zero. */
6867 dsc->u.block.regmask = (insn2 & 0xdfff);
6868 dsc->u.block.rn = rn;
6869
6870 dsc->u.block.load = load;
6871 dsc->u.block.user = 0;
6872 dsc->u.block.increment = bit (insn1, 7);
6873 dsc->u.block.before = bit (insn1, 8);
6874 dsc->u.block.writeback = writeback;
6875 dsc->u.block.cond = INST_AL;
6876 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6877
6878 if (load)
6879 {
6880 if (dsc->u.block.regmask == 0xffff)
6881 {
6882 /* This cannot happen: bit 13 was cleared from the register mask above, so the mask can never be 0xffff. */
6883 gdb_assert (0);
6884 }
6885 else
6886 {
6887 unsigned int regmask = dsc->u.block.regmask;
6888 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6889 unsigned int to = 0, from = 0, i, new_rn;
6890
6891 for (i = 0; i < num_in_list; i++)
6892 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6893
6894 if (writeback)
6895 insn1 &= ~(1 << 5);
6896
6897 new_regmask = (1 << num_in_list) - 1;
6898
6899 if (debug_displaced)
6900 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6901 "{..., pc}: original reg list %.4x, modified "
6902 "list %.4x\n"), rn, writeback ? "!" : "",
6903 (int) dsc->u.block.regmask, new_regmask);
6904
6905 dsc->modinsn[0] = insn1;
6906 dsc->modinsn[1] = (new_regmask & 0xffff);
6907 dsc->numinsns = 2;
6908
6909 dsc->cleanup = &cleanup_block_load_pc;
6910 }
6911 }
6912 else
6913 {
6914 dsc->modinsn[0] = insn1;
6915 dsc->modinsn[1] = insn2;
6916 dsc->numinsns = 2;
6917 dsc->cleanup = &cleanup_block_store_pc;
6918 }
6919 return 0;
6920 }
6921
6922 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6923 for Linux, where some SVC instructions must be treated specially. */
6924
6925 static void
6926 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6927 struct displaced_step_closure *dsc)
6928 {
6929 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
6930
6931 if (debug_displaced)
6932 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6933 "%.8lx\n", (unsigned long) resume_addr);
6934
6935 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6936 }
6937
6938
6939 /* Common copy routine for the svc instruction. */
6940
6941 static int
6942 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
6943 struct displaced_step_closure *dsc)
6944 {
6945 /* Preparation: none.
6946 Insn: unmodified svc.
6947 Cleanup: pc <- insn_addr + insn_size. */
6948
6949 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6950 instruction. */
6951 dsc->wrote_to_pc = 1;
6952
6953 /* Allow OS-specific code to override SVC handling. */
6954 if (dsc->u.svc.copy_svc_os)
6955 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
6956 else
6957 {
6958 dsc->cleanup = &cleanup_svc;
6959 return 0;
6960 }
6961 }
6962
6963 static int
6964 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6965 struct regcache *regs, struct displaced_step_closure *dsc)
6966 {
6967
6968 if (debug_displaced)
6969 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6970 (unsigned long) insn);
6971
6972 dsc->modinsn[0] = insn;
6973
6974 return install_svc (gdbarch, regs, dsc);
6975 }
6976
6977 static int
6978 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
6979 struct regcache *regs, struct displaced_step_closure *dsc)
6980 {
6981
6982 if (debug_displaced)
6983 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
6984 insn);
6985
6986 dsc->modinsn[0] = insn;
6987
6988 return install_svc (gdbarch, regs, dsc);
6989 }
6990
6991 /* Copy undefined instructions. */
6992
6993 static int
6994 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6995 struct displaced_step_closure *dsc)
6996 {
6997 if (debug_displaced)
6998 fprintf_unfiltered (gdb_stdlog,
6999 "displaced: copying undefined insn %.8lx\n",
7000 (unsigned long) insn);
7001
7002 dsc->modinsn[0] = insn;
7003
7004 return 0;
7005 }
7006
7007 static int
7008 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7009 struct displaced_step_closure *dsc)
7010 {
7011
7012 if (debug_displaced)
7013 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7014 "%.4x %.4x\n", (unsigned short) insn1,
7015 (unsigned short) insn2);
7016
7017 dsc->modinsn[0] = insn1;
7018 dsc->modinsn[1] = insn2;
7019 dsc->numinsns = 2;
7020
7021 return 0;
7022 }
7023
7024 /* Copy unpredictable instructions. */
7025
7026 static int
7027 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7028 struct displaced_step_closure *dsc)
7029 {
7030 if (debug_displaced)
7031 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7032 "%.8lx\n", (unsigned long) insn);
7033
7034 dsc->modinsn[0] = insn;
7035
7036 return 0;
7037 }
7038
7039 /* The decode_* functions are instruction decoding helpers. They mostly follow
7040 the presentation in the ARM ARM. */
7041
7042 static int
7043 arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
7044 struct regcache *regs,
7045 struct displaced_step_closure *dsc)
7046 {
7047 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
7048 unsigned int rn = bits (insn, 16, 19);
7049
7050 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
7051 return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
7052 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
7053 return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
7054 else if ((op1 & 0x60) == 0x20)
7055 return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
7056 else if ((op1 & 0x71) == 0x40)
7057 return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
7058 dsc);
7059 else if ((op1 & 0x77) == 0x41)
7060 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
7061 else if ((op1 & 0x77) == 0x45)
7062 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pli. */
7063 else if ((op1 & 0x77) == 0x51)
7064 {
7065 if (rn != 0xf)
7066 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
7067 else
7068 return arm_copy_unpred (gdbarch, insn, dsc);
7069 }
7070 else if ((op1 & 0x77) == 0x55)
7071 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
7072 else if (op1 == 0x57)
7073 switch (op2)
7074 {
7075 case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
7076 case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
7077 case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
7078 case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
7079 default: return arm_copy_unpred (gdbarch, insn, dsc);
7080 }
7081 else if ((op1 & 0x63) == 0x43)
7082 return arm_copy_unpred (gdbarch, insn, dsc);
7083 else if ((op2 & 0x1) == 0x0)
7084 switch (op1 & ~0x80)
7085 {
7086 case 0x61:
7087 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
7088 case 0x65:
7089 return arm_copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
7090 case 0x71: case 0x75:
7091 /* pld/pldw reg. */
7092 return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
7093 case 0x63: case 0x67: case 0x73: case 0x77:
7094 return arm_copy_unpred (gdbarch, insn, dsc);
7095 default:
7096 return arm_copy_undef (gdbarch, insn, dsc);
7097 }
7098 else
7099 return arm_copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
7100 }
7101
7102 static int
7103 arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
7104 struct regcache *regs,
7105 struct displaced_step_closure *dsc)
7106 {
7107 if (bit (insn, 27) == 0)
7108 return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
7109 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
7110 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
7111 {
7112 case 0x0: case 0x2:
7113 return arm_copy_unmodified (gdbarch, insn, "srs", dsc);
7114
7115 case 0x1: case 0x3:
7116 return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);
7117
7118 case 0x4: case 0x5: case 0x6: case 0x7:
7119 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
7120
7121 case 0x8:
7122 switch ((insn & 0xe00000) >> 21)
7123 {
7124 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
7125 /* stc/stc2. */
7126 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7127
7128 case 0x2:
7129 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7130
7131 default:
7132 return arm_copy_undef (gdbarch, insn, dsc);
7133 }
7134
7135 case 0x9:
7136 {
7137 int rn_f = (bits (insn, 16, 19) == 0xf);
7138 switch ((insn & 0xe00000) >> 21)
7139 {
7140 case 0x1: case 0x3:
7141 /* ldc/ldc2 imm (undefined for rn == pc). */
7142 return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
7143 : arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7144
7145 case 0x2:
7146 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7147
7148 case 0x4: case 0x5: case 0x6: case 0x7:
7149 /* ldc/ldc2 lit (undefined for rn != pc). */
7150 return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
7151 : arm_copy_undef (gdbarch, insn, dsc);
7152
7153 default:
7154 return arm_copy_undef (gdbarch, insn, dsc);
7155 }
7156 }
7157
7158 case 0xa:
7159 return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
7160
7161 case 0xb:
7162 if (bits (insn, 16, 19) == 0xf)
7163 /* ldc/ldc2 lit. */
7164 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7165 else
7166 return arm_copy_undef (gdbarch, insn, dsc);
7167
7168 case 0xc:
7169 if (bit (insn, 4))
7170 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7171 else
7172 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7173
7174 case 0xd:
7175 if (bit (insn, 4))
7176 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7177 else
7178 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7179
7180 default:
7181 return arm_copy_undef (gdbarch, insn, dsc);
7182 }
7183 }
7184
7185 /* Decode miscellaneous instructions in dp/misc encoding space. */
7186
7187 static int
7188 arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
7189 struct regcache *regs,
7190 struct displaced_step_closure *dsc)
7191 {
7192 unsigned int op2 = bits (insn, 4, 6);
7193 unsigned int op = bits (insn, 21, 22);
7194 unsigned int op1 = bits (insn, 16, 19);
7195
7196 switch (op2)
7197 {
7198 case 0x0:
7199 return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
7200
7201 case 0x1:
7202 if (op == 0x1) /* bx. */
7203 return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
7204 else if (op == 0x3)
7205 return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
7206 else
7207 return arm_copy_undef (gdbarch, insn, dsc);
7208
7209 case 0x2:
7210 if (op == 0x1)
7211 /* Not really supported. */
7212 return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
7213 else
7214 return arm_copy_undef (gdbarch, insn, dsc);
7215
7216 case 0x3:
7217 if (op == 0x1)
7218 return arm_copy_bx_blx_reg (gdbarch, insn,
7219 regs, dsc); /* blx register. */
7220 else
7221 return arm_copy_undef (gdbarch, insn, dsc);
7222
7223 case 0x5:
7224 return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
7225
7226 case 0x7:
7227 if (op == 0x1)
7228 return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
7229 else if (op == 0x3)
7230 /* Not really supported. */
7231 return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
7232
7233 default:
7234 return arm_copy_undef (gdbarch, insn, dsc);
7235 }
7236 }
7237
7238 static int
7239 arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
7240 struct regcache *regs,
7241 struct displaced_step_closure *dsc)
7242 {
7243 if (bit (insn, 25))
7244 switch (bits (insn, 20, 24))
7245 {
7246 case 0x10:
7247 return arm_copy_unmodified (gdbarch, insn, "movw", dsc);
7248
7249 case 0x14:
7250 return arm_copy_unmodified (gdbarch, insn, "movt", dsc);
7251
7252 case 0x12: case 0x16:
7253 return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);
7254
7255 default:
7256 return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
7257 }
7258 else
7259 {
7260 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
7261
7262 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
7263 return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
7264 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
7265 return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
7266 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
7267 return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
7268 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
7269 return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
7270 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
7271 return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
7272 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
7273 return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
7274 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
7275 /* 2nd arg means "unprivileged". */
7276 return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
7277 dsc);
7278 }
7279
7280 /* Should be unreachable. */
7281 return 1;
7282 }
7283
7284 static int
7285 arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
7286 struct regcache *regs,
7287 struct displaced_step_closure *dsc)
7288 {
7289 int a = bit (insn, 25), b = bit (insn, 4);
7290 uint32_t op1 = bits (insn, 20, 24);
7291 int rn_f = bits (insn, 16, 19) == 0xf;
7292
7293 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
7294 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
7295 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
7296 else if ((!a && (op1 & 0x17) == 0x02)
7297 || (a && (op1 & 0x17) == 0x02 && !b))
7298 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
7299 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
7300 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
7301 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
7302 else if ((!a && (op1 & 0x17) == 0x03)
7303 || (a && (op1 & 0x17) == 0x03 && !b))
7304 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
7305 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
7306 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
7307 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
7308 else if ((!a && (op1 & 0x17) == 0x06)
7309 || (a && (op1 & 0x17) == 0x06 && !b))
7310 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
7311 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
7312 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
7313 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
7314 else if ((!a && (op1 & 0x17) == 0x07)
7315 || (a && (op1 & 0x17) == 0x07 && !b))
7316 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
7317
7318 /* Should be unreachable. */
7319 return 1;
7320 }
7321
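/* Decode an ARM media instruction (parallel add/subtract, pack/unpack,
   saturate, reverse, bit-field and usad8/usada8 operations).  Each case is
   either copied unmodified or treated as undefined.  */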
7322 static int
7323 arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
7324 struct displaced_step_closure *dsc)
7325 {
7326 switch (bits (insn, 20, 24))
7327 {
7328 case 0x00: case 0x01: case 0x02: case 0x03:
7329 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
7330
7331 case 0x04: case 0x05: case 0x06: case 0x07:
7332 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
7333
7334 case 0x08: case 0x09: case 0x0a: case 0x0b:
7335 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
7336 return arm_copy_unmodified (gdbarch, insn,
7337 "decode/pack/unpack/saturate/reverse", dsc);
7338
7339 case 0x18:
7340 if (bits (insn, 5, 7) == 0) /* op2. */
7341 {
7342 if (bits (insn, 12, 15) == 0xf)
7343 return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
7344 else
7345 return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
7346 }
7347 else
7348 return arm_copy_undef (gdbarch, insn, dsc);
7349
7350 case 0x1a: case 0x1b:
7351 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
7352 return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
7353 else
7354 return arm_copy_undef (gdbarch, insn, dsc);
7355
7356 case 0x1c: case 0x1d:
7357 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
7358 {
7359 if (bits (insn, 0, 3) == 0xf)
7360 return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
7361 else
7362 return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
7363 }
7364 else
7365 return arm_copy_undef (gdbarch, insn, dsc);
7366
7367 case 0x1e: case 0x1f:
7368 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
7369 return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
7370 else
7371 return arm_copy_undef (gdbarch, insn, dsc);
7372 }
7373
7374 /* Should be unreachable. */
7375 return 1;
7376 }
7377
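/* Decode a branch, branch-with-link or block-transfer instruction: bit 25
   distinguishes B/BL from LDM/STM.  */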
7378 static int
7379 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
7380 struct regcache *regs,
7381 struct displaced_step_closure *dsc)
7382 {
7383 if (bit (insn, 25))
7384 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
7385 else
7386 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
7387 }
7388
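/* Decode a VFP/Neon extension register load/store instruction (vldm, vstm,
   vldr, vstr, vpush, vpop and the 64-bit register transfers mrrc/mcrr).  */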
7389 static int
7390 arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
7391 struct regcache *regs,
7392 struct displaced_step_closure *dsc)
7393 {
7394 unsigned int opcode = bits (insn, 20, 24);
7395
7396 switch (opcode)
7397 {
7398 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
7399 return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
7400
7401 case 0x08: case 0x0a: case 0x0c: case 0x0e:
7402 case 0x12: case 0x16:
7403 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
7404
7405 case 0x09: case 0x0b: case 0x0d: case 0x0f:
7406 case 0x13: case 0x17:
7407 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
7408
7409 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
7410 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
7411 /* Note: no writeback for these instructions. Bit 25 will always be
7412 zero though (via caller), so the following works OK. */
7413 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7414 }
7415
7416 /* Should be unreachable. */
7417 return 1;
7418 }
7419
7420 /* Decode shifted register instructions. */
7421
7422 static int
7423 thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
7424 uint16_t insn2, struct regcache *regs,
7425 struct displaced_step_closure *dsc)
7426 {
7427 /* PC is only allowed to be used in the MOV instruction. */
7428
7429 unsigned int op = bits (insn1, 5, 8);
7430 unsigned int rn = bits (insn1, 0, 3);
7431
7432 if (op == 0x2 && rn == 0xf) /* MOV */
7433 return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
7434 else
7435 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7436 "dp (shift reg)", dsc);
7437 }
7438
7439
7440 /* Decode extension register load/store. Exactly the same as
7441 arm_decode_ext_reg_ld_st. */
7442
7443 static int
7444 thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
7445 uint16_t insn2, struct regcache *regs,
7446 struct displaced_step_closure *dsc)
7447 {
7448 unsigned int opcode = bits (insn1, 4, 8);
7449
7450 switch (opcode)
7451 {
7452 case 0x04: case 0x05:
7453 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7454 "vfp/neon vmov", dsc);
7455
7456 case 0x08: case 0x0c: /* 01x00 */
7457 case 0x0a: case 0x0e: /* 01x10 */
7458 case 0x12: case 0x16: /* 10x10 */
7459 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7460 "vfp/neon vstm/vpush", dsc);
7461
7462 case 0x09: case 0x0d: /* 01x01 */
7463 case 0x0b: case 0x0f: /* 01x11 */
7464 case 0x13: case 0x17: /* 10x11 */
7465 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7466 "vfp/neon vldm/vpop", dsc);
7467
7468 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
7469 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7470 "vstr", dsc);
7471 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
7472 return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
7473 }
7474
7475 /* Should be unreachable. */
7476 return 1;
7477 }
7478
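/* Decode a supervisor call or coprocessor instruction: SVC, coprocessor
   load/store and register transfers, and VFP/Neon operations that share the
   same encoding space.  */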
7479 static int
7480 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7481 struct regcache *regs, struct displaced_step_closure *dsc)
7482 {
7483 unsigned int op1 = bits (insn, 20, 25);
7484 int op = bit (insn, 4);
7485 unsigned int coproc = bits (insn, 8, 11);
7486 unsigned int rn = bits (insn, 16, 19);
7487
7488 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7489 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7490 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7491 && (coproc & 0xe) != 0xa)
7492 /* stc/stc2. */
7493 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7494 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7495 && (coproc & 0xe) != 0xa)
7496 /* ldc/ldc2 imm/lit. */
7497 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7498 else if ((op1 & 0x3e) == 0x00)
7499 return arm_copy_undef (gdbarch, insn, dsc);
7500 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7501 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7502 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7503 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7504 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7505 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7506 else if ((op1 & 0x30) == 0x20 && !op)
7507 {
7508 if ((coproc & 0xe) == 0xa)
7509 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7510 else
7511 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7512 }
7513 else if ((op1 & 0x30) == 0x20 && op)
7514 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7515 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7516 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7517 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7518 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7519 else if ((op1 & 0x30) == 0x30)
7520 return arm_copy_svc (gdbarch, insn, regs, dsc);
7521 else
7522 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7523 }
7524
7525 static int
7526 thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
7527 uint16_t insn2, struct regcache *regs,
7528 struct displaced_step_closure *dsc)
7529 {
7530 unsigned int coproc = bits (insn2, 8, 11);
7531 unsigned int op1 = bits (insn1, 4, 9);
7532 unsigned int bit_5_8 = bits (insn1, 5, 8);
7533 unsigned int bit_9 = bit (insn1, 9);
7534 unsigned int bit_4 = bit (insn1, 4);
7535 unsigned int rn = bits (insn1, 0, 3);
7536
7537 if (bit_9 == 0)
7538 {
7539 if (bit_5_8 == 2)
7540 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7541 "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
7542 dsc);
7543 else if (bit_5_8 == 0) /* UNDEFINED. */
7544 return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
7545 else
7546 {
7547 /* coproc is 101x.  SIMD/VFP, ext registers load/store. */
7548 if ((coproc & 0xe) == 0xa)
7549 return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
7550 dsc);
7551 else /* coproc is not 101x. */
7552 {
7553 if (bit_4 == 0) /* STC/STC2. */
7554 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7555 "stc/stc2", dsc);
7556 else /* LDC/LDC2 {literal, immediate}. */
7557 return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
7558 regs, dsc);
7559 }
7560 }
7561 }
7562 else
7563 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);
7564
7565 return 0;
7566 }
7567
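/* Common preparation for displacing a PC-relative ADR: pre-load Rd with the
   original PC value, so that the copied instruction can be rewritten as an
   ADD or SUB of the immediate into Rd.  */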
7568 static void
7569 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7570 struct displaced_step_closure *dsc, int rd)
7571 {
7572 /* ADR Rd, #imm
7573
7574 Rewrite as:
7575
7576 Preparation: Rd <- PC
7577 Insn: ADD Rd, #imm
7578 Cleanup: Null.
7579 */
7580
7581 /* Rd <- PC */
7582 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7583 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7584 }
7585
7586 static int
7587 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7588 struct displaced_step_closure *dsc,
7589 int rd, unsigned int imm)
7590 {
7591
7592 /* Encoding T2: ADDS Rd, #imm */
7593 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7594
7595 install_pc_relative (gdbarch, regs, dsc, rd);
7596
7597 return 0;
7598 }
7599
7600 static int
7601 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
7602 struct regcache *regs,
7603 struct displaced_step_closure *dsc)
7604 {
7605 unsigned int rd = bits (insn, 8, 10);
7606 unsigned int imm8 = bits (insn, 0, 7);
7607
7608 if (debug_displaced)
7609 fprintf_unfiltered (gdb_stdlog,
7610 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7611 rd, imm8, insn);
7612
7613 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
7614 }
7615
7616 static int
7617 thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
7618 uint16_t insn2, struct regcache *regs,
7619 struct displaced_step_closure *dsc)
7620 {
7621 unsigned int rd = bits (insn2, 8, 11);
7622 /* The immediate field has the same encoding in ADR, ADD and SUB, so we
7623 simply extract the raw immediate encoding rather than computing its value.
7624 When generating the ADD or SUB instruction, we can then simply OR the
7625 immediate into the encoding. */
7626 unsigned int imm_3_8 = insn2 & 0x70ff;
7627 unsigned int imm_i = insn1 & 0x0400; /* Clear all bits except bit 10. */
7628
7629 if (debug_displaced)
7630 fprintf_unfiltered (gdb_stdlog,
7631 "displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
7632 rd, imm_i, imm_3_8, insn1, insn2);
7633
7634 if (bit (insn1, 7)) /* ADR encoding T2 (SUB form). */
7635 {
7636 /* Emit SUB Rd, Rd, #imm (SUB encoding T3). */
7637 dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
7638 dsc->modinsn[1] = ((rd << 8) | imm_3_8);
7639 }
7640 else /* ADR encoding T3 (ADD form). */
7641 {
7642 /* Emit ADD Rd, Rd, #imm (ADD encoding T3). */
7643 dsc->modinsn[0] = (0xf100 | rd | imm_i);
7644 dsc->modinsn[1] = ((rd << 8) | imm_3_8);
7645 }
7646 dsc->numinsns = 2;
7647
7648 install_pc_relative (gdbarch, regs, dsc, rd);
7649
7650 return 0;
7651 }
7652
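/* Copy a 16-bit Thumb LDR (literal) instruction for displaced stepping: the
   PC-relative load is rewritten as a register-offset load, using scratch
   registers that hold the aligned PC value and the immediate offset.  */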
7653 static int
7654 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
7655 struct regcache *regs,
7656 struct displaced_step_closure *dsc)
7657 {
7658 unsigned int rt = bits (insn1, 8, 10);
7659 unsigned int pc;
7660 int imm8 = (bits (insn1, 0, 7) << 2);
7661 CORE_ADDR from = dsc->insn_addr;
7662
7663 /* LDR Rd, #imm8
7664
7665 Rewrite as:
7666
7667 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
7668
7669 Insn: LDR R0, [R2, R3];
7670 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
7671
7672 if (debug_displaced)
7673 fprintf_unfiltered (gdb_stdlog,
7674 "displaced: copying thumb ldr r%d [pc #%d]\n"
7675 , rt, imm8);
7676
7677 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
7678 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
7679 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
7680 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7681 /* The assembler calculates the required value of the offset from the
7682 Align(PC,4) value of this instruction to the label. */
7683 pc = pc & 0xfffffffc;
7684
7685 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
7686 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
7687
7688 dsc->rd = rt;
7689 dsc->u.ldst.xfersize = 4;
7690 dsc->u.ldst.rn = 0;
7691 dsc->u.ldst.immed = 0;
7692 dsc->u.ldst.writeback = 0;
7693 dsc->u.ldst.restore_r4 = 0;
7694
7695 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
7696
7697 dsc->cleanup = &cleanup_load;
7698
7699 return 0;
7700 }
7701
7702 /* Copy Thumb cbnz/cbz instruction. */
7703
7704 static int
7705 thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
7706 struct regcache *regs,
7707 struct displaced_step_closure *dsc)
7708 {
7709 int non_zero = bit (insn1, 11);
7710 unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
7711 CORE_ADDR from = dsc->insn_addr;
7712 int rn = bits (insn1, 0, 2);
7713 int rn_val = displaced_read_reg (regs, dsc, rn);
7714
7715 dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
7716 /* CBNZ and CBZ do not affect the condition flags.  If the condition is
7717 true, set it to INST_AL so that cleanup_branch knows the branch is taken;
7718 otherwise leave it alone and cleanup_branch will do nothing. */
7719 if (dsc->u.branch.cond)
7720 {
7721 dsc->u.branch.cond = INST_AL;
7722 dsc->u.branch.dest = from + 4 + imm5;
7723 }
7724 else
7725 dsc->u.branch.dest = from + 2;
7726
7727 dsc->u.branch.link = 0;
7728 dsc->u.branch.exchange = 0;
7729
7730 if (debug_displaced)
7731 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
7732 " insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
7733 rn, rn_val, insn1, dsc->u.branch.dest);
7734
7735 dsc->modinsn[0] = THUMB_NOP;
7736
7737 dsc->cleanup = &cleanup_branch;
7738 return 0;
7739 }
7740
7741 /* Copy Table Branch Byte/Halfword (TBB/TBH). */
7742 static int
7743 thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
7744 uint16_t insn2, struct regcache *regs,
7745 struct displaced_step_closure *dsc)
7746 {
7747 ULONGEST rn_val, rm_val;
7748 int is_tbh = bit (insn2, 4);
7749 CORE_ADDR halfwords = 0;
7750 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7751
7752 rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
7753 rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));
7754
7755 if (is_tbh)
7756 {
7757 gdb_byte buf[2];
7758
7759 target_read_memory (rn_val + 2 * rm_val, buf, 2);
7760 halfwords = extract_unsigned_integer (buf, 2, byte_order);
7761 }
7762 else
7763 {
7764 gdb_byte buf[1];
7765
7766 target_read_memory (rn_val + rm_val, buf, 1);
7767 halfwords = extract_unsigned_integer (buf, 1, byte_order);
7768 }
7769
7770 if (debug_displaced)
7771 fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
7772 " offset 0x%x\n", is_tbh ? "tbh" : "tbb",
7773 (unsigned int) rn_val, (unsigned int) rm_val,
7774 (unsigned int) halfwords);
7775
7776 dsc->u.branch.cond = INST_AL;
7777 dsc->u.branch.link = 0;
7778 dsc->u.branch.exchange = 0;
7779 dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;
7780
7781 dsc->cleanup = &cleanup_branch;
7782
7783 return 0;
7784 }
7785
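/* Cleanup for a displaced 16-bit Thumb "POP {..., pc}" with a full register
   list (see thumb_copy_pop_pc_16bit below): the new PC value was popped into
   r7, the value destined for r7 was parked in r8, and the original r8 was
   saved in tmp[0].  */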
7786 static void
7787 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
7788 struct displaced_step_closure *dsc)
7789 {
7790 /* PC <- r7 */
7791 int val = displaced_read_reg (regs, dsc, 7);
7792 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
7793
7794 /* r7 <- r8 */
7795 val = displaced_read_reg (regs, dsc, 8);
7796 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
7797
7798 /* r8 <- tmp[0] */
7799 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
7800
7801 }
7802
7803 static int
7804 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
7805 struct regcache *regs,
7806 struct displaced_step_closure *dsc)
7807 {
7808 dsc->u.block.regmask = insn1 & 0x00ff;
7809
7810 /* Rewrite instruction: POP {rX, rY, ..., rZ, PC}
7811 to:
7812
7813 (1) register list is full, that is, r0-r7 are used.
7814 Prepare: tmp[0] <- r8
7815
7816 POP {r0, r1, ...., r6, r7}; remove PC from reglist
7817 MOV r8, r7; Move value of r7 to r8;
7818 POP {r7}; Store PC value into r7.
7819
7820 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
7821
7822 (2) register list is not full, supposing there are N registers in
7823 register list (except PC, 0 <= N <= 7).
7824 Prepare: for each i, 0 - N, tmp[i] <- ri.
7825
7826 POP {r0, r1, ...., rN};
7827
7828 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
7829 from tmp[] properly.
7830 */
7831 if (debug_displaced)
7832 fprintf_unfiltered (gdb_stdlog,
7833 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
7834 dsc->u.block.regmask, insn1);
7835
7836 if (dsc->u.block.regmask == 0xff)
7837 {
7838 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
7839
7840 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
7841 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
7842 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
7843
7844 dsc->numinsns = 3;
7845 dsc->cleanup = &cleanup_pop_pc_16bit_all;
7846 }
7847 else
7848 {
7849 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
7850 unsigned int new_regmask, bit = 1;
7851 unsigned int to = 0, from = 0, i, new_rn;
7852
7853 for (i = 0; i < num_in_list + 1; i++)
7854 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7855
7856 new_regmask = (1 << (num_in_list + 1)) - 1;
7857
7858 if (debug_displaced)
7859 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
7860 "{..., pc}: original reg list %.4x,"
7861 " modified list %.4x\n"),
7862 (int) dsc->u.block.regmask, new_regmask);
7863
7864 dsc->u.block.regmask |= 0x8000;
7865 dsc->u.block.writeback = 0;
7866 dsc->u.block.cond = INST_AL;
7867
7868 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
7869
7870 dsc->cleanup = &cleanup_block_load_pc;
7871 }
7872
7873 return 0;
7874 }
7875
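/* Decode a 16-bit Thumb instruction for displaced stepping, dispatching on
   the top four opcode bits, and copy it with any PC adjustment required.  */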
7876 static void
7877 thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
7878 struct regcache *regs,
7879 struct displaced_step_closure *dsc)
7880 {
7881 unsigned short op_bit_12_15 = bits (insn1, 12, 15);
7882 unsigned short op_bit_10_11 = bits (insn1, 10, 11);
7883 int err = 0;
7884
7885 /* 16-bit thumb instructions. */
7886 switch (op_bit_12_15)
7887 {
7888 /* Shift (immediate), add, subtract, move and compare. */
7889 case 0: case 1: case 2: case 3:
7890 err = thumb_copy_unmodified_16bit (gdbarch, insn1,
7891 "shift/add/sub/mov/cmp",
7892 dsc);
7893 break;
7894 case 4:
7895 switch (op_bit_10_11)
7896 {
7897 case 0: /* Data-processing */
7898 err = thumb_copy_unmodified_16bit (gdbarch, insn1,
7899 "data-processing",
7900 dsc);
7901 break;
7902 case 1: /* Special data instructions and branch and exchange. */
7903 {
7904 unsigned short op = bits (insn1, 7, 9);
7905 if (op == 6 || op == 7) /* BX or BLX */
7906 err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
7907 else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers. */
7908 err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
7909 else
7910 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
7911 dsc);
7912 }
7913 break;
7914 default: /* LDR (literal) */
7915 err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
7916 }
7917 break;
7918 case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
7919 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
7920 break;
7921 case 10:
7922 if (op_bit_10_11 < 2) /* Generate PC-relative address */
7923 err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
7924 else /* Generate SP-relative address */
7925 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
7926 break;
7927 case 11: /* Misc 16-bit instructions */
7928 {
7929 switch (bits (insn1, 8, 11))
7930 {
7931 case 1: case 3: case 9: case 11: /* CBNZ, CBZ */
7932 err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
7933 break;
7934 case 12: case 13: /* POP */
7935 if (bit (insn1, 8)) /* PC is in register list. */
7936 err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
7937 else
7938 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
7939 break;
7940 case 15: /* If-Then, and hints */
7941 if (bits (insn1, 0, 3))
7942 /* If-Then makes up to four following instructions conditional.
7943 The IT instruction itself is not conditional, so handle it
7944 as an ordinary unmodified instruction. */
7945 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
7946 dsc);
7947 else
7948 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
7949 break;
7950 default:
7951 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
7952 }
7953 }
7954 break;
7955 case 12:
7956 if (op_bit_10_11 < 2) /* Store multiple registers */
7957 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
7958 else /* Load multiple registers */
7959 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
7960 break;
7961 case 13: /* Conditional branch and supervisor call */
7962 if (bits (insn1, 9, 11) != 7) /* conditional branch */
7963 err = thumb_copy_b (gdbarch, insn1, dsc);
7964 else
7965 err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
7966 break;
7967 case 14: /* Unconditional branch */
7968 err = thumb_copy_b (gdbarch, insn1, dsc);
7969 break;
7970 default:
7971 err = 1;
7972 }
7973
7974 if (err)
7975 internal_error (__FILE__, __LINE__,
7976 _("thumb_process_displaced_16bit_insn: Instruction decode error"));
7977 }
7978
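/* Decode a 32-bit Thumb-2 load (byte, halfword or word) or memory hint
   instruction, dispatching on bits 5-6 of the first halfword.  */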
7979 static int
7980 decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
7981 uint16_t insn1, uint16_t insn2,
7982 struct regcache *regs,
7983 struct displaced_step_closure *dsc)
7984 {
7985 int rt = bits (insn2, 12, 15);
7986 int rn = bits (insn1, 0, 3);
7987 int op1 = bits (insn1, 7, 8);
7988 int err = 0;
7989
7990 switch (bits (insn1, 5, 6))
7991 {
7992 case 0: /* Load byte and memory hints */
7993 if (rt == 0xf) /* PLD/PLI */
7994 {
7995 if (rn == 0xf)
7996 /* PLD literal or Encoding T3 of PLI(immediate, literal). */
7997 return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
7998 else
7999 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8000 "pli/pld", dsc);
8001 }
8002 else
8003 {
8004 if (rn == 0xf) /* LDRB/LDRSB (literal) */
8005 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
8006 1);
8007 else
8008 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8009 "ldrb{reg, immediate}/ldrbt",
8010 dsc);
8011 }
8012
8013 break;
8014 case 1: /* Load halfword and memory hints. */
8015 if (rt == 0xf) /* PLD{W} and Unalloc memory hint. */
8016 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8017 "pld/unalloc memhint", dsc);
8018 else
8019 {
8020 if (rn == 0xf)
8021 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
8022 2);
8023 else
8024 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8025 "ldrh/ldrht", dsc);
8026 }
8027 break;
8028 case 2: /* Load word */
8029 {
8030 int insn2_bit_8_11 = bits (insn2, 8, 11);
8031
8032 if (rn == 0xf)
8033 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
8034 else if (op1 == 0x1) /* Encoding T3 */
8035 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
8036 0, 1);
8037 else /* op1 == 0x0 */
8038 {
8039 if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
8040 /* LDR (immediate) */
8041 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
8042 dsc, bit (insn2, 8), 1);
8043 else if (insn2_bit_8_11 == 0xe) /* LDRT */
8044 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8045 "ldrt", dsc);
8046 else
8047 /* LDR (register) */
8048 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
8049 dsc, 0, 0);
8050 }
8051 break;
8052 }
8053 default:
8054 return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
8055 break;
8056 }
8057 return 0;
8058 }
8059
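/* Decode a 32-bit Thumb-2 instruction for displaced stepping, dispatching
   first on bits 11-12 of the first halfword and then on the sub-group
   fields, and copy it with any PC adjustment required.  */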
8060 static void
8061 thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
8062 uint16_t insn2, struct regcache *regs,
8063 struct displaced_step_closure *dsc)
8064 {
8065 int err = 0;
8066 unsigned short op = bit (insn2, 15);
8067 unsigned int op1 = bits (insn1, 11, 12);
8068
8069 switch (op1)
8070 {
8071 case 1:
8072 {
8073 switch (bits (insn1, 9, 10))
8074 {
8075 case 0:
8076 if (bit (insn1, 6))
8077 {
8078 /* Load/store {dual, exclusive}, table branch. */
8079 if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
8080 && bits (insn2, 5, 7) == 0)
8081 err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
8082 dsc);
8083 else
8084 /* PC is not allowed to be used in load/store {dual, exclusive}
8085 instructions. */
8086 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8087 "load/store dual/ex", dsc);
8088 }
8089 else /* load/store multiple */
8090 {
8091 switch (bits (insn1, 7, 8))
8092 {
8093 case 0: case 3: /* SRS, RFE */
8094 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8095 "srs/rfe", dsc);
8096 break;
8097 case 1: case 2: /* LDM/STM/PUSH/POP */
8098 err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
8099 break;
8100 }
8101 }
8102 break;
8103
8104 case 1:
8105 /* Data-processing (shift register). */
8106 err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
8107 dsc);
8108 break;
8109 default: /* Coprocessor instructions. */
8110 err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
8111 break;
8112 }
8113 break;
8114 }
8115 case 2: /* op1 = 2 */
8116 if (op) /* Branch and misc control. */
8117 {
8118 if (bit (insn2, 14) /* BLX/BL */
8119 || bit (insn2, 12) /* Unconditional branch */
8120 || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
8121 err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
8122 else
8123 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8124 "misc ctrl", dsc);
8125 }
8126 else
8127 {
8128 if (bit (insn1, 9)) /* Data processing (plain binary imm). */
8129 {
8130 int op = bits (insn1, 4, 8);
8131 int rn = bits (insn1, 0, 3);
8132 if ((op == 0 || op == 0xa) && rn == 0xf)
8133 err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
8134 regs, dsc);
8135 else
8136 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8137 "dp/pb", dsc);
8138 }
8139 else /* Data processing (modified immediate) */
8140 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8141 "dp/mi", dsc);
8142 }
8143 break;
8144 case 3: /* op1 = 3 */
8145 switch (bits (insn1, 9, 10))
8146 {
8147 case 0:
8148 if (bit (insn1, 4))
8149 err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
8150 regs, dsc);
8151 else /* NEON Load/Store and Store single data item */
8152 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8153 "neon elt/struct load/store",
8154 dsc);
8155 break;
8156 case 1: /* op1 = 3, bits (9, 10) == 1 */
8157 switch (bits (insn1, 7, 8))
8158 {
8159 case 0: case 1: /* Data processing (register) */
8160 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8161 "dp(reg)", dsc);
8162 break;
8163 case 2: /* Multiply and absolute difference */
8164 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8165 "mul/mua/diff", dsc);
8166 break;
8167 case 3: /* Long multiply and divide */
8168 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8169 "lmul/lmua", dsc);
8170 break;
8171 }
8172 break;
8173 default: /* Coprocessor instructions */
8174 err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
8175 break;
8176 }
8177 break;
8178 default:
8179 err = 1;
8180 }
8181
8182 if (err)
8183 internal_error (__FILE__, __LINE__,
8184 _("thumb_process_displaced_32bit_insn: Instruction decode error"));
8185
8186 }
8187
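/* Copy the Thumb instruction at FROM for displaced stepping: read the first
   halfword to determine whether it is a 16-bit or a 32-bit instruction and
   dispatch to the matching decoder.  */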
8188 static void
8189 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8190 CORE_ADDR to, struct regcache *regs,
8191 struct displaced_step_closure *dsc)
8192 {
8193 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8194 uint16_t insn1
8195 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8196
8197 if (debug_displaced)
8198 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8199 "at %.8lx\n", insn1, (unsigned long) from);
8200
8201 dsc->is_thumb = 1;
8202 dsc->insn_size = thumb_insn_size (insn1);
8203 if (thumb_insn_size (insn1) == 4)
8204 {
8205 uint16_t insn2
8206 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8207 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8208 }
8209 else
8210 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8211 }
8212
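/* Fill in the displaced stepping closure DSC for the instruction at FROM,
   which will be single-stepped in the scratch space at TO.  Thumb
   instructions are handed off to the Thumb decoders; ARM instructions are
   decoded here according to the top-level encoding table.  */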
8213 void
8214 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8215 CORE_ADDR to, struct regcache *regs,
8216 struct displaced_step_closure *dsc)
8217 {
8218 int err = 0;
8219 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8220 uint32_t insn;
8221
8222 /* Most displaced instructions use a 1-instruction scratch space, so set this
8223 here and override below if/when necessary. */
8224 dsc->numinsns = 1;
8225 dsc->insn_addr = from;
8226 dsc->scratch_base = to;
8227 dsc->cleanup = NULL;
8228 dsc->wrote_to_pc = 0;
8229
8230 if (!displaced_in_arm_mode (regs))
8231 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
8232
8233 dsc->is_thumb = 0;
8234 dsc->insn_size = 4;
8235 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
8236 if (debug_displaced)
8237 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
8238 "at %.8lx\n", (unsigned long) insn,
8239 (unsigned long) from);
8240
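  /* Instructions with a condition field of 0b1111 live in the
     "unconditional" encoding space and are decoded separately.  Everything
     else is dispatched on bits <27:25> of the instruction combined with
     bit <4>, mirroring the top-level ARM encoding table.  */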
8241 if ((insn & 0xf0000000) == 0xf0000000)
8242 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
8243 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
8244 {
8245 case 0x0: case 0x1: case 0x2: case 0x3:
8246 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
8247 break;
8248
8249 case 0x4: case 0x5: case 0x6:
8250 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
8251 break;
8252
8253 case 0x7:
8254 err = arm_decode_media (gdbarch, insn, dsc);
8255 break;
8256
8257 case 0x8: case 0x9: case 0xa: case 0xb:
8258 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
8259 break;
8260
8261 case 0xc: case 0xd: case 0xe: case 0xf:
8262 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
8263 break;
8264 }
8265
8266 if (err)
8267 internal_error (__FILE__, __LINE__,
8268 _("arm_process_displaced_insn: Instruction decode error"));
8269 }
8270
8271 /* Actually set up the scratch space for a displaced instruction. */
8272
8273 void
8274 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
8275 CORE_ADDR to, struct displaced_step_closure *dsc)
8276 {
8277 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8278 unsigned int i, len, offset;
8279 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8280 int size = dsc->is_thumb ? 2 : 4;
8281 const unsigned char *bkp_insn;
8282
8283 offset = 0;
8284 /* Poke modified instruction(s). */
8285 for (i = 0; i < dsc->numinsns; i++)
8286 {
8287 if (debug_displaced)
8288 {
8289 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
8290 if (size == 4)
8291 fprintf_unfiltered (gdb_stdlog, "%.8lx",
8292 dsc->modinsn[i]);
8293 else if (size == 2)
8294 fprintf_unfiltered (gdb_stdlog, "%.4x",
8295 (unsigned short)dsc->modinsn[i]);
8296
8297 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
8298 (unsigned long) to + offset);
8299
8300 }
8301 write_memory_unsigned_integer (to + offset, size,
8302 byte_order_for_code,
8303 dsc->modinsn[i]);
8304 offset += size;
8305 }
8306
8307 /* Choose the correct breakpoint instruction. */
8308 if (dsc->is_thumb)
8309 {
8310 bkp_insn = tdep->thumb_breakpoint;
8311 len = tdep->thumb_breakpoint_size;
8312 }
8313 else
8314 {
8315 bkp_insn = tdep->arm_breakpoint;
8316 len = tdep->arm_breakpoint_size;
8317 }
8318
8319 /* Put breakpoint afterwards. */
8320 write_memory (to + offset, bkp_insn, len);
8321
8322 if (debug_displaced)
8323 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
8324 paddress (gdbarch, from), paddress (gdbarch, to));
8325 }
8326
8327 /* Entry point for copying an instruction into scratch space for displaced
8328 stepping. */
8329
8330 struct displaced_step_closure *
8331 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8332 CORE_ADDR from, CORE_ADDR to,
8333 struct regcache *regs)
8334 {
8335 struct displaced_step_closure *dsc
8336 = xmalloc (sizeof (struct displaced_step_closure));
8337 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8338 arm_displaced_init_closure (gdbarch, from, to, dsc);
8339
8340 return dsc;
8341 }
8342
8343 /* Entry point for cleaning things up after a displaced instruction has been
8344 single-stepped. */
8345
8346 void
8347 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8348 struct displaced_step_closure *dsc,
8349 CORE_ADDR from, CORE_ADDR to,
8350 struct regcache *regs)
8351 {
8352 if (dsc->cleanup)
8353 dsc->cleanup (gdbarch, regs, dsc);
8354
8355 if (!dsc->wrote_to_pc)
8356 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8357 dsc->insn_addr + dsc->insn_size);
8358
8359 }
8360
8361 #include "bfd-in2.h"
8362 #include "libcoff.h"
8363
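/* GDB's wrapper around the opcodes disassembler.  For addresses in Thumb
   code, hand the disassembler a fake COFF Thumb symbol so that it switches
   to decoding Thumb instructions.  */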
8364 static int
8365 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
8366 {
8367 struct gdbarch *gdbarch = info->application_data;
8368
8369 if (arm_pc_is_thumb (gdbarch, memaddr))
8370 {
8371 static asymbol *asym;
8372 static combined_entry_type ce;
8373 static struct coff_symbol_struct csym;
8374 static struct bfd fake_bfd;
8375 static bfd_target fake_target;
8376
8377 if (csym.native == NULL)
8378 {
8379 /* Create a fake symbol vector containing a Thumb symbol.
8380 This is solely so that the code in print_insn_little_arm()
8381 and print_insn_big_arm() in opcodes/arm-dis.c will detect
8382 the presence of a Thumb symbol and switch to decoding
8383 Thumb instructions. */
8384
8385 fake_target.flavour = bfd_target_coff_flavour;
8386 fake_bfd.xvec = &fake_target;
8387 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
8388 csym.native = &ce;
8389 csym.symbol.the_bfd = &fake_bfd;
8390 csym.symbol.name = "fake";
8391 asym = (asymbol *) & csym;
8392 }
8393
8394 memaddr = UNMAKE_THUMB_ADDR (memaddr);
8395 info->symbols = &asym;
8396 }
8397 else
8398 info->symbols = NULL;
8399
8400 if (info->endian == BFD_ENDIAN_BIG)
8401 return print_insn_big_arm (memaddr, info);
8402 else
8403 return print_insn_little_arm (memaddr, info);
8404 }
8405
8406 /* The following define instruction sequences that will cause ARM
8407 cpu's to take an undefined instruction trap. These are used to
8408 signal a breakpoint to GDB.
8409
8410 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8411 modes. A different instruction is required for each mode. The ARM
8412 cpu's can also be big or little endian. Thus four different
8413 instructions are needed to support all cases.
8414
8415 Note: ARMv4 defines several new instructions that will take the
8416 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8417 not in fact add the new instructions. The new undefined
8418 instructions in ARMv4 are all instructions that had no defined
8419 behaviour in earlier chips.  There is no guarantee that they will
8420 raise an exception; they may instead be treated as NOPs.  In practice,
8421 it may only be safe to rely on instructions matching:
8422
8423 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8424 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8425 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8426
8427 Even this may only be true if the condition predicate is true.  The
8428 following use a condition predicate of ALWAYS so it is always TRUE.
8429
8430 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8431 and NetBSD all use a software interrupt rather than an undefined
8432 instruction to force a trap.  This can be handled by the
8433 abi-specific code during establishment of the gdbarch vector. */
8434
8435 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
8436 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
8437 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
8438 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
8439
8440 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
8441 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
8442 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
8443 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8444
8445 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8446 the program counter value to determine whether an ARM or Thumb
8447 breakpoint should be used. It returns a pointer to a string of
8448 bytes that encode a breakpoint instruction, stores the length of
8449 the string to *lenptr, and adjusts the program counter (if
8450 necessary) to point to the actual memory location where the
8451 breakpoint should be inserted. */
8452
8453 static const unsigned char *
8454 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
8455 {
8456 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8457 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8458
8459 if (arm_pc_is_thumb (gdbarch, *pcptr))
8460 {
8461 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
8462
8463 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
8464 check whether we are replacing a 32-bit instruction. */
8465 if (tdep->thumb2_breakpoint != NULL)
8466 {
8467 gdb_byte buf[2];
8468 if (target_read_memory (*pcptr, buf, 2) == 0)
8469 {
8470 unsigned short inst1;
8471 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
8472 if (thumb_insn_size (inst1) == 4)
8473 {
8474 *lenptr = tdep->thumb2_breakpoint_size;
8475 return tdep->thumb2_breakpoint;
8476 }
8477 }
8478 }
8479
8480 *lenptr = tdep->thumb_breakpoint_size;
8481 return tdep->thumb_breakpoint;
8482 }
8483 else
8484 {
8485 *lenptr = tdep->arm_breakpoint_size;
8486 return tdep->arm_breakpoint;
8487 }
8488 }
8489
8490 static void
8491 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8492 int *kindptr)
8493 {
8494 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8495
8496 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8497
8498 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8499 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8500 that this is not confused with a 32-bit ARM breakpoint. */
8501 *kindptr = 3;
8502 }
8503
8504 /* Extract from an array REGBUF containing the (raw) register state a
8505 function return value of type TYPE, and copy that, in virtual
8506 format, into VALBUF. */
8507
8508 static void
8509 arm_extract_return_value (struct type *type, struct regcache *regs,
8510 gdb_byte *valbuf)
8511 {
8512 struct gdbarch *gdbarch = get_regcache_arch (regs);
8513 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8514
8515 if (TYPE_CODE_FLT == TYPE_CODE (type))
8516 {
8517 switch (gdbarch_tdep (gdbarch)->fp_model)
8518 {
8519 case ARM_FLOAT_FPA:
8520 {
8521 /* The value is in register F0 in internal format. We need to
8522 extract the raw value and then convert it to the desired
8523 internal type. */
8524 bfd_byte tmpbuf[FP_REGISTER_SIZE];
8525
8526 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
8527 convert_from_extended (floatformat_from_type (type), tmpbuf,
8528 valbuf, gdbarch_byte_order (gdbarch));
8529 }
8530 break;
8531
8532 case ARM_FLOAT_SOFT_FPA:
8533 case ARM_FLOAT_SOFT_VFP:
8534 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8535 not using the VFP ABI code. */
8536 case ARM_FLOAT_VFP:
8537 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
8538 if (TYPE_LENGTH (type) > 4)
8539 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
8540 valbuf + INT_REGISTER_SIZE);
8541 break;
8542
8543 default:
8544 internal_error (__FILE__, __LINE__,
8545 _("arm_extract_return_value: "
8546 "Floating point model not supported"));
8547 break;
8548 }
8549 }
8550 else if (TYPE_CODE (type) == TYPE_CODE_INT
8551 || TYPE_CODE (type) == TYPE_CODE_CHAR
8552 || TYPE_CODE (type) == TYPE_CODE_BOOL
8553 || TYPE_CODE (type) == TYPE_CODE_PTR
8554 || TYPE_CODE (type) == TYPE_CODE_REF
8555 || TYPE_CODE (type) == TYPE_CODE_ENUM)
8556 {
8557 /* If the type is a plain integer, then the access is
8558 straight-forward. Otherwise we have to play around a bit
8559 more. */
8560 int len = TYPE_LENGTH (type);
8561 int regno = ARM_A1_REGNUM;
8562 ULONGEST tmp;
8563
8564 while (len > 0)
8565 {
8566 /* By using store_unsigned_integer we avoid having to do
8567 anything special for small big-endian values. */
8568 regcache_cooked_read_unsigned (regs, regno++, &tmp);
8569 store_unsigned_integer (valbuf,
8570 (len > INT_REGISTER_SIZE
8571 ? INT_REGISTER_SIZE : len),
8572 byte_order, tmp);
8573 len -= INT_REGISTER_SIZE;
8574 valbuf += INT_REGISTER_SIZE;
8575 }
8576 }
8577 else
8578 {
8579 /* For a structure or union the behaviour is as if the value had
8580 been stored to word-aligned memory and then loaded into
8581 registers with 32-bit load instruction(s). */
8582 int len = TYPE_LENGTH (type);
8583 int regno = ARM_A1_REGNUM;
8584 bfd_byte tmpbuf[INT_REGISTER_SIZE];
8585
8586 while (len > 0)
8587 {
8588 regcache_cooked_read (regs, regno++, tmpbuf);
8589 memcpy (valbuf, tmpbuf,
8590 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
8591 len -= INT_REGISTER_SIZE;
8592 valbuf += INT_REGISTER_SIZE;
8593 }
8594 }
8595 }
8596
8597
8598 /* Will a function return an aggregate type in memory or in a
8599 register? Return 0 if an aggregate type can be returned in a
8600 register, 1 if it must be returned in memory. */
8601
8602 static int
8603 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
8604 {
8605 int nRc;
8606 enum type_code code;
8607
8608 CHECK_TYPEDEF (type);
8609
8610 /* In the ARM ABI, "integer" like aggregate types are returned in
8611 registers. For an aggregate type to be integer like, its size
8612 must be less than or equal to INT_REGISTER_SIZE and the
8613 offset of each addressable subfield must be zero. Note that bit
8614 fields are not addressable, and all addressable subfields of
8615 unions always start at offset zero.
8616
8617 This function is based on the behaviour of GCC 2.95.1.
8618 See: gcc/arm.c: arm_return_in_memory() for details.
8619
8620 Note: No version of GCC before GCC 2.95.2 sets up the
8621 parameters correctly for a function returning the following
8622 structure: struct { float f;}; This should be returned in memory,
8623 not a register. Richard Earnshaw sent me a patch, but I do not
8624 know of any way to detect if a function like the above has been
8625 compiled with the correct calling convention. */
8626
8627 /* All aggregate types that won't fit in a register must be returned
8628 in memory. */
8629 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
8630 {
8631 return 1;
8632 }
8633
8634 /* The AAPCS says all aggregates not larger than a word are returned
8635 in a register. */
8636 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
8637 return 0;
8638
8639 /* The only aggregate types that can be returned in a register are
8640 structs and unions. Arrays must be returned in memory. */
8641 code = TYPE_CODE (type);
8642 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
8643 {
8644 return 1;
8645 }
8646
8647 /* Assume all other aggregate types can be returned in a register.
8648 Run a check for structures, unions and arrays. */
8649 nRc = 0;
8650
8651 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
8652 {
8653 int i;
8654 /* Need to check if this struct/union is "integer" like. For
8655 this to be true, its size must be less than or equal to
8656 INT_REGISTER_SIZE and the offset of each addressable
8657 subfield must be zero. Note that bit fields are not
8658 addressable, and unions always start at offset zero. If any
8659 of the subfields is a floating point type, the struct/union
8660 cannot be an integer type. */
8661
8662 /* For each field in the object, check:
8663 1) Is it FP? --> yes, nRc = 1;
8664 2) Is it addressable (bitpos != 0) and
8665 not packed (bitsize == 0)?
8666 --> yes, nRc = 1
8667 */
8668
8669 for (i = 0; i < TYPE_NFIELDS (type); i++)
8670 {
8671 enum type_code field_type_code;
8672 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
8673 i)));
8674
8675 /* Is it a floating point type field? */
8676 if (field_type_code == TYPE_CODE_FLT)
8677 {
8678 nRc = 1;
8679 break;
8680 }
8681
8682 /* If bitpos != 0, then we have to care about it. */
8683 if (TYPE_FIELD_BITPOS (type, i) != 0)
8684 {
8685 /* Bitfields are not addressable. If the field bitsize is
8686 zero, then the field is not packed. Hence it cannot be
8687 a bitfield or any other packed type. */
8688 if (TYPE_FIELD_BITSIZE (type, i) == 0)
8689 {
8690 nRc = 1;
8691 break;
8692 }
8693 }
8694 }
8695 }
8696
8697 return nRc;
8698 }
8699
8700 /* Write into appropriate registers a function return value of type
8701 TYPE, given in virtual format. */
8702
8703 static void
8704 arm_store_return_value (struct type *type, struct regcache *regs,
8705 const gdb_byte *valbuf)
8706 {
8707 struct gdbarch *gdbarch = get_regcache_arch (regs);
8708 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8709
8710 if (TYPE_CODE (type) == TYPE_CODE_FLT)
8711 {
8712 char buf[MAX_REGISTER_SIZE];
8713
8714 switch (gdbarch_tdep (gdbarch)->fp_model)
8715 {
8716 case ARM_FLOAT_FPA:
8717
8718 convert_to_extended (floatformat_from_type (type), buf, valbuf,
8719 gdbarch_byte_order (gdbarch));
8720 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
8721 break;
8722
8723 case ARM_FLOAT_SOFT_FPA:
8724 case ARM_FLOAT_SOFT_VFP:
8725 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8726 not using the VFP ABI code. */
8727 case ARM_FLOAT_VFP:
8728 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
8729 if (TYPE_LENGTH (type) > 4)
8730 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
8731 valbuf + INT_REGISTER_SIZE);
8732 break;
8733
8734 default:
8735 internal_error (__FILE__, __LINE__,
8736 _("arm_store_return_value: Floating "
8737 "point model not supported"));
8738 break;
8739 }
8740 }
8741 else if (TYPE_CODE (type) == TYPE_CODE_INT
8742 || TYPE_CODE (type) == TYPE_CODE_CHAR
8743 || TYPE_CODE (type) == TYPE_CODE_BOOL
8744 || TYPE_CODE (type) == TYPE_CODE_PTR
8745 || TYPE_CODE (type) == TYPE_CODE_REF
8746 || TYPE_CODE (type) == TYPE_CODE_ENUM)
8747 {
8748 if (TYPE_LENGTH (type) <= 4)
8749 {
8750 /* Values of one word or less are zero/sign-extended and
8751 returned in r0. */
8752 bfd_byte tmpbuf[INT_REGISTER_SIZE];
8753 LONGEST val = unpack_long (type, valbuf);
8754
8755 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
8756 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
8757 }
8758 else
8759 {
8760 /* Integral values greater than one word are stored in consecutive
8761 registers starting with r0. This will always be a multiple of
8762 the register size. */
8763 int len = TYPE_LENGTH (type);
8764 int regno = ARM_A1_REGNUM;
8765
8766 while (len > 0)
8767 {
8768 regcache_cooked_write (regs, regno++, valbuf);
8769 len -= INT_REGISTER_SIZE;
8770 valbuf += INT_REGISTER_SIZE;
8771 }
8772 }
8773 }
8774 else
8775 {
8776 /* For a structure or union the behaviour is as if the value had
8777 been stored to word-aligned memory and then loaded into
8778 registers with 32-bit load instruction(s). */
8779 int len = TYPE_LENGTH (type);
8780 int regno = ARM_A1_REGNUM;
8781 bfd_byte tmpbuf[INT_REGISTER_SIZE];
8782
8783 while (len > 0)
8784 {
8785 memcpy (tmpbuf, valbuf,
8786 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
8787 regcache_cooked_write (regs, regno++, tmpbuf);
8788 len -= INT_REGISTER_SIZE;
8789 valbuf += INT_REGISTER_SIZE;
8790 }
8791 }
8792 }
8793
8794
8795 /* Handle function return values. */
8796
8797 static enum return_value_convention
8798 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
8799 struct type *valtype, struct regcache *regcache,
8800 gdb_byte *readbuf, const gdb_byte *writebuf)
8801 {
8802 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8803 enum arm_vfp_cprc_base_type vfp_base_type;
8804 int vfp_base_count;
8805
8806 if (arm_vfp_abi_for_function (gdbarch, func_type)
8807 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
8808 {
8809 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
8810 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
8811 int i;
8812 for (i = 0; i < vfp_base_count; i++)
8813 {
8814 if (reg_char == 'q')
8815 {
8816 if (writebuf)
8817 arm_neon_quad_write (gdbarch, regcache, i,
8818 writebuf + i * unit_length);
8819
8820 if (readbuf)
8821 arm_neon_quad_read (gdbarch, regcache, i,
8822 readbuf + i * unit_length);
8823 }
8824 else
8825 {
8826 char name_buf[4];
8827 int regnum;
8828
8829 sprintf (name_buf, "%c%d", reg_char, i);
8830 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8831 strlen (name_buf));
8832 if (writebuf)
8833 regcache_cooked_write (regcache, regnum,
8834 writebuf + i * unit_length);
8835 if (readbuf)
8836 regcache_cooked_read (regcache, regnum,
8837 readbuf + i * unit_length);
8838 }
8839 }
8840 return RETURN_VALUE_REGISTER_CONVENTION;
8841 }
8842
8843 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
8844 || TYPE_CODE (valtype) == TYPE_CODE_UNION
8845 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
8846 {
8847 if (tdep->struct_return == pcc_struct_return
8848 || arm_return_in_memory (gdbarch, valtype))
8849 return RETURN_VALUE_STRUCT_CONVENTION;
8850 }
8851
8852 if (writebuf)
8853 arm_store_return_value (valtype, regcache, writebuf);
8854
8855 if (readbuf)
8856 arm_extract_return_value (valtype, regcache, readbuf);
8857
8858 return RETURN_VALUE_REGISTER_CONVENTION;
8859 }
8860
8861
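/* Fetch the PC that a longjmp through the jmp_buf pointed to by r0 would
   resume at, using the ABI-specific jb_pc slot offset and element size.
   Return zero if the jmp_buf cannot be read.  */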
8862 static int
8863 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
8864 {
8865 struct gdbarch *gdbarch = get_frame_arch (frame);
8866 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8867 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8868 CORE_ADDR jb_addr;
8869 char buf[INT_REGISTER_SIZE];
8870
8871 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
8872
8873 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
8874 INT_REGISTER_SIZE))
8875 return 0;
8876
8877 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
8878 return 1;
8879 }
8880
8881 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
8882 return the target PC. Otherwise return 0. */
8883
8884 CORE_ADDR
8885 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
8886 {
8887 char *name;
8888 int namelen;
8889 CORE_ADDR start_addr;
8890
8891 /* Find the starting address and name of the function containing the PC. */
8892 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
8893 return 0;
8894
8895 /* If PC is in a Thumb call or return stub, return the address of the
8896 target PC, which is in a register. The thunk functions are called
8897 _call_via_xx, where x is the register name. The possible names
8898 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
8899 functions, named __ARM_call_via_r[0-7]. */
8900 if (strncmp (name, "_call_via_", 10) == 0
8901 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
8902 {
8903 /* Use the name suffix to determine which register contains the
8904 target PC. */
8905 static char *table[15] =
8906 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
8907 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
8908 };
8909 int regno;
8910 int offset = strlen (name) - 2;
8911
8912 for (regno = 0; regno <= 14; regno++)
8913 if (strcmp (&name[offset], table[regno]) == 0)
8914 return get_frame_register_unsigned (frame, regno);
8915 }
8916
8917 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
8918 non-interworking calls to foo. We could decode the stubs
8919 to find the target but it's easier to use the symbol table. */
8920 namelen = strlen (name);
8921 if (name[0] == '_' && name[1] == '_'
8922 && ((namelen > 2 + strlen ("_from_thumb")
8923 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
8924 strlen ("_from_thumb")) == 0)
8925 || (namelen > 2 + strlen ("_from_arm")
8926 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
8927 strlen ("_from_arm")) == 0)))
8928 {
8929 char *target_name;
8930 int target_len = namelen - 2;
8931 struct minimal_symbol *minsym;
8932 struct objfile *objfile;
8933 struct obj_section *sec;
8934
8935 if (name[namelen - 1] == 'b')
8936 target_len -= strlen ("_from_thumb");
8937 else
8938 target_len -= strlen ("_from_arm");
8939
8940 target_name = alloca (target_len + 1);
8941 memcpy (target_name, name + 2, target_len);
8942 target_name[target_len] = '\0';
8943
8944 sec = find_pc_section (pc);
8945 objfile = (sec == NULL) ? NULL : sec->objfile;
8946 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
8947 if (minsym != NULL)
8948 return SYMBOL_VALUE_ADDRESS (minsym);
8949 else
8950 return 0;
8951 }
8952
8953 return 0; /* not a stub */
8954 }
8955
8956 static void
8957 set_arm_command (char *args, int from_tty)
8958 {
8959 printf_unfiltered (_("\
8960 \"set arm\" must be followed by an apporpriate subcommand.\n"));
8961 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
8962 }
8963
8964 static void
8965 show_arm_command (char *args, int from_tty)
8966 {
8967 cmd_show_list (showarmcmdlist, from_tty, "");
8968 }
8969
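/* Re-select the target architecture so that a changed "set arm ..." setting
   (such as the ABI or floating-point model) takes effect immediately.  */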
8970 static void
8971 arm_update_current_architecture (void)
8972 {
8973 struct gdbarch_info info;
8974
8975 /* If the current architecture is not ARM, we have nothing to do. */
8976 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
8977 return;
8978
8979 /* Update the architecture. */
8980 gdbarch_info_init (&info);
8981
8982 if (!gdbarch_update_p (info))
8983 internal_error (__FILE__, __LINE__, _("could not update architecture"));
8984 }
8985
8986 static void
8987 set_fp_model_sfunc (char *args, int from_tty,
8988 struct cmd_list_element *c)
8989 {
8990 enum arm_float_model fp_model;
8991
8992 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
8993 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
8994 {
8995 arm_fp_model = fp_model;
8996 break;
8997 }
8998
8999 if (fp_model == ARM_FLOAT_LAST)
9000 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
9001 current_fp_model);
9002
9003 arm_update_current_architecture ();
9004 }
9005
9006 static void
9007 show_fp_model (struct ui_file *file, int from_tty,
9008 struct cmd_list_element *c, const char *value)
9009 {
9010 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9011
9012 if (arm_fp_model == ARM_FLOAT_AUTO
9013 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
9014 fprintf_filtered (file, _("\
9015 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
9016 fp_model_strings[tdep->fp_model]);
9017 else
9018 fprintf_filtered (file, _("\
9019 The current ARM floating point model is \"%s\".\n"),
9020 fp_model_strings[arm_fp_model]);
9021 }
9022
9023 static void
9024 arm_set_abi (char *args, int from_tty,
9025 struct cmd_list_element *c)
9026 {
9027 enum arm_abi_kind arm_abi;
9028
9029 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9030 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9031 {
9032 arm_abi_global = arm_abi;
9033 break;
9034 }
9035
9036 if (arm_abi == ARM_ABI_LAST)
9037 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9038 arm_abi_string);
9039
9040 arm_update_current_architecture ();
9041 }
9042
9043 static void
9044 arm_show_abi (struct ui_file *file, int from_tty,
9045 struct cmd_list_element *c, const char *value)
9046 {
9047 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9048
9049 if (arm_abi_global == ARM_ABI_AUTO
9050 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
9051 fprintf_filtered (file, _("\
9052 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
9053 arm_abi_strings[tdep->arm_abi]);
9054 else
9055 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
9056 arm_abi_string);
9057 }
9058
9059 static void
9060 arm_show_fallback_mode (struct ui_file *file, int from_tty,
9061 struct cmd_list_element *c, const char *value)
9062 {
9063 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9064
9065 fprintf_filtered (file,
9066 _("The current execution mode assumed "
9067 "(when symbols are unavailable) is \"%s\".\n"),
9068 arm_fallback_mode_string);
9069 }
9070
9071 static void
9072 arm_show_force_mode (struct ui_file *file, int from_tty,
9073 struct cmd_list_element *c, const char *value)
9074 {
9075 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9076
9077 fprintf_filtered (file,
9078 _("The current execution mode assumed "
9079 "(even when symbols are available) is \"%s\".\n"),
9080 arm_force_mode_string);
9081 }
9082
9083 /* If the user changes the register disassembly style used for info
9084 register and other commands, we also have to switch the style used
9085 in opcodes for disassembly output. This function is run by the "set
9086 arm disassembler" command, and does that. */
9087
9088 static void
9089 set_disassembly_style_sfunc (char *args, int from_tty,
9090 struct cmd_list_element *c)
9091 {
9092 set_disassembly_style ();
9093 }
9094 \f
9095 /* Return the ARM register name corresponding to register I. */
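/* Pseudo registers follow the raw registers: when VFP pseudo registers
are available the first 32 pseudo slots are the single-precision
registers s0-s31, and when NEON pseudo registers are available the
next 16 are the quad registers q0-q15. */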
9096 static const char *
9097 arm_register_name (struct gdbarch *gdbarch, int i)
9098 {
9099 const int num_regs = gdbarch_num_regs (gdbarch);
9100
9101 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
9102 && i >= num_regs && i < num_regs + 32)
9103 {
9104 static const char *const vfp_pseudo_names[] = {
9105 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9106 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9107 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9108 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9109 };
9110
9111 return vfp_pseudo_names[i - num_regs];
9112 }
9113
9114 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
9115 && i >= num_regs + 32 && i < num_regs + 32 + 16)
9116 {
9117 static const char *const neon_pseudo_names[] = {
9118 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9119 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9120 };
9121
9122 return neon_pseudo_names[i - num_regs - 32];
9123 }
9124
9125 if (i >= ARRAY_SIZE (arm_register_names))
9126 /* These registers are only supported on targets which supply
9127 an XML description. */
9128 return "";
9129
9130 return arm_register_names[i];
9131 }
9132
9133 static void
9134 set_disassembly_style (void)
9135 {
9136 int current;
9137
9138 /* Find the style that the user wants. */
9139 for (current = 0; current < num_disassembly_options; current++)
9140 if (disassembly_style == valid_disassembly_styles[current])
9141 break;
9142 gdb_assert (current < num_disassembly_options);
9143
9144 /* Synchronize the disassembler. */
9145 set_arm_regname_option (current);
9146 }
9147
9148 /* Test whether the coff symbol specific value corresponds to a Thumb
9149 function. */
9150
9151 static int
9152 coff_sym_is_thumb (int val)
9153 {
9154 return (val == C_THUMBEXT
9155 || val == C_THUMBSTAT
9156 || val == C_THUMBEXTFUNC
9157 || val == C_THUMBSTATFUNC
9158 || val == C_THUMBLABEL);
9159 }
9160
9161 /* arm_coff_make_msymbol_special()
9162 arm_elf_make_msymbol_special()
9163
9164 These functions test whether the COFF or ELF symbol corresponds to
9165 an address in thumb code, and set a "special" bit in a minimal
9166 symbol to indicate that it does. */
9167
9168 static void
9169 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
9170 {
9171 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
9172 == ST_BRANCH_TO_THUMB)
9173 MSYMBOL_SET_SPECIAL (msym);
9174 }
9175
9176 static void
9177 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
9178 {
9179 if (coff_sym_is_thumb (val))
9180 MSYMBOL_SET_SPECIAL (msym);
9181 }
9182
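/* Free the per-objfile mapping symbol data when OBJFILE is destroyed. */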
9183 static void
9184 arm_objfile_data_free (struct objfile *objfile, void *arg)
9185 {
9186 struct arm_per_objfile *data = arg;
9187 unsigned int i;
9188
9189 for (i = 0; i < objfile->obfd->section_count; i++)
9190 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
9191 }
9192
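/* Record an ARM ELF mapping symbol in the per-objfile data. "$a" marks
the start of a sequence of ARM instructions, "$t" the start of Thumb
instructions and "$d" the start of data; the symbols for each section
are kept sorted by address so that the instruction set in effect at a
given address can be looked up later. */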
9193 static void
9194 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
9195 asymbol *sym)
9196 {
9197 const char *name = bfd_asymbol_name (sym);
9198 struct arm_per_objfile *data;
9199 VEC(arm_mapping_symbol_s) **map_p;
9200 struct arm_mapping_symbol new_map_sym;
9201
9202 gdb_assert (name[0] == '$');
9203 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
9204 return;
9205
9206 data = objfile_data (objfile, arm_objfile_data_key);
9207 if (data == NULL)
9208 {
9209 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
9210 struct arm_per_objfile);
9211 set_objfile_data (objfile, arm_objfile_data_key, data);
9212 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
9213 objfile->obfd->section_count,
9214 VEC(arm_mapping_symbol_s) *);
9215 }
9216 map_p = &data->section_maps[bfd_get_section (sym)->index];
9217
9218 new_map_sym.value = sym->value;
9219 new_map_sym.type = name[1];
9220
9221 /* Assume that most mapping symbols appear in order of increasing
9222 value. If they were randomly distributed, it would be faster to
9223 always push here and then sort at first use. */
9224 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
9225 {
9226 struct arm_mapping_symbol *prev_map_sym;
9227
9228 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
9229 if (prev_map_sym->value >= sym->value)
9230 {
9231 unsigned int idx;
9232 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
9233 arm_compare_mapping_symbols);
9234 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
9235 return;
9236 }
9237 }
9238
9239 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
9240 }
9241
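/* Implementation of the gdbarch write_pc method. Besides writing the
PC itself, in 32-bit (APCS-32) mode also update the Thumb (T) bit in
the CPSR so that execution resumes in the instruction set matching
the new PC. */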
9242 static void
9243 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
9244 {
9245 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9246 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
9247
9248 /* If necessary, set the T bit. */
9249 if (arm_apcs_32)
9250 {
9251 ULONGEST val, t_bit;
9252 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
9253 t_bit = arm_psr_thumb_bit (gdbarch);
9254 if (arm_pc_is_thumb (gdbarch, pc))
9255 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9256 val | t_bit);
9257 else
9258 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9259 val & ~t_bit);
9260 }
9261 }
9262
9263 /* Read the contents of a NEON quad register, by reading from two
9264 double registers. This is used to implement the quad pseudo
9265 registers, and for argument passing in case the quad registers are
9266 missing; vectors are passed in quad registers when using the VFP
9267 ABI, even if a NEON unit is not present. REGNUM is the index of
9268 the quad register, in [0, 15]. */
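/* For example, q1 is composed of d2 and d3; on a little-endian target
d2 supplies bytes 0..7 of the 16-byte value and d3 bytes 8..15. */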
9269
9270 static enum register_status
9271 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
9272 int regnum, gdb_byte *buf)
9273 {
9274 char name_buf[4];
9275 gdb_byte reg_buf[8];
9276 int offset, double_regnum;
9277 enum register_status status;
9278
9279 sprintf (name_buf, "d%d", regnum << 1);
9280 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9281 strlen (name_buf));
9282
9283 /* d0 is always the least significant half of q0. */
9284 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9285 offset = 8;
9286 else
9287 offset = 0;
9288
9289 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9290 if (status != REG_VALID)
9291 return status;
9292 memcpy (buf + offset, reg_buf, 8);
9293
9294 offset = 8 - offset;
9295 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
9296 if (status != REG_VALID)
9297 return status;
9298 memcpy (buf + offset, reg_buf, 8);
9299
9300 return REG_VALID;
9301 }
9302
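/* Implementation of the gdbarch pseudo_register_read method. Relative
to the first pseudo register, numbers 0-31 are the single-precision
VFP registers s0-s31, each stored in one half of a double register;
if NEON pseudo registers are present, numbers 32-47 are the quad
registers q0-q15, each spanning a pair of double registers. */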
9303 static enum register_status
9304 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
9305 int regnum, gdb_byte *buf)
9306 {
9307 const int num_regs = gdbarch_num_regs (gdbarch);
9308 char name_buf[4];
9309 gdb_byte reg_buf[8];
9310 int offset, double_regnum;
9311
9312 gdb_assert (regnum >= num_regs);
9313 regnum -= num_regs;
9314
9315 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9316 /* Quad-precision register. */
9317 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
9318 else
9319 {
9320 enum register_status status;
9321
9322 /* Single-precision register. */
9323 gdb_assert (regnum < 32);
9324
9325 /* s0 is always the least significant half of d0. */
9326 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9327 offset = (regnum & 1) ? 0 : 4;
9328 else
9329 offset = (regnum & 1) ? 4 : 0;
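/* E.g. on a little-endian target s1 occupies bytes 4..7 of d0's raw
contents, so odd-numbered registers use offset 4. */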
9330
9331 sprintf (name_buf, "d%d", regnum >> 1);
9332 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9333 strlen (name_buf));
9334
9335 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9336 if (status == REG_VALID)
9337 memcpy (buf, reg_buf + offset, 4);
9338 return status;
9339 }
9340 }
9341
9342 /* Store the contents of BUF to a NEON quad register, by writing to
9343 two double registers. This is used to implement the quad pseudo
9344 registers, and for argument passing in case the quad registers are
9345 missing; vectors are passed in quad registers when using the VFP
9346 ABI, even if a NEON unit is not present. REGNUM is the index
9347 of the quad register, in [0, 15]. */
9348
9349 static void
9350 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
9351 int regnum, const gdb_byte *buf)
9352 {
9353 char name_buf[4];
9354 gdb_byte reg_buf[8];
9355 int offset, double_regnum;
9356
9357 sprintf (name_buf, "d%d", regnum << 1);
9358 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9359 strlen (name_buf));
9360
9361 /* d0 is always the least significant half of q0. */
9362 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9363 offset = 8;
9364 else
9365 offset = 0;
9366
9367 regcache_raw_write (regcache, double_regnum, buf + offset);
9368 offset = 8 - offset;
9369 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
9370 }
9371
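/* Implementation of the gdbarch pseudo_register_write method; the
store counterpart of arm_pseudo_read, using read-modify-write on the
underlying double register for single-precision pseudos. */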
9372 static void
9373 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
9374 int regnum, const gdb_byte *buf)
9375 {
9376 const int num_regs = gdbarch_num_regs (gdbarch);
9377 char name_buf[4];
9378 gdb_byte reg_buf[8];
9379 int offset, double_regnum;
9380
9381 gdb_assert (regnum >= num_regs);
9382 regnum -= num_regs;
9383
9384 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9385 /* Quad-precision register. */
9386 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
9387 else
9388 {
9389 /* Single-precision register. */
9390 gdb_assert (regnum < 32);
9391
9392 /* s0 is always the least significant half of d0. */
9393 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9394 offset = (regnum & 1) ? 0 : 4;
9395 else
9396 offset = (regnum & 1) ? 4 : 0;
9397
9398 sprintf (name_buf, "d%d", regnum >> 1);
9399 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9400 strlen (name_buf));
9401
9402 regcache_raw_read (regcache, double_regnum, reg_buf);
9403 memcpy (reg_buf + offset, buf, 4);
9404 regcache_raw_write (regcache, double_regnum, reg_buf);
9405 }
9406 }
9407
9408 static struct value *
9409 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
9410 {
9411 const int *reg_p = baton;
9412 return value_of_register (*reg_p, frame);
9413 }
9414 \f
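/* OS ABI sniffer for ARM ELF binaries. If the header carries the
legacy ELFOSABI_ARM value used by GNU tools, look for an ABI tag in
the note sections; anything else is left to the generic ELF sniffer. */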
9415 static enum gdb_osabi
9416 arm_elf_osabi_sniffer (bfd *abfd)
9417 {
9418 unsigned int elfosabi;
9419 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
9420
9421 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
9422
9423 if (elfosabi == ELFOSABI_ARM)
9424 /* GNU tools use this value. Check note sections in this case,
9425 as well. */
9426 bfd_map_over_sections (abfd,
9427 generic_elf_osabi_sniff_abi_tag_sections,
9428 &osabi);
9429
9430 /* Anything else will be handled by the generic ELF sniffer. */
9431 return osabi;
9432 }
9433
9434 static int
9435 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9436 struct reggroup *group)
9437 {
9438 /* The FPS register's type is INT, but it belongs to float_reggroup. Besides
9439 that, the FPS register belongs to save_reggroup, restore_reggroup, and
9440 all_reggroup, of course. */
9441 if (regnum == ARM_FPS_REGNUM)
9442 return (group == float_reggroup
9443 || group == save_reggroup
9444 || group == restore_reggroup
9445 || group == all_reggroup);
9446 else
9447 return default_register_reggroup_p (gdbarch, regnum, group);
9448 }
9449
9450 \f
9451 /* Initialize the current architecture based on INFO. If possible,
9452 re-use an architecture from ARCHES, which is a list of
9453 architectures already created during this debugging session.
9454
9455 Called e.g. at program startup, when reading a core file, and when
9456 reading a binary file. */
9457
9458 static struct gdbarch *
9459 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
9460 {
9461 struct gdbarch_tdep *tdep;
9462 struct gdbarch *gdbarch;
9463 struct gdbarch_list *best_arch;
9464 enum arm_abi_kind arm_abi = arm_abi_global;
9465 enum arm_float_model fp_model = arm_fp_model;
9466 struct tdesc_arch_data *tdesc_data = NULL;
9467 int i, is_m = 0;
9468 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
9469 int have_neon = 0;
9470 int have_fpa_registers = 1;
9471 const struct target_desc *tdesc = info.target_desc;
9472
9473 /* If we have an object to base this architecture on, try to determine
9474 its ABI. */
9475
9476 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
9477 {
9478 int ei_osabi, e_flags;
9479
9480 switch (bfd_get_flavour (info.abfd))
9481 {
9482 case bfd_target_aout_flavour:
9483 /* Assume it's an old APCS-style ABI. */
9484 arm_abi = ARM_ABI_APCS;
9485 break;
9486
9487 case bfd_target_coff_flavour:
9488 /* Assume it's an old APCS-style ABI. */
9489 /* XXX WinCE? */
9490 arm_abi = ARM_ABI_APCS;
9491 break;
9492
9493 case bfd_target_elf_flavour:
9494 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
9495 e_flags = elf_elfheader (info.abfd)->e_flags;
9496
9497 if (ei_osabi == ELFOSABI_ARM)
9498 {
9499 /* GNU tools used to use this value, but do not for EABI
9500 objects. There's nowhere to tag an EABI version
9501 anyway, so assume APCS. */
9502 arm_abi = ARM_ABI_APCS;
9503 }
9504 else if (ei_osabi == ELFOSABI_NONE)
9505 {
9506 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
9507 int attr_arch, attr_profile;
9508
9509 switch (eabi_ver)
9510 {
9511 case EF_ARM_EABI_UNKNOWN:
9512 /* Assume GNU tools. */
9513 arm_abi = ARM_ABI_APCS;
9514 break;
9515
9516 case EF_ARM_EABI_VER4:
9517 case EF_ARM_EABI_VER5:
9518 arm_abi = ARM_ABI_AAPCS;
9519 /* EABI binaries default to VFP float ordering.
9520 They may also contain build attributes that can
9521 be used to identify if the VFP argument-passing
9522 ABI is in use. */
9523 if (fp_model == ARM_FLOAT_AUTO)
9524 {
9525 #ifdef HAVE_ELF
9526 switch (bfd_elf_get_obj_attr_int (info.abfd,
9527 OBJ_ATTR_PROC,
9528 Tag_ABI_VFP_args))
9529 {
9530 case 0:
9531 /* "The user intended FP parameter/result
9532 passing to conform to AAPCS, base
9533 variant". */
9534 fp_model = ARM_FLOAT_SOFT_VFP;
9535 break;
9536 case 1:
9537 /* "The user intended FP parameter/result
9538 passing to conform to AAPCS, VFP
9539 variant". */
9540 fp_model = ARM_FLOAT_VFP;
9541 break;
9542 case 2:
9543 /* "The user intended FP parameter/result
9544 passing to conform to tool chain-specific
9545 conventions" - we don't know any such
9546 conventions, so leave it as "auto". */
9547 break;
9548 default:
9549 /* Attribute value not mentioned in the
9550 October 2008 ABI, so leave it as
9551 "auto". */
9552 break;
9553 }
9554 #else
9555 fp_model = ARM_FLOAT_SOFT_VFP;
9556 #endif
9557 }
9558 break;
9559
9560 default:
9561 /* Leave it as "auto". */
9562 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
9563 break;
9564 }
9565
9566 #ifdef HAVE_ELF
9567 /* Detect M-profile programs. This only works if the
9568 executable file includes build attributes; GCC does
9569 copy them to the executable, but e.g. RealView does
9570 not. */
9571 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9572 Tag_CPU_arch);
9573 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
9574 OBJ_ATTR_PROC,
9575 Tag_CPU_arch_profile);
9576 /* GCC specifies the profile for v6-M; RealView only
9577 specifies the profile for architectures starting with
9578 V7 (as opposed to architectures with a tag
9579 numerically greater than TAG_CPU_ARCH_V7). */
9580 if (!tdesc_has_registers (tdesc)
9581 && (attr_arch == TAG_CPU_ARCH_V6_M
9582 || attr_arch == TAG_CPU_ARCH_V6S_M
9583 || attr_profile == 'M'))
9584 tdesc = tdesc_arm_with_m;
9585 #endif
9586 }
9587
9588 if (fp_model == ARM_FLOAT_AUTO)
9589 {
9590 int e_flags = elf_elfheader (info.abfd)->e_flags;
9591
9592 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
9593 {
9594 case 0:
9595 /* Leave it as "auto". Strictly speaking this case
9596 means FPA, but almost nobody uses that now, and
9597 many toolchains fail to set the appropriate bits
9598 for the floating-point model they use. */
9599 break;
9600 case EF_ARM_SOFT_FLOAT:
9601 fp_model = ARM_FLOAT_SOFT_FPA;
9602 break;
9603 case EF_ARM_VFP_FLOAT:
9604 fp_model = ARM_FLOAT_VFP;
9605 break;
9606 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
9607 fp_model = ARM_FLOAT_SOFT_VFP;
9608 break;
9609 }
9610 }
9611
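/* BE8 images store instructions little-endian even though data is
big-endian, so adjust the byte order used for reading code. */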
9612 if (e_flags & EF_ARM_BE8)
9613 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
9614
9615 break;
9616
9617 default:
9618 /* Leave it as "auto". */
9619 break;
9620 }
9621 }
9622
9623 /* Check any target description for validity. */
9624 if (tdesc_has_registers (tdesc))
9625 {
9626 /* For most registers we require GDB's default names; but also allow
9627 the numeric names for sp / lr / pc, as a convenience. */
9628 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
9629 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
9630 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
9631
9632 const struct tdesc_feature *feature;
9633 int valid_p;
9634
9635 feature = tdesc_find_feature (tdesc,
9636 "org.gnu.gdb.arm.core");
9637 if (feature == NULL)
9638 {
9639 feature = tdesc_find_feature (tdesc,
9640 "org.gnu.gdb.arm.m-profile");
9641 if (feature == NULL)
9642 return NULL;
9643 else
9644 is_m = 1;
9645 }
9646
9647 tdesc_data = tdesc_data_alloc ();
9648
9649 valid_p = 1;
9650 for (i = 0; i < ARM_SP_REGNUM; i++)
9651 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9652 arm_register_names[i]);
9653 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9654 ARM_SP_REGNUM,
9655 arm_sp_names);
9656 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9657 ARM_LR_REGNUM,
9658 arm_lr_names);
9659 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9660 ARM_PC_REGNUM,
9661 arm_pc_names);
9662 if (is_m)
9663 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9664 ARM_PS_REGNUM, "xpsr");
9665 else
9666 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9667 ARM_PS_REGNUM, "cpsr");
9668
9669 if (!valid_p)
9670 {
9671 tdesc_data_cleanup (tdesc_data);
9672 return NULL;
9673 }
9674
9675 feature = tdesc_find_feature (tdesc,
9676 "org.gnu.gdb.arm.fpa");
9677 if (feature != NULL)
9678 {
9679 valid_p = 1;
9680 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
9681 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9682 arm_register_names[i]);
9683 if (!valid_p)
9684 {
9685 tdesc_data_cleanup (tdesc_data);
9686 return NULL;
9687 }
9688 }
9689 else
9690 have_fpa_registers = 0;
9691
9692 feature = tdesc_find_feature (tdesc,
9693 "org.gnu.gdb.xscale.iwmmxt");
9694 if (feature != NULL)
9695 {
9696 static const char *const iwmmxt_names[] = {
9697 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
9698 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
9699 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
9700 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
9701 };
9702
9703 valid_p = 1;
9704 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
9705 valid_p
9706 &= tdesc_numbered_register (feature, tdesc_data, i,
9707 iwmmxt_names[i - ARM_WR0_REGNUM]);
9708
9709 /* Check for the control registers, but do not fail if they
9710 are missing. */
9711 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
9712 tdesc_numbered_register (feature, tdesc_data, i,
9713 iwmmxt_names[i - ARM_WR0_REGNUM]);
9714
9715 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
9716 valid_p
9717 &= tdesc_numbered_register (feature, tdesc_data, i,
9718 iwmmxt_names[i - ARM_WR0_REGNUM]);
9719
9720 if (!valid_p)
9721 {
9722 tdesc_data_cleanup (tdesc_data);
9723 return NULL;
9724 }
9725 }
9726
9727 /* If we have a VFP unit, check whether the single precision registers
9728 are present. If not, then we will synthesize them as pseudo
9729 registers. */
9730 feature = tdesc_find_feature (tdesc,
9731 "org.gnu.gdb.arm.vfp");
9732 if (feature != NULL)
9733 {
9734 static const char *const vfp_double_names[] = {
9735 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
9736 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
9737 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
9738 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
9739 };
9740
9741 /* Require the double precision registers. There must be either
9742 16 or 32. */
9743 valid_p = 1;
9744 for (i = 0; i < 32; i++)
9745 {
9746 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9747 ARM_D0_REGNUM + i,
9748 vfp_double_names[i]);
9749 if (!valid_p)
9750 break;
9751 }
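/* If d16-d31 are absent but d0-d15 were all found, this is a
16-register unit (e.g. VFPv3-D16); accept it. */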
9752 if (!valid_p && i == 16)
9753 valid_p = 1;
9754
9755 /* Also require FPSCR. */
9756 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9757 ARM_FPSCR_REGNUM, "fpscr");
9758 if (!valid_p)
9759 {
9760 tdesc_data_cleanup (tdesc_data);
9761 return NULL;
9762 }
9763
9764 if (tdesc_unnumbered_register (feature, "s0") == 0)
9765 have_vfp_pseudos = 1;
9766
9767 have_vfp_registers = 1;
9768
9769 /* If we have VFP, also check for NEON. The architecture allows
9770 NEON without VFP (integer vector operations only), but GDB
9771 does not support that. */
9772 feature = tdesc_find_feature (tdesc,
9773 "org.gnu.gdb.arm.neon");
9774 if (feature != NULL)
9775 {
9776 /* NEON requires 32 double-precision registers. */
9777 if (i != 32)
9778 {
9779 tdesc_data_cleanup (tdesc_data);
9780 return NULL;
9781 }
9782
9783 /* If there are quad registers defined by the stub, use
9784 their type; otherwise (normally) provide them with
9785 the default type. */
9786 if (tdesc_unnumbered_register (feature, "q0") == 0)
9787 have_neon_pseudos = 1;
9788
9789 have_neon = 1;
9790 }
9791 }
9792 }
9793
9794 /* If there is already a candidate, use it. */
9795 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
9796 best_arch != NULL;
9797 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
9798 {
9799 if (arm_abi != ARM_ABI_AUTO
9800 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
9801 continue;
9802
9803 if (fp_model != ARM_FLOAT_AUTO
9804 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
9805 continue;
9806
9807 /* There are various other properties in tdep that we do not
9808 need to check here: those derived from a target description,
9809 since gdbarches with a different target description are
9810 automatically disqualified. */
9811
9812 /* Do check is_m, though, since it might come from the binary. */
9813 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
9814 continue;
9815
9816 /* Found a match. */
9817 break;
9818 }
9819
9820 if (best_arch != NULL)
9821 {
9822 if (tdesc_data != NULL)
9823 tdesc_data_cleanup (tdesc_data);
9824 return best_arch->gdbarch;
9825 }
9826
9827 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
9828 gdbarch = gdbarch_alloc (&info, tdep);
9829
9830 /* Record additional information about the architecture we are defining.
9831 These are gdbarch discriminators, like the OSABI. */
9832 tdep->arm_abi = arm_abi;
9833 tdep->fp_model = fp_model;
9834 tdep->is_m = is_m;
9835 tdep->have_fpa_registers = have_fpa_registers;
9836 tdep->have_vfp_registers = have_vfp_registers;
9837 tdep->have_vfp_pseudos = have_vfp_pseudos;
9838 tdep->have_neon_pseudos = have_neon_pseudos;
9839 tdep->have_neon = have_neon;
9840
9841 /* Breakpoints. */
9842 switch (info.byte_order_for_code)
9843 {
9844 case BFD_ENDIAN_BIG:
9845 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
9846 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
9847 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
9848 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
9849
9850 break;
9851
9852 case BFD_ENDIAN_LITTLE:
9853 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
9854 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
9855 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
9856 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
9857
9858 break;
9859
9860 default:
9861 internal_error (__FILE__, __LINE__,
9862 _("arm_gdbarch_init: bad byte order for float format"));
9863 }
9864
9865 /* On ARM targets char defaults to unsigned. */
9866 set_gdbarch_char_signed (gdbarch, 0);
9867
9868 /* Note: for displaced stepping, this includes the breakpoint, and one word
9869 of additional scratch space. This setting isn't used for anything besides
9870 displaced stepping at present. */
9871 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
9872
9873 /* This should be low enough for everything. */
9874 tdep->lowest_pc = 0x20;
9875 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
9876
9877 /* The default, for both APCS and AAPCS, is to return small
9878 structures in registers. */
9879 tdep->struct_return = reg_struct_return;
9880
9881 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
9882 set_gdbarch_frame_align (gdbarch, arm_frame_align);
9883
9884 set_gdbarch_write_pc (gdbarch, arm_write_pc);
9885
9886 /* Frame handling. */
9887 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
9888 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
9889 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
9890
9891 frame_base_set_default (gdbarch, &arm_normal_base);
9892
9893 /* Address manipulation. */
9894 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
9895 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
9896
9897 /* Advance PC across function entry code. */
9898 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
9899
9900 /* Detect whether PC is in function epilogue. */
9901 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
9902
9903 /* Skip trampolines. */
9904 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
9905
9906 /* The stack grows downward. */
9907 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
9908
9909 /* Breakpoint manipulation. */
9910 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
9911 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
9912 arm_remote_breakpoint_from_pc);
9913
9914 /* Information about registers, etc. */
9915 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
9916 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
9917 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
9918 set_gdbarch_register_type (gdbarch, arm_register_type);
9919 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
9920
9921 /* This "info float" is FPA-specific. Use the generic version if we
9922 do not have FPA. */
9923 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
9924 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
9925
9926 /* Internal <-> external register number maps. */
9927 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
9928 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
9929
9930 set_gdbarch_register_name (gdbarch, arm_register_name);
9931
9932 /* Returning results. */
9933 set_gdbarch_return_value (gdbarch, arm_return_value);
9934
9935 /* Disassembly. */
9936 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
9937
9938 /* Minsymbol frobbing. */
9939 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
9940 set_gdbarch_coff_make_msymbol_special (gdbarch,
9941 arm_coff_make_msymbol_special);
9942 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
9943
9944 /* Thumb-2 IT block support. */
9945 set_gdbarch_adjust_breakpoint_address (gdbarch,
9946 arm_adjust_breakpoint_address);
9947
9948 /* Virtual tables. */
9949 set_gdbarch_vbit_in_delta (gdbarch, 1);
9950
9951 /* Hook in the ABI-specific overrides, if they have been registered. */
9952 gdbarch_init_osabi (info, gdbarch);
9953
9954 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
9955
9956 /* Add some default predicates. */
9957 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
9958 dwarf2_append_unwinders (gdbarch);
9959 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
9960 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
9961
9962 /* Now that we have tuned the configuration, set a few final things
9963 based on what the OS ABI has told us. */
9964
9965 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
9966 binaries are always marked. */
9967 if (tdep->arm_abi == ARM_ABI_AUTO)
9968 tdep->arm_abi = ARM_ABI_APCS;
9969
9970 /* Watchpoints are not steppable. */
9971 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9972
9973 /* We used to default to FPA for generic ARM, but almost nobody
9974 uses that now, and we now provide a way for the user to force
9975 the model. So default to the most useful variant. */
9976 if (tdep->fp_model == ARM_FLOAT_AUTO)
9977 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
9978
9979 if (tdep->jb_pc >= 0)
9980 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
9981
9982 /* Floating point sizes and format. */
9983 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
9984 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
9985 {
9986 set_gdbarch_double_format
9987 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9988 set_gdbarch_long_double_format
9989 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9990 }
9991 else
9992 {
9993 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
9994 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
9995 }
9996
9997 if (have_vfp_pseudos)
9998 {
9999 /* NOTE: These are the only pseudo registers used by
10000 the ARM target at the moment. If more are added, a
10001 little more care in numbering will be needed. */
10002
10003 int num_pseudos = 32;
10004 if (have_neon_pseudos)
10005 num_pseudos += 16;
10006 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
10007 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
10008 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
10009 }
10010
10011 if (tdesc_data)
10012 {
10013 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
10014
10015 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
10016
10017 /* Override tdesc_register_type to adjust the types of VFP
10018 registers for NEON. */
10019 set_gdbarch_register_type (gdbarch, arm_register_type);
10020 }
10021
10022 /* Add standard register aliases. We add aliases even for those
10023 names which are used by the current architecture - it's simpler,
10024 and does no harm, since nothing ever lists user registers. */
10025 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
10026 user_reg_add (gdbarch, arm_register_aliases[i].name,
10027 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
10028
10029 return gdbarch;
10030 }
10031
10032 static void
10033 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
10034 {
10035 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
10036
10037 if (tdep == NULL)
10038 return;
10039
10040 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10041 (unsigned long) tdep->lowest_pc);
10042 }
10043
10044 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10045
10046 void
10047 _initialize_arm_tdep (void)
10048 {
10049 struct ui_file *stb;
10050 long length;
10051 struct cmd_list_element *new_set, *new_show;
10052 const char *setname;
10053 const char *setdesc;
10054 const char *const *regnames;
10055 int numregs, i, j;
10056 static char *helptext;
10057 char regdesc[1024], *rdptr = regdesc;
10058 size_t rest = sizeof (regdesc);
10059
10060 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
10061
10062 arm_objfile_data_key
10063 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
10064
10065 /* Add ourselves to objfile event chain. */
10066 observer_attach_new_objfile (arm_exidx_new_objfile);
10067 arm_exidx_data_key
10068 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
10069
10070 /* Register an ELF OS ABI sniffer for ARM binaries. */
10071 gdbarch_register_osabi_sniffer (bfd_arch_arm,
10072 bfd_target_elf_flavour,
10073 arm_elf_osabi_sniffer);
10074
10075 /* Initialize the standard target descriptions. */
10076 initialize_tdesc_arm_with_m ();
10077 initialize_tdesc_arm_with_iwmmxt ();
10078 initialize_tdesc_arm_with_vfpv2 ();
10079 initialize_tdesc_arm_with_vfpv3 ();
10080 initialize_tdesc_arm_with_neon ();
10081
10082 /* Get the number of possible sets of register names defined in opcodes. */
10083 num_disassembly_options = get_arm_regname_num_options ();
10084
10085 /* Add root prefix command for all "set arm"/"show arm" commands. */
10086 add_prefix_cmd ("arm", no_class, set_arm_command,
10087 _("Various ARM-specific commands."),
10088 &setarmcmdlist, "set arm ", 0, &setlist);
10089
10090 add_prefix_cmd ("arm", no_class, show_arm_command,
10091 _("Various ARM-specific commands."),
10092 &showarmcmdlist, "show arm ", 0, &showlist);
10093
10094 /* Sync the opcode insn printer with our register viewer. */
10095 parse_arm_disassembler_option ("reg-names-std");
10096
10097 /* Initialize the array that will be passed to
10098 add_setshow_enum_cmd(). */
10099 valid_disassembly_styles
10100 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
10101 for (i = 0; i < num_disassembly_options; i++)
10102 {
10103 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
10104 valid_disassembly_styles[i] = setname;
10105 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
10106 rdptr += length;
10107 rest -= length;
10108 /* When we find the default names, tell the disassembler to use
10109 them. */
10110 if (!strcmp (setname, "std"))
10111 {
10112 disassembly_style = setname;
10113 set_arm_regname_option (i);
10114 }
10115 }
10116 /* Mark the end of valid options. */
10117 valid_disassembly_styles[num_disassembly_options] = NULL;
10118
10119 /* Create the help text. */
10120 stb = mem_fileopen ();
10121 fprintf_unfiltered (stb, "%s%s%s",
10122 _("The valid values are:\n"),
10123 regdesc,
10124 _("The default is \"std\"."));
10125 helptext = ui_file_xstrdup (stb, NULL);
10126 ui_file_delete (stb);
10127
10128 add_setshow_enum_cmd("disassembler", no_class,
10129 valid_disassembly_styles, &disassembly_style,
10130 _("Set the disassembly style."),
10131 _("Show the disassembly style."),
10132 helptext,
10133 set_disassembly_style_sfunc,
10134 NULL, /* FIXME: i18n: The disassembly style is
10135 \"%s\". */
10136 &setarmcmdlist, &showarmcmdlist);
10137
10138 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
10139 _("Set usage of ARM 32-bit mode."),
10140 _("Show usage of ARM 32-bit mode."),
10141 _("When off, a 26-bit PC will be used."),
10142 NULL,
10143 NULL, /* FIXME: i18n: Usage of ARM 32-bit
10144 mode is %s. */
10145 &setarmcmdlist, &showarmcmdlist);
10146
10147 /* Add a command to allow the user to force the FPU model. */
10148 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
10149 _("Set the floating point type."),
10150 _("Show the floating point type."),
10151 _("auto - Determine the FP typefrom the OS-ABI.\n\
10152 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10153 fpa - FPA co-processor (GCC compiled).\n\
10154 softvfp - Software FP with pure-endian doubles.\n\
10155 vfp - VFP co-processor."),
10156 set_fp_model_sfunc, show_fp_model,
10157 &setarmcmdlist, &showarmcmdlist);
10158
10159 /* Add a command to allow the user to force the ABI. */
10160 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
10161 _("Set the ABI."),
10162 _("Show the ABI."),
10163 NULL, arm_set_abi, arm_show_abi,
10164 &setarmcmdlist, &showarmcmdlist);
10165
10166 /* Add two commands to allow the user to force the assumed
10167 execution mode. */
10168 add_setshow_enum_cmd ("fallback-mode", class_support,
10169 arm_mode_strings, &arm_fallback_mode_string,
10170 _("Set the mode assumed when symbols are unavailable."),
10171 _("Show the mode assumed when symbols are unavailable."),
10172 NULL, NULL, arm_show_fallback_mode,
10173 &setarmcmdlist, &showarmcmdlist);
10174 add_setshow_enum_cmd ("force-mode", class_support,
10175 arm_mode_strings, &arm_force_mode_string,
10176 _("Set the mode assumed even when symbols are available."),
10177 _("Show the mode assumed even when symbols are available."),
10178 NULL, NULL, arm_show_force_mode,
10179 &setarmcmdlist, &showarmcmdlist);
10180
10181 /* Debugging flag. */
10182 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
10183 _("Set ARM debugging."),
10184 _("Show ARM debugging."),
10185 _("When on, arm-specific debugging is enabled."),
10186 NULL,
10187 NULL, /* FIXME: i18n: "ARM debugging is %s." */
10188 &setdebuglist, &showdebuglist);
10189 }