2011-01-07 Michael Snyder <msnyder@vmware.com>
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46
47 #include "arm-tdep.h"
48 #include "gdb/sim-arm.h"
49
50 #include "elf-bfd.h"
51 #include "coff/internal.h"
52 #include "elf/arm.h"
53
54 #include "gdb_assert.h"
55 #include "vec.h"
56
57 #include "features/arm-with-m.c"
58
59 static int arm_debug;
60
61 /* Macros for setting and testing a bit in a minimal symbol that marks
62 it as Thumb function. The MSB of the minimal symbol's "info" field
63 is used for this purpose.
64
65 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
66 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
67
68 #define MSYMBOL_SET_SPECIAL(msym) \
69 MSYMBOL_TARGET_FLAG_1 (msym) = 1
70
71 #define MSYMBOL_IS_SPECIAL(msym) \
72 MSYMBOL_TARGET_FLAG_1 (msym)
73
74 /* Per-objfile data used for mapping symbols. */
75 static const struct objfile_data *arm_objfile_data_key;
76
77 struct arm_mapping_symbol
78 {
79 bfd_vma value;
80 char type;
81 };
82 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
83 DEF_VEC_O(arm_mapping_symbol_s);
84
85 struct arm_per_objfile
86 {
87 VEC(arm_mapping_symbol_s) **section_maps;
88 };
89
90 /* The list of available "set arm ..." and "show arm ..." commands. */
91 static struct cmd_list_element *setarmcmdlist = NULL;
92 static struct cmd_list_element *showarmcmdlist = NULL;
93
94 /* The type of floating-point to use. Keep this in sync with enum
95 arm_float_model, and the help string in _initialize_arm_tdep. */
96 static const char *fp_model_strings[] =
97 {
98 "auto",
99 "softfpa",
100 "fpa",
101 "softvfp",
102 "vfp",
103 NULL
104 };
105
106 /* A variable that can be configured by the user. */
107 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
108 static const char *current_fp_model = "auto";
109
110 /* The ABI to use. Keep this in sync with arm_abi_kind. */
111 static const char *arm_abi_strings[] =
112 {
113 "auto",
114 "APCS",
115 "AAPCS",
116 NULL
117 };
118
119 /* A variable that can be configured by the user. */
120 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
121 static const char *arm_abi_string = "auto";
122
123 /* The execution mode to assume. */
124 static const char *arm_mode_strings[] =
125 {
126 "auto",
127 "arm",
128 "thumb",
129 NULL
130 };
131
132 static const char *arm_fallback_mode_string = "auto";
133 static const char *arm_force_mode_string = "auto";
134
135 /* Number of different reg name sets (options). */
136 static int num_disassembly_options;
137
138 /* The standard register names, and all the valid aliases for them. Note
139 that `fp', `sp' and `pc' are not added in this alias list, because they
140 have been added as builtin user registers in
141 std-regs.c:_initialize_frame_reg. */
142 static const struct
143 {
144 const char *name;
145 int regnum;
146 } arm_register_aliases[] = {
147 /* Basic register numbers. */
148 { "r0", 0 },
149 { "r1", 1 },
150 { "r2", 2 },
151 { "r3", 3 },
152 { "r4", 4 },
153 { "r5", 5 },
154 { "r6", 6 },
155 { "r7", 7 },
156 { "r8", 8 },
157 { "r9", 9 },
158 { "r10", 10 },
159 { "r11", 11 },
160 { "r12", 12 },
161 { "r13", 13 },
162 { "r14", 14 },
163 { "r15", 15 },
164 /* Synonyms (argument and variable registers). */
165 { "a1", 0 },
166 { "a2", 1 },
167 { "a3", 2 },
168 { "a4", 3 },
169 { "v1", 4 },
170 { "v2", 5 },
171 { "v3", 6 },
172 { "v4", 7 },
173 { "v5", 8 },
174 { "v6", 9 },
175 { "v7", 10 },
176 { "v8", 11 },
177 /* Other platform-specific names for r9. */
178 { "sb", 9 },
179 { "tr", 9 },
180 /* Special names. */
181 { "ip", 12 },
182 { "lr", 14 },
183 /* Names used by GCC (not listed in the ARM EABI). */
184 { "sl", 10 },
185 /* A special name from the older ATPCS. */
186 { "wr", 7 },
187 };
188
189 static const char *const arm_register_names[] =
190 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
191 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
192 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
193 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
194 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
195 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
196 "fps", "cpsr" }; /* 24 25 */
197
198 /* Valid register name styles. */
199 static const char **valid_disassembly_styles;
200
201 /* Disassembly style to use. Default to "std" register names. */
202 static const char *disassembly_style;
203
204 /* This is used to keep the bfd arch_info in sync with the disassembly
205 style. */
206 static void set_disassembly_style_sfunc(char *, int,
207 struct cmd_list_element *);
208 static void set_disassembly_style (void);
209
210 static void convert_from_extended (const struct floatformat *, const void *,
211 void *, int);
212 static void convert_to_extended (const struct floatformat *, void *,
213 const void *, int);
214
215 static void arm_neon_quad_read (struct gdbarch *gdbarch,
216 struct regcache *regcache,
217 int regnum, gdb_byte *buf);
218 static void arm_neon_quad_write (struct gdbarch *gdbarch,
219 struct regcache *regcache,
220 int regnum, const gdb_byte *buf);
221
222 struct arm_prologue_cache
223 {
224 /* The stack pointer at the time this frame was created; i.e. the
225 caller's stack pointer when this function was called. It is used
226 to identify this frame. */
227 CORE_ADDR prev_sp;
228
229 /* The frame base for this frame is just prev_sp - frame size.
230 FRAMESIZE is the distance from the frame pointer to the
231 initial stack pointer. */
232
233 int framesize;
234
235 /* The register used to hold the frame pointer for this frame. */
236 int framereg;
237
238 /* Saved register offsets. */
239 struct trad_frame_saved_reg *saved_regs;
240 };
241
242 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
243 CORE_ADDR prologue_start,
244 CORE_ADDR prologue_end,
245 struct arm_prologue_cache *cache);
246
247 /* Architecture version for displaced stepping. This effects the behaviour of
248 certain instructions, and really should not be hard-wired. */
249
250 #define DISPLACED_STEPPING_ARCH_VERSION 5
251
252 /* Addresses for calling Thumb functions have the bit 0 set.
253 Here are some macros to test, set, or clear bit 0 of addresses. */
254 #define IS_THUMB_ADDR(addr) ((addr) & 1)
255 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
256 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
257
258 /* Set to true if the 32-bit mode is in use. */
259
260 int arm_apcs_32 = 1;
261
262 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
263
264 static int
265 arm_psr_thumb_bit (struct gdbarch *gdbarch)
266 {
267 if (gdbarch_tdep (gdbarch)->is_m)
268 return XPSR_T;
269 else
270 return CPSR_T;
271 }
272
273 /* Determine if FRAME is executing in Thumb mode. */
274
275 int
276 arm_frame_is_thumb (struct frame_info *frame)
277 {
278 CORE_ADDR cpsr;
279 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
280
281 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
282 directly (from a signal frame or dummy frame) or by interpreting
283 the saved LR (from a prologue or DWARF frame). So consult it and
284 trust the unwinders. */
285 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
286
287 return (cpsr & t_bit) != 0;
288 }
289
290 /* Callback for VEC_lower_bound. */
291
292 static inline int
293 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
294 const struct arm_mapping_symbol *rhs)
295 {
296 return lhs->value < rhs->value;
297 }
298
299 /* Search for the mapping symbol covering MEMADDR. If one is found,
300 return its type. Otherwise, return 0. If START is non-NULL,
301 set *START to the location of the mapping symbol. */
302
303 static char
304 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
305 {
306 struct obj_section *sec;
307
308 /* If there are mapping symbols, consult them. */
309 sec = find_pc_section (memaddr);
310 if (sec != NULL)
311 {
312 struct arm_per_objfile *data;
313 VEC(arm_mapping_symbol_s) *map;
314 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
315 0 };
316 unsigned int idx;
317
318 data = objfile_data (sec->objfile, arm_objfile_data_key);
319 if (data != NULL)
320 {
321 map = data->section_maps[sec->the_bfd_section->index];
322 if (!VEC_empty (arm_mapping_symbol_s, map))
323 {
324 struct arm_mapping_symbol *map_sym;
325
326 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
327 arm_compare_mapping_symbols);
328
329 /* VEC_lower_bound finds the earliest ordered insertion
330 point. If the following symbol starts at this exact
331 address, we use that; otherwise, the preceding
332 mapping symbol covers this address. */
333 if (idx < VEC_length (arm_mapping_symbol_s, map))
334 {
335 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
336 if (map_sym->value == map_key.value)
337 {
338 if (start)
339 *start = map_sym->value + obj_section_addr (sec);
340 return map_sym->type;
341 }
342 }
343
344 if (idx > 0)
345 {
346 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
347 if (start)
348 *start = map_sym->value + obj_section_addr (sec);
349 return map_sym->type;
350 }
351 }
352 }
353 }
354
355 return 0;
356 }
357
358 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
359 CORE_ADDR pc, int insert_bkpt);
360
361 /* Determine if the program counter specified in MEMADDR is in a Thumb
362 function. This function should be called for addresses unrelated to
363 any executing frame; otherwise, prefer arm_frame_is_thumb. */
364
365 static int
366 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
367 {
368 struct obj_section *sec;
369 struct minimal_symbol *sym;
370 char type;
371
372 /* If bit 0 of the address is set, assume this is a Thumb address. */
373 if (IS_THUMB_ADDR (memaddr))
374 return 1;
375
376 /* If the user wants to override the symbol table, let him. */
377 if (strcmp (arm_force_mode_string, "arm") == 0)
378 return 0;
379 if (strcmp (arm_force_mode_string, "thumb") == 0)
380 return 1;
381
382 /* ARM v6-M and v7-M are always in Thumb mode. */
383 if (gdbarch_tdep (gdbarch)->is_m)
384 return 1;
385
386 /* If there are mapping symbols, consult them. */
387 type = arm_find_mapping_symbol (memaddr, NULL);
388 if (type)
389 return type == 't';
390
391 /* Thumb functions have a "special" bit set in minimal symbols. */
392 sym = lookup_minimal_symbol_by_pc (memaddr);
393 if (sym)
394 return (MSYMBOL_IS_SPECIAL (sym));
395
396 /* If the user wants to override the fallback mode, let them. */
397 if (strcmp (arm_fallback_mode_string, "arm") == 0)
398 return 0;
399 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
400 return 1;
401
402 /* If we couldn't find any symbol, but we're talking to a running
403 target, then trust the current value of $cpsr. This lets
404 "display/i $pc" always show the correct mode (though if there is
405 a symbol table we will not reach here, so it still may not be
406 displayed in the mode it will be executed).
407
408 As a further heuristic if we detect that we are doing a single-step we
409 see what state executing the current instruction ends up with us being
410 in. */
411 if (target_has_registers)
412 {
413 struct frame_info *current_frame = get_current_frame ();
414 CORE_ADDR current_pc = get_frame_pc (current_frame);
415 int is_thumb = arm_frame_is_thumb (current_frame);
416 CORE_ADDR next_pc;
417 if (memaddr == current_pc)
418 return is_thumb;
419 else
420 {
421 struct gdbarch *gdbarch = get_frame_arch (current_frame);
422 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
423 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
424 return IS_THUMB_ADDR (next_pc);
425 else
426 return is_thumb;
427 }
428 }
429
430 /* Otherwise we're out of luck; we assume ARM. */
431 return 0;
432 }
433
434 /* Remove useless bits from addresses in a running program. */
435 static CORE_ADDR
436 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
437 {
438 if (arm_apcs_32)
439 return UNMAKE_THUMB_ADDR (val);
440 else
441 return (val & 0x03fffffc);
442 }
443
444 /* When reading symbols, we need to zap the low bit of the address,
445 which may be set to 1 for Thumb functions. */
446 static CORE_ADDR
447 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
448 {
449 return val & ~1;
450 }
451
452 /* Return 1 if PC is the start of a compiler helper function which
453 can be safely ignored during prologue skipping. */
454 static int
455 skip_prologue_function (CORE_ADDR pc)
456 {
457 struct minimal_symbol *msym;
458 const char *name;
459
460 msym = lookup_minimal_symbol_by_pc (pc);
461 if (msym == NULL || SYMBOL_VALUE_ADDRESS (msym) != pc)
462 return 0;
463
464 name = SYMBOL_LINKAGE_NAME (msym);
465 if (name == NULL)
466 return 0;
467
468 /* The GNU linker's Thumb call stub to foo is named
469 __foo_from_thumb. */
470 if (strstr (name, "_from_thumb") != NULL)
471 name += 2;
472
473 /* On soft-float targets, __truncdfsf2 is called to convert promoted
474 arguments to their argument types in non-prototyped
475 functions. */
476 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
477 return 1;
478 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
479 return 1;
480
481 /* Internal functions related to thread-local storage. */
482 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
483 return 1;
484 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
485 return 1;
486
487 return 0;
488 }
489
490 /* Support routines for instruction parsing. */
491 #define submask(x) ((1L << ((x) + 1)) - 1)
492 #define bit(obj,st) (((obj) >> (st)) & 1)
493 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
494 #define sbits(obj,st,fn) \
495 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
496 #define BranchDest(addr,instr) \
497 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
498
499 /* Extract the immediate from instruction movw/movt of encoding T. INSN1 is
500 the first 16-bit of instruction, and INSN2 is the second 16-bit of
501 instruction. */
502 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
503 ((bits ((insn1), 0, 3) << 12) \
504 | (bits ((insn1), 10, 10) << 11) \
505 | (bits ((insn2), 12, 14) << 8) \
506 | bits ((insn2), 0, 7))
507
508 /* Extract the immediate from instruction movw/movt of encoding A. INSN is
509 the 32-bit instruction. */
510 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
511 ((bits ((insn), 16, 19) << 12) \
512 | bits ((insn), 0, 11))
513
514 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
515
516 static unsigned int
517 thumb_expand_immediate (unsigned int imm)
518 {
519 unsigned int count = imm >> 7;
520
521 if (count < 8)
522 switch (count / 2)
523 {
524 case 0:
525 return imm & 0xff;
526 case 1:
527 return (imm & 0xff) | ((imm & 0xff) << 16);
528 case 2:
529 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
530 case 3:
531 return (imm & 0xff) | ((imm & 0xff) << 8)
532 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
533 }
534
535 return (0x80 | (imm & 0x7f)) << (32 - count);
536 }
537
538 /* Return 1 if the 16-bit Thumb instruction INST might change
539 control flow, 0 otherwise. */
540
541 static int
542 thumb_instruction_changes_pc (unsigned short inst)
543 {
544 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
545 return 1;
546
547 if ((inst & 0xf000) == 0xd000) /* conditional branch */
548 return 1;
549
550 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
551 return 1;
552
553 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
554 return 1;
555
556 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
557 return 1;
558
559 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
560 return 1;
561
562 return 0;
563 }
564
565 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
566 might change control flow, 0 otherwise. */
567
568 static int
569 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
570 {
571 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
572 {
573 /* Branches and miscellaneous control instructions. */
574
575 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
576 {
577 /* B, BL, BLX. */
578 return 1;
579 }
580 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
581 {
582 /* SUBS PC, LR, #imm8. */
583 return 1;
584 }
585 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
586 {
587 /* Conditional branch. */
588 return 1;
589 }
590
591 return 0;
592 }
593
594 if ((inst1 & 0xfe50) == 0xe810)
595 {
596 /* Load multiple or RFE. */
597
598 if (bit (inst1, 7) && !bit (inst1, 8))
599 {
600 /* LDMIA or POP */
601 if (bit (inst2, 15))
602 return 1;
603 }
604 else if (!bit (inst1, 7) && bit (inst1, 8))
605 {
606 /* LDMDB */
607 if (bit (inst2, 15))
608 return 1;
609 }
610 else if (bit (inst1, 7) && bit (inst1, 8))
611 {
612 /* RFEIA */
613 return 1;
614 }
615 else if (!bit (inst1, 7) && !bit (inst1, 8))
616 {
617 /* RFEDB */
618 return 1;
619 }
620
621 return 0;
622 }
623
624 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
625 {
626 /* MOV PC or MOVS PC. */
627 return 1;
628 }
629
630 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
631 {
632 /* LDR PC. */
633 if (bits (inst1, 0, 3) == 15)
634 return 1;
635 if (bit (inst1, 7))
636 return 1;
637 if (bit (inst2, 11))
638 return 1;
639 if ((inst2 & 0x0fc0) == 0x0000)
640 return 1;
641
642 return 0;
643 }
644
645 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
646 {
647 /* TBB. */
648 return 1;
649 }
650
651 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
652 {
653 /* TBH. */
654 return 1;
655 }
656
657 return 0;
658 }
659
660 /* Analyze a Thumb prologue, looking for a recognizable stack frame
661 and frame pointer. Scan until we encounter a store that could
662 clobber the stack frame unexpectedly, or an unknown instruction.
663 Return the last address which is definitely safe to skip for an
664 initial breakpoint. */
665
666 static CORE_ADDR
667 thumb_analyze_prologue (struct gdbarch *gdbarch,
668 CORE_ADDR start, CORE_ADDR limit,
669 struct arm_prologue_cache *cache)
670 {
671 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
672 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
673 int i;
674 pv_t regs[16];
675 struct pv_area *stack;
676 struct cleanup *back_to;
677 CORE_ADDR offset;
678 CORE_ADDR unrecognized_pc = 0;
679
680 for (i = 0; i < 16; i++)
681 regs[i] = pv_register (i, 0);
682 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
683 back_to = make_cleanup_free_pv_area (stack);
684
685 while (start < limit)
686 {
687 unsigned short insn;
688
689 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
690
691 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
692 {
693 int regno;
694 int mask;
695
696 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
697 break;
698
699 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
700 whether to save LR (R14). */
701 mask = (insn & 0xff) | ((insn & 0x100) << 6);
702
703 /* Calculate offsets of saved R0-R7 and LR. */
704 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
705 if (mask & (1 << regno))
706 {
707 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
708 -4);
709 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
710 }
711 }
712 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
713 sub sp, #simm */
714 {
715 offset = (insn & 0x7f) << 2; /* get scaled offset */
716 if (insn & 0x80) /* Check for SUB. */
717 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
718 -offset);
719 else
720 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
721 offset);
722 }
723 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
724 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
725 (insn & 0xff) << 2);
726 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
727 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
728 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
729 bits (insn, 6, 8));
730 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
731 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
732 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
733 bits (insn, 0, 7));
734 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
735 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
736 && pv_is_constant (regs[bits (insn, 3, 5)]))
737 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
738 regs[bits (insn, 6, 8)]);
739 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
740 && pv_is_constant (regs[bits (insn, 3, 6)]))
741 {
742 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
743 int rm = bits (insn, 3, 6);
744 regs[rd] = pv_add (regs[rd], regs[rm]);
745 }
746 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
747 {
748 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
749 int src_reg = (insn & 0x78) >> 3;
750 regs[dst_reg] = regs[src_reg];
751 }
752 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
753 {
754 /* Handle stores to the stack. Normally pushes are used,
755 but with GCC -mtpcs-frame, there may be other stores
756 in the prologue to create the frame. */
757 int regno = (insn >> 8) & 0x7;
758 pv_t addr;
759
760 offset = (insn & 0xff) << 2;
761 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
762
763 if (pv_area_store_would_trash (stack, addr))
764 break;
765
766 pv_area_store (stack, addr, 4, regs[regno]);
767 }
768 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
769 {
770 int rd = bits (insn, 0, 2);
771 int rn = bits (insn, 3, 5);
772 pv_t addr;
773
774 offset = bits (insn, 6, 10) << 2;
775 addr = pv_add_constant (regs[rn], offset);
776
777 if (pv_area_store_would_trash (stack, addr))
778 break;
779
780 pv_area_store (stack, addr, 4, regs[rd]);
781 }
782 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
783 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
784 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
785 /* Ignore stores of argument registers to the stack. */
786 ;
787 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
788 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
789 /* Ignore block loads from the stack, potentially copying
790 parameters from memory. */
791 ;
792 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
793 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
794 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
795 /* Similarly ignore single loads from the stack. */
796 ;
797 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
798 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
799 /* Skip register copies, i.e. saves to another register
800 instead of the stack. */
801 ;
802 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
803 /* Recognize constant loads; even with small stacks these are necessary
804 on Thumb. */
805 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
806 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
807 {
808 /* Constant pool loads, for the same reason. */
809 unsigned int constant;
810 CORE_ADDR loc;
811
812 loc = start + 4 + bits (insn, 0, 7) * 4;
813 constant = read_memory_unsigned_integer (loc, 4, byte_order);
814 regs[bits (insn, 8, 10)] = pv_constant (constant);
815 }
816 else if ((insn & 0xe000) == 0xe000)
817 {
818 unsigned short inst2;
819
820 inst2 = read_memory_unsigned_integer (start + 2, 2,
821 byte_order_for_code);
822
823 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
824 {
825 /* BL, BLX. Allow some special function calls when
826 skipping the prologue; GCC generates these before
827 storing arguments to the stack. */
828 CORE_ADDR nextpc;
829 int j1, j2, imm1, imm2;
830
831 imm1 = sbits (insn, 0, 10);
832 imm2 = bits (inst2, 0, 10);
833 j1 = bit (inst2, 13);
834 j2 = bit (inst2, 11);
835
836 offset = ((imm1 << 12) + (imm2 << 1));
837 offset ^= ((!j2) << 22) | ((!j1) << 23);
838
839 nextpc = start + 4 + offset;
840 /* For BLX make sure to clear the low bits. */
841 if (bit (inst2, 12) == 0)
842 nextpc = nextpc & 0xfffffffc;
843
844 if (!skip_prologue_function (nextpc))
845 break;
846 }
847
848 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
849 { registers } */
850 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
851 {
852 pv_t addr = regs[bits (insn, 0, 3)];
853 int regno;
854
855 if (pv_area_store_would_trash (stack, addr))
856 break;
857
858 /* Calculate offsets of saved registers. */
859 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
860 if (inst2 & (1 << regno))
861 {
862 addr = pv_add_constant (addr, -4);
863 pv_area_store (stack, addr, 4, regs[regno]);
864 }
865
866 if (insn & 0x0020)
867 regs[bits (insn, 0, 3)] = addr;
868 }
869
870 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
871 [Rn, #+/-imm]{!} */
872 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
873 {
874 int regno1 = bits (inst2, 12, 15);
875 int regno2 = bits (inst2, 8, 11);
876 pv_t addr = regs[bits (insn, 0, 3)];
877
878 offset = inst2 & 0xff;
879 if (insn & 0x0080)
880 addr = pv_add_constant (addr, offset);
881 else
882 addr = pv_add_constant (addr, -offset);
883
884 if (pv_area_store_would_trash (stack, addr))
885 break;
886
887 pv_area_store (stack, addr, 4, regs[regno1]);
888 pv_area_store (stack, pv_add_constant (addr, 4),
889 4, regs[regno2]);
890
891 if (insn & 0x0020)
892 regs[bits (insn, 0, 3)] = addr;
893 }
894
895 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
896 && (inst2 & 0x0c00) == 0x0c00
897 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
898 {
899 int regno = bits (inst2, 12, 15);
900 pv_t addr = regs[bits (insn, 0, 3)];
901
902 offset = inst2 & 0xff;
903 if (inst2 & 0x0200)
904 addr = pv_add_constant (addr, offset);
905 else
906 addr = pv_add_constant (addr, -offset);
907
908 if (pv_area_store_would_trash (stack, addr))
909 break;
910
911 pv_area_store (stack, addr, 4, regs[regno]);
912
913 if (inst2 & 0x0100)
914 regs[bits (insn, 0, 3)] = addr;
915 }
916
917 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
918 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
919 {
920 int regno = bits (inst2, 12, 15);
921 pv_t addr;
922
923 offset = inst2 & 0xfff;
924 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
925
926 if (pv_area_store_would_trash (stack, addr))
927 break;
928
929 pv_area_store (stack, addr, 4, regs[regno]);
930 }
931
932 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
933 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
934 /* Ignore stores of argument registers to the stack. */
935 ;
936
937 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
938 && (inst2 & 0x0d00) == 0x0c00
939 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
940 /* Ignore stores of argument registers to the stack. */
941 ;
942
943 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
944 { registers } */
945 && (inst2 & 0x8000) == 0x0000
946 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
947 /* Ignore block loads from the stack, potentially copying
948 parameters from memory. */
949 ;
950
951 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
952 [Rn, #+/-imm] */
953 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
954 /* Similarly ignore dual loads from the stack. */
955 ;
956
957 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
958 && (inst2 & 0x0d00) == 0x0c00
959 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
960 /* Similarly ignore single loads from the stack. */
961 ;
962
963 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
964 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
965 /* Similarly ignore single loads from the stack. */
966 ;
967
968 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
969 && (inst2 & 0x8000) == 0x0000)
970 {
971 unsigned int imm = ((bits (insn, 10, 10) << 11)
972 | (bits (inst2, 12, 14) << 8)
973 | bits (inst2, 0, 7));
974
975 regs[bits (inst2, 8, 11)]
976 = pv_add_constant (regs[bits (insn, 0, 3)],
977 thumb_expand_immediate (imm));
978 }
979
980 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
981 && (inst2 & 0x8000) == 0x0000)
982 {
983 unsigned int imm = ((bits (insn, 10, 10) << 11)
984 | (bits (inst2, 12, 14) << 8)
985 | bits (inst2, 0, 7));
986
987 regs[bits (inst2, 8, 11)]
988 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
989 }
990
991 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
992 && (inst2 & 0x8000) == 0x0000)
993 {
994 unsigned int imm = ((bits (insn, 10, 10) << 11)
995 | (bits (inst2, 12, 14) << 8)
996 | bits (inst2, 0, 7));
997
998 regs[bits (inst2, 8, 11)]
999 = pv_add_constant (regs[bits (insn, 0, 3)],
1000 - (CORE_ADDR) thumb_expand_immediate (imm));
1001 }
1002
1003 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1004 && (inst2 & 0x8000) == 0x0000)
1005 {
1006 unsigned int imm = ((bits (insn, 10, 10) << 11)
1007 | (bits (inst2, 12, 14) << 8)
1008 | bits (inst2, 0, 7));
1009
1010 regs[bits (inst2, 8, 11)]
1011 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1012 }
1013
1014 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1015 {
1016 unsigned int imm = ((bits (insn, 10, 10) << 11)
1017 | (bits (inst2, 12, 14) << 8)
1018 | bits (inst2, 0, 7));
1019
1020 regs[bits (inst2, 8, 11)]
1021 = pv_constant (thumb_expand_immediate (imm));
1022 }
1023
1024 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1025 {
1026 unsigned int imm
1027 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1028
1029 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1030 }
1031
1032 else if (insn == 0xea5f /* mov.w Rd,Rm */
1033 && (inst2 & 0xf0f0) == 0)
1034 {
1035 int dst_reg = (inst2 & 0x0f00) >> 8;
1036 int src_reg = inst2 & 0xf;
1037 regs[dst_reg] = regs[src_reg];
1038 }
1039
1040 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1041 {
1042 /* Constant pool loads. */
1043 unsigned int constant;
1044 CORE_ADDR loc;
1045
1046 offset = bits (insn, 0, 11);
1047 if (insn & 0x0080)
1048 loc = start + 4 + offset;
1049 else
1050 loc = start + 4 - offset;
1051
1052 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1053 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1054 }
1055
1056 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1057 {
1058 /* Constant pool loads. */
1059 unsigned int constant;
1060 CORE_ADDR loc;
1061
1062 offset = bits (insn, 0, 7) << 2;
1063 if (insn & 0x0080)
1064 loc = start + 4 + offset;
1065 else
1066 loc = start + 4 - offset;
1067
1068 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1069 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1070
1071 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1072 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1073 }
1074
1075 else if (thumb2_instruction_changes_pc (insn, inst2))
1076 {
1077 /* Don't scan past anything that might change control flow. */
1078 break;
1079 }
1080 else
1081 {
1082 /* The optimizer might shove anything into the prologue,
1083 so we just skip what we don't recognize. */
1084 unrecognized_pc = start;
1085 }
1086
1087 start += 2;
1088 }
1089 else if (thumb_instruction_changes_pc (insn))
1090 {
1091 /* Don't scan past anything that might change control flow. */
1092 break;
1093 }
1094 else
1095 {
1096 /* The optimizer might shove anything into the prologue,
1097 so we just skip what we don't recognize. */
1098 unrecognized_pc = start;
1099 }
1100
1101 start += 2;
1102 }
1103
1104 if (arm_debug)
1105 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1106 paddress (gdbarch, start));
1107
1108 if (unrecognized_pc == 0)
1109 unrecognized_pc = start;
1110
1111 if (cache == NULL)
1112 {
1113 do_cleanups (back_to);
1114 return unrecognized_pc;
1115 }
1116
1117 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1118 {
1119 /* Frame pointer is fp. Frame size is constant. */
1120 cache->framereg = ARM_FP_REGNUM;
1121 cache->framesize = -regs[ARM_FP_REGNUM].k;
1122 }
1123 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1124 {
1125 /* Frame pointer is r7. Frame size is constant. */
1126 cache->framereg = THUMB_FP_REGNUM;
1127 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1128 }
1129 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1130 {
1131 /* Try the stack pointer... this is a bit desperate. */
1132 cache->framereg = ARM_SP_REGNUM;
1133 cache->framesize = -regs[ARM_SP_REGNUM].k;
1134 }
1135 else
1136 {
1137 /* We're just out of luck. We don't know where the frame is. */
1138 cache->framereg = -1;
1139 cache->framesize = 0;
1140 }
1141
1142 for (i = 0; i < 16; i++)
1143 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1144 cache->saved_regs[i].addr = offset;
1145
1146 do_cleanups (back_to);
1147 return unrecognized_pc;
1148 }
1149
1150
1151 /* Try to analyze the instructions starting from PC, which load symbol
1152 __stack_chk_guard. Return the address of instruction after loading this
1153 symbol, set the dest register number to *BASEREG, and set the size of
1154 instructions for loading symbol in OFFSET. Return 0 if instructions are
1155 not recognized. */
1156
1157 static CORE_ADDR
1158 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1159 unsigned int *destreg, int *offset)
1160 {
1161 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1162 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1163 unsigned int low, high, address;
1164
1165 address = 0;
1166 if (is_thumb)
1167 {
1168 unsigned short insn1
1169 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1170
1171 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1172 {
1173 *destreg = bits (insn1, 8, 10);
1174 *offset = 2;
1175 address = bits (insn1, 0, 7);
1176 }
1177 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1178 {
1179 unsigned short insn2
1180 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1181
1182 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1183
1184 insn1
1185 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1186 insn2
1187 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1188
1189 /* movt Rd, #const */
1190 if ((insn1 & 0xfbc0) == 0xf2c0)
1191 {
1192 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1193 *destreg = bits (insn2, 8, 11);
1194 *offset = 8;
1195 address = (high << 16 | low);
1196 }
1197 }
1198 }
1199 else
1200 {
1201 unsigned int insn
1202 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1203
1204 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1205 {
1206 address = bits (insn, 0, 11);
1207 *destreg = bits (insn, 12, 15);
1208 *offset = 4;
1209 }
1210 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1211 {
1212 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1213
1214 insn
1215 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1216
1217 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1218 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1219
1220 address = (high << 16 | low);
1221 *destreg = bits (insn, 12, 15);
1222 *offset = 8;
1223 }
1224 }
1225
1226 return address;
1227 }
1228
1229 /* Try to skip a sequence of instructions used for stack protector. If PC
1230 points to the first instruction of this sequence, return the address of
1231 first instruction after this sequence, otherwise, return original PC.
1232
1233 On arm, this sequence of instructions is composed of mainly three steps,
1234 Step 1: load symbol __stack_chk_guard,
1235 Step 2: load from address of __stack_chk_guard,
1236 Step 3: store it to somewhere else.
1237
1238 Usually, instructions on step 2 and step 3 are the same on various ARM
1239 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1240 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1241 instructions in step 1 vary from different ARM architectures. On ARMv7,
1242 they are,
1243
1244 movw Rn, #:lower16:__stack_chk_guard
1245 movt Rn, #:upper16:__stack_chk_guard
1246
1247 On ARMv5t, it is,
1248
1249 ldr Rn, .Label
1250 ....
1251 .Lable:
1252 .word __stack_chk_guard
1253
1254 Since ldr/str is a very popular instruction, we can't use them as
1255 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1256 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1257 stripped, as the 'fingerprint' of a stack protector cdoe sequence. */
1258
1259 static CORE_ADDR
1260 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1261 {
1262 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1263 unsigned int address, basereg;
1264 struct minimal_symbol *stack_chk_guard;
1265 int offset;
1266 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1267 CORE_ADDR addr;
1268
1269 /* Try to parse the instructions in Step 1. */
1270 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1271 &basereg, &offset);
1272 if (!addr)
1273 return pc;
1274
1275 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1276 /* If name of symbol doesn't start with '__stack_chk_guard', this
1277 instruction sequence is not for stack protector. If symbol is
1278 removed, we conservatively think this sequence is for stack protector. */
1279 if (stack_chk_guard
1280 && strcmp (SYMBOL_LINKAGE_NAME(stack_chk_guard), "__stack_chk_guard"))
1281 return pc;
1282
1283 if (is_thumb)
1284 {
1285 unsigned int destreg;
1286 unsigned short insn
1287 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1288
1289 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1290 if ((insn & 0xf800) != 0x6800)
1291 return pc;
1292 if (bits (insn, 3, 5) != basereg)
1293 return pc;
1294 destreg = bits (insn, 0, 2);
1295
1296 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1297 byte_order_for_code);
1298 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1299 if ((insn & 0xf800) != 0x6000)
1300 return pc;
1301 if (destreg != bits (insn, 0, 2))
1302 return pc;
1303 }
1304 else
1305 {
1306 unsigned int destreg;
1307 unsigned int insn
1308 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1309
1310 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1311 if ((insn & 0x0e500000) != 0x04100000)
1312 return pc;
1313 if (bits (insn, 16, 19) != basereg)
1314 return pc;
1315 destreg = bits (insn, 12, 15);
1316 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1317 insn = read_memory_unsigned_integer (pc + offset + 4,
1318 4, byte_order_for_code);
1319 if ((insn & 0x0e500000) != 0x04000000)
1320 return pc;
1321 if (bits (insn, 12, 15) != destreg)
1322 return pc;
1323 }
1324 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1325 on arm. */
1326 if (is_thumb)
1327 return pc + offset + 4;
1328 else
1329 return pc + offset + 8;
1330 }
1331
1332 /* Advance the PC across any function entry prologue instructions to
1333 reach some "real" code.
1334
1335 The APCS (ARM Procedure Call Standard) defines the following
1336 prologue:
1337
1338 mov ip, sp
1339 [stmfd sp!, {a1,a2,a3,a4}]
1340 stmfd sp!, {...,fp,ip,lr,pc}
1341 [stfe f7, [sp, #-12]!]
1342 [stfe f6, [sp, #-12]!]
1343 [stfe f5, [sp, #-12]!]
1344 [stfe f4, [sp, #-12]!]
1345 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1346
1347 static CORE_ADDR
1348 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1349 {
1350 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1351 unsigned long inst;
1352 CORE_ADDR skip_pc;
1353 CORE_ADDR func_addr, limit_pc;
1354 struct symtab_and_line sal;
1355
1356 /* See if we can determine the end of the prologue via the symbol table.
1357 If so, then return either PC, or the PC after the prologue, whichever
1358 is greater. */
1359 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1360 {
1361 CORE_ADDR post_prologue_pc
1362 = skip_prologue_using_sal (gdbarch, func_addr);
1363 struct symtab *s = find_pc_symtab (func_addr);
1364
1365 if (post_prologue_pc)
1366 post_prologue_pc
1367 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1368
1369
1370 /* GCC always emits a line note before the prologue and another
1371 one after, even if the two are at the same address or on the
1372 same line. Take advantage of this so that we do not need to
1373 know every instruction that might appear in the prologue. We
1374 will have producer information for most binaries; if it is
1375 missing (e.g. for -gstabs), assuming the GNU tools. */
1376 if (post_prologue_pc
1377 && (s == NULL
1378 || s->producer == NULL
1379 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1380 return post_prologue_pc;
1381
1382 if (post_prologue_pc != 0)
1383 {
1384 CORE_ADDR analyzed_limit;
1385
1386 /* For non-GCC compilers, make sure the entire line is an
1387 acceptable prologue; GDB will round this function's
1388 return value up to the end of the following line so we
1389 can not skip just part of a line (and we do not want to).
1390
1391 RealView does not treat the prologue specially, but does
1392 associate prologue code with the opening brace; so this
1393 lets us skip the first line if we think it is the opening
1394 brace. */
1395 if (arm_pc_is_thumb (gdbarch, func_addr))
1396 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1397 post_prologue_pc, NULL);
1398 else
1399 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1400 post_prologue_pc, NULL);
1401
1402 if (analyzed_limit != post_prologue_pc)
1403 return func_addr;
1404
1405 return post_prologue_pc;
1406 }
1407 }
1408
1409 /* Can't determine prologue from the symbol table, need to examine
1410 instructions. */
1411
1412 /* Find an upper limit on the function prologue using the debug
1413 information. If the debug information could not be used to provide
1414 that bound, then use an arbitrary large number as the upper bound. */
1415 /* Like arm_scan_prologue, stop no later than pc + 64. */
1416 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1417 if (limit_pc == 0)
1418 limit_pc = pc + 64; /* Magic. */
1419
1420
1421 /* Check if this is Thumb code. */
1422 if (arm_pc_is_thumb (gdbarch, pc))
1423 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1424
1425 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1426 {
1427 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1428
1429 /* "mov ip, sp" is no longer a required part of the prologue. */
1430 if (inst == 0xe1a0c00d) /* mov ip, sp */
1431 continue;
1432
1433 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1434 continue;
1435
1436 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1437 continue;
1438
1439 /* Some prologues begin with "str lr, [sp, #-4]!". */
1440 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1441 continue;
1442
1443 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1444 continue;
1445
1446 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1447 continue;
1448
1449 /* Any insns after this point may float into the code, if it makes
1450 for better instruction scheduling, so we skip them only if we
1451 find them, but still consider the function to be frame-ful. */
1452
1453 /* We may have either one sfmfd instruction here, or several stfe
1454 insns, depending on the version of floating point code we
1455 support. */
1456 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1457 continue;
1458
1459 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1460 continue;
1461
1462 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1463 continue;
1464
1465 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1466 continue;
1467
1468 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1469 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1470 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1471 continue;
1472
1473 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1474 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1475 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1476 continue;
1477
1478 /* Un-recognized instruction; stop scanning. */
1479 break;
1480 }
1481
1482 return skip_pc; /* End of prologue. */
1483 }
1484
1485 /* *INDENT-OFF* */
1486 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1487 This function decodes a Thumb function prologue to determine:
1488 1) the size of the stack frame
1489 2) which registers are saved on it
1490 3) the offsets of saved regs
1491 4) the offset from the stack pointer to the frame pointer
1492
1493 A typical Thumb function prologue would create this stack frame
1494 (offsets relative to FP)
1495 old SP -> 24 stack parameters
1496 20 LR
1497 16 R7
1498 R7 -> 0 local variables (16 bytes)
1499 SP -> -12 additional stack space (12 bytes)
1500 The frame size would thus be 36 bytes, and the frame offset would be
1501 12 bytes. The frame register is R7.
1502
1503 The comments for thumb_skip_prolog() describe the algorithm we use
1504 to detect the end of the prolog. */
1505 /* *INDENT-ON* */
1506
1507 static void
1508 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1509 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1510 {
1511 CORE_ADDR prologue_start;
1512 CORE_ADDR prologue_end;
1513 CORE_ADDR current_pc;
1514
1515 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1516 &prologue_end))
1517 {
1518 /* See comment in arm_scan_prologue for an explanation of
1519 this heuristics. */
1520 if (prologue_end > prologue_start + 64)
1521 {
1522 prologue_end = prologue_start + 64;
1523 }
1524 }
1525 else
1526 /* We're in the boondocks: we have no idea where the start of the
1527 function is. */
1528 return;
1529
1530 prologue_end = min (prologue_end, prev_pc);
1531
1532 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1533 }
1534
1535 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1536
1537 static int
1538 arm_instruction_changes_pc (uint32_t this_instr)
1539 {
1540 if (bits (this_instr, 28, 31) == INST_NV)
1541 /* Unconditional instructions. */
1542 switch (bits (this_instr, 24, 27))
1543 {
1544 case 0xa:
1545 case 0xb:
1546 /* Branch with Link and change to Thumb. */
1547 return 1;
1548 case 0xc:
1549 case 0xd:
1550 case 0xe:
1551 /* Coprocessor register transfer. */
1552 if (bits (this_instr, 12, 15) == 15)
1553 error (_("Invalid update to pc in instruction"));
1554 return 0;
1555 default:
1556 return 0;
1557 }
1558 else
1559 switch (bits (this_instr, 25, 27))
1560 {
1561 case 0x0:
1562 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1563 {
1564 /* Multiplies and extra load/stores. */
1565 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1566 /* Neither multiplies nor extension load/stores are allowed
1567 to modify PC. */
1568 return 0;
1569
1570 /* Otherwise, miscellaneous instructions. */
1571
1572 /* BX <reg>, BXJ <reg>, BLX <reg> */
1573 if (bits (this_instr, 4, 27) == 0x12fff1
1574 || bits (this_instr, 4, 27) == 0x12fff2
1575 || bits (this_instr, 4, 27) == 0x12fff3)
1576 return 1;
1577
1578 /* Other miscellaneous instructions are unpredictable if they
1579 modify PC. */
1580 return 0;
1581 }
1582 /* Data processing instruction. Fall through. */
1583
1584 case 0x1:
1585 if (bits (this_instr, 12, 15) == 15)
1586 return 1;
1587 else
1588 return 0;
1589
1590 case 0x2:
1591 case 0x3:
1592 /* Media instructions and architecturally undefined instructions. */
1593 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1594 return 0;
1595
1596 /* Stores. */
1597 if (bit (this_instr, 20) == 0)
1598 return 0;
1599
1600 /* Loads. */
1601 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1602 return 1;
1603 else
1604 return 0;
1605
1606 case 0x4:
1607 /* Load/store multiple. */
1608 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1609 return 1;
1610 else
1611 return 0;
1612
1613 case 0x5:
1614 /* Branch and branch with link. */
1615 return 1;
1616
1617 case 0x6:
1618 case 0x7:
1619 /* Coprocessor transfers or SWIs can not affect PC. */
1620 return 0;
1621
1622 default:
1623 internal_error (__FILE__, __LINE__, "bad value in switch");
1624 }
1625 }
1626
1627 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1628 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1629 fill it in. Return the first address not recognized as a prologue
1630 instruction.
1631
1632 We recognize all the instructions typically found in ARM prologues,
1633 plus harmless instructions which can be skipped (either for analysis
1634 purposes, or a more restrictive set that can be skipped when finding
1635 the end of the prologue). */
1636
1637 static CORE_ADDR
1638 arm_analyze_prologue (struct gdbarch *gdbarch,
1639 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1640 struct arm_prologue_cache *cache)
1641 {
1642 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1643 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1644 int regno;
1645 CORE_ADDR offset, current_pc;
1646 pv_t regs[ARM_FPS_REGNUM];
1647 struct pv_area *stack;
1648 struct cleanup *back_to;
1649 int framereg, framesize;
1650 CORE_ADDR unrecognized_pc = 0;
1651
1652 /* Search the prologue looking for instructions that set up the
1653 frame pointer, adjust the stack pointer, and save registers.
1654
1655 Be careful, however, and if it doesn't look like a prologue,
1656 don't try to scan it. If, for instance, a frameless function
1657 begins with stmfd sp!, then we will tell ourselves there is
1658 a frame, which will confuse stack traceback, as well as "finish"
1659 and other operations that rely on a knowledge of the stack
1660 traceback. */
1661
1662 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1663 regs[regno] = pv_register (regno, 0);
1664 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1665 back_to = make_cleanup_free_pv_area (stack);
1666
1667 for (current_pc = prologue_start;
1668 current_pc < prologue_end;
1669 current_pc += 4)
1670 {
1671 unsigned int insn
1672 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1673
1674 if (insn == 0xe1a0c00d) /* mov ip, sp */
1675 {
1676 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1677 continue;
1678 }
1679 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1680 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1681 {
1682 unsigned imm = insn & 0xff; /* immediate value */
1683 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1684 int rd = bits (insn, 12, 15);
1685 imm = (imm >> rot) | (imm << (32 - rot));
1686 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1687 continue;
1688 }
1689 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1690 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1691 {
1692 unsigned imm = insn & 0xff; /* immediate value */
1693 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1694 int rd = bits (insn, 12, 15);
1695 imm = (imm >> rot) | (imm << (32 - rot));
1696 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1697 continue;
1698 }
1699 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1700 [sp, #-4]! */
1701 {
1702 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1703 break;
1704 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1705 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1706 regs[bits (insn, 12, 15)]);
1707 continue;
1708 }
1709 else if ((insn & 0xffff0000) == 0xe92d0000)
1710 /* stmfd sp!, {..., fp, ip, lr, pc}
1711 or
1712 stmfd sp!, {a1, a2, a3, a4} */
1713 {
1714 int mask = insn & 0xffff;
1715
1716 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1717 break;
1718
1719 /* Calculate offsets of saved registers. */
1720 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1721 if (mask & (1 << regno))
1722 {
1723 regs[ARM_SP_REGNUM]
1724 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1725 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1726 }
1727 }
1728 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1729 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1730 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1731 {
1732 /* No need to add this to saved_regs -- it's just an arg reg. */
1733 continue;
1734 }
1735 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1736 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1737 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1738 {
1739 /* No need to add this to saved_regs -- it's just an arg reg. */
1740 continue;
1741 }
1742 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1743 { registers } */
1744 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1745 {
1746 /* No need to add this to saved_regs -- it's just arg regs. */
1747 continue;
1748 }
1749 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1750 {
1751 unsigned imm = insn & 0xff; /* immediate value */
1752 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1753 imm = (imm >> rot) | (imm << (32 - rot));
1754 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1755 }
1756 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1757 {
1758 unsigned imm = insn & 0xff; /* immediate value */
1759 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1760 imm = (imm >> rot) | (imm << (32 - rot));
1761 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1762 }
1763 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1764 [sp, -#c]! */
1765 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1766 {
1767 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1768 break;
1769
1770 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1771 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1772 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1773 }
1774 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1775 [sp!] */
1776 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1777 {
1778 int n_saved_fp_regs;
1779 unsigned int fp_start_reg, fp_bound_reg;
1780
1781 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1782 break;
1783
1784 if ((insn & 0x800) == 0x800) /* N0 is set */
1785 {
1786 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1787 n_saved_fp_regs = 3;
1788 else
1789 n_saved_fp_regs = 1;
1790 }
1791 else
1792 {
1793 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1794 n_saved_fp_regs = 2;
1795 else
1796 n_saved_fp_regs = 4;
1797 }
1798
1799 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1800 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1801 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1802 {
1803 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1804 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1805 regs[fp_start_reg++]);
1806 }
1807 }
1808 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1809 {
1810 /* Allow some special function calls when skipping the
1811 prologue; GCC generates these before storing arguments to
1812 the stack. */
1813 CORE_ADDR dest = BranchDest (current_pc, insn);
1814
1815 if (skip_prologue_function (dest))
1816 continue;
1817 else
1818 break;
1819 }
1820 else if ((insn & 0xf0000000) != 0xe0000000)
1821 break; /* Condition not true, exit early. */
1822 else if (arm_instruction_changes_pc (insn))
1823 /* Don't scan past anything that might change control flow. */
1824 break;
1825 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1826 {
1827 /* Ignore block loads from the stack, potentially copying
1828 parameters from memory. */
1829 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1830 continue;
1831 else
1832 break;
1833 }
1834 else if ((insn & 0xfc500000) == 0xe4100000)
1835 {
1836 /* Similarly ignore single loads from the stack. */
1837 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1838 continue;
1839 else
1840 break;
1841 }
1842 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1843 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1844 register instead of the stack. */
1845 continue;
1846 else
1847 {
1848 /* The optimizer might shove anything into the prologue,
1849 so we just skip what we don't recognize. */
1850 unrecognized_pc = current_pc;
1851 continue;
1852 }
1853 }
1854
1855 if (unrecognized_pc == 0)
1856 unrecognized_pc = current_pc;
1857
1858 /* The frame size is just the distance from the frame register
1859 to the original stack pointer. */
1860 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1861 {
1862 /* Frame pointer is fp. */
1863 framereg = ARM_FP_REGNUM;
1864 framesize = -regs[ARM_FP_REGNUM].k;
1865 }
1866 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1867 {
1868 /* Try the stack pointer... this is a bit desperate. */
1869 framereg = ARM_SP_REGNUM;
1870 framesize = -regs[ARM_SP_REGNUM].k;
1871 }
1872 else
1873 {
1874 /* We're just out of luck. We don't know where the frame is. */
1875 framereg = -1;
1876 framesize = 0;
1877 }
1878
1879 if (cache)
1880 {
1881 cache->framereg = framereg;
1882 cache->framesize = framesize;
1883
1884 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1885 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1886 cache->saved_regs[regno].addr = offset;
1887 }
1888
1889 if (arm_debug)
1890 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1891 paddress (gdbarch, unrecognized_pc));
1892
1893 do_cleanups (back_to);
1894 return unrecognized_pc;
1895 }
1896
1897 static void
1898 arm_scan_prologue (struct frame_info *this_frame,
1899 struct arm_prologue_cache *cache)
1900 {
1901 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1902 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1903 int regno;
1904 CORE_ADDR prologue_start, prologue_end, current_pc;
1905 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1906 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1907 pv_t regs[ARM_FPS_REGNUM];
1908 struct pv_area *stack;
1909 struct cleanup *back_to;
1910 CORE_ADDR offset;
1911
1912 /* Assume there is no frame until proven otherwise. */
1913 cache->framereg = ARM_SP_REGNUM;
1914 cache->framesize = 0;
1915
1916 /* Check for Thumb prologue. */
1917 if (arm_frame_is_thumb (this_frame))
1918 {
1919 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1920 return;
1921 }
1922
1923 /* Find the function prologue. If we can't find the function in
1924 the symbol table, peek in the stack frame to find the PC. */
1925 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1926 &prologue_end))
1927 {
1928 /* One way to find the end of the prologue (which works well
1929 for unoptimized code) is to do the following:
1930
1931 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1932
1933 if (sal.line == 0)
1934 prologue_end = prev_pc;
1935 else if (sal.end < prologue_end)
1936 prologue_end = sal.end;
1937
1938 This mechanism is very accurate so long as the optimizer
1939 doesn't move any instructions from the function body into the
1940 prologue. If this happens, sal.end will be the last
1941 instruction in the first hunk of prologue code just before
1942 the first instruction that the scheduler has moved from
1943 the body to the prologue.
1944
1945 In order to make sure that we scan all of the prologue
1946 instructions, we use a slightly less accurate mechanism which
1947 may scan more than necessary. To help compensate for this
1948 lack of accuracy, the prologue scanning loop below contains
1949 several clauses which'll cause the loop to terminate early if
1950 an implausible prologue instruction is encountered.
1951
1952 The expression
1953
1954 prologue_start + 64
1955
1956 is a suitable endpoint since it accounts for the largest
1957 possible prologue plus up to five instructions inserted by
1958 the scheduler. */
1959
1960 if (prologue_end > prologue_start + 64)
1961 {
1962 prologue_end = prologue_start + 64; /* See above. */
1963 }
1964 }
1965 else
1966 {
1967 /* We have no symbol information. Our only option is to assume this
1968 function has a standard stack frame and the normal frame register.
1969 Then, we can find the value of our frame pointer on entrance to
1970 the callee (or at the present moment if this is the innermost frame).
1971 The value stored there should be the address of the stmfd + 8. */
1972 CORE_ADDR frame_loc;
1973 LONGEST return_value;
1974
1975 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1976 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1977 return;
1978 else
1979 {
1980 prologue_start = gdbarch_addr_bits_remove
1981 (gdbarch, return_value) - 8;
1982 prologue_end = prologue_start + 64; /* See above. */
1983 }
1984 }
1985
1986 if (prev_pc < prologue_end)
1987 prologue_end = prev_pc;
1988
1989 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1990 }
1991
1992 static struct arm_prologue_cache *
1993 arm_make_prologue_cache (struct frame_info *this_frame)
1994 {
1995 int reg;
1996 struct arm_prologue_cache *cache;
1997 CORE_ADDR unwound_fp;
1998
1999 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2000 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2001
2002 arm_scan_prologue (this_frame, cache);
2003
2004 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2005 if (unwound_fp == 0)
2006 return cache;
2007
2008 cache->prev_sp = unwound_fp + cache->framesize;
2009
2010 /* Calculate actual addresses of saved registers using offsets
2011 determined by arm_scan_prologue. */
2012 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2013 if (trad_frame_addr_p (cache->saved_regs, reg))
2014 cache->saved_regs[reg].addr += cache->prev_sp;
2015
2016 return cache;
2017 }
2018
2019 /* Our frame ID for a normal frame is the current function's starting PC
2020 and the caller's SP when we were called. */
2021
2022 static void
2023 arm_prologue_this_id (struct frame_info *this_frame,
2024 void **this_cache,
2025 struct frame_id *this_id)
2026 {
2027 struct arm_prologue_cache *cache;
2028 struct frame_id id;
2029 CORE_ADDR pc, func;
2030
2031 if (*this_cache == NULL)
2032 *this_cache = arm_make_prologue_cache (this_frame);
2033 cache = *this_cache;
2034
2035 /* This is meant to halt the backtrace at "_start". */
2036 pc = get_frame_pc (this_frame);
2037 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2038 return;
2039
2040 /* If we've hit a wall, stop. */
2041 if (cache->prev_sp == 0)
2042 return;
2043
2044 func = get_frame_func (this_frame);
2045 id = frame_id_build (cache->prev_sp, func);
2046 *this_id = id;
2047 }
2048
2049 static struct value *
2050 arm_prologue_prev_register (struct frame_info *this_frame,
2051 void **this_cache,
2052 int prev_regnum)
2053 {
2054 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2055 struct arm_prologue_cache *cache;
2056
2057 if (*this_cache == NULL)
2058 *this_cache = arm_make_prologue_cache (this_frame);
2059 cache = *this_cache;
2060
2061 /* If we are asked to unwind the PC, then we need to return the LR
2062 instead. The prologue may save PC, but it will point into this
2063 frame's prologue, not the next frame's resume location. Also
2064 strip the saved T bit. A valid LR may have the low bit set, but
2065 a valid PC never does. */
2066 if (prev_regnum == ARM_PC_REGNUM)
2067 {
2068 CORE_ADDR lr;
2069
2070 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2071 return frame_unwind_got_constant (this_frame, prev_regnum,
2072 arm_addr_bits_remove (gdbarch, lr));
2073 }
2074
2075 /* SP is generally not saved to the stack, but this frame is
2076 identified by the next frame's stack pointer at the time of the call.
2077 The value was already reconstructed into PREV_SP. */
2078 if (prev_regnum == ARM_SP_REGNUM)
2079 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2080
2081 /* The CPSR may have been changed by the call instruction and by the
2082 called function. The only bit we can reconstruct is the T bit,
2083 by checking the low bit of LR as of the call. This is a reliable
2084 indicator of Thumb-ness except for some ARM v4T pre-interworking
2085 Thumb code, which could get away with a clear low bit as long as
2086 the called function did not use bx. Guess that all other
2087 bits are unchanged; the condition flags are presumably lost,
2088 but the processor status is likely valid. */
2089 if (prev_regnum == ARM_PS_REGNUM)
2090 {
2091 CORE_ADDR lr, cpsr;
2092 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2093
2094 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2095 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2096 if (IS_THUMB_ADDR (lr))
2097 cpsr |= t_bit;
2098 else
2099 cpsr &= ~t_bit;
2100 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2101 }
2102
2103 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2104 prev_regnum);
2105 }
2106
2107 struct frame_unwind arm_prologue_unwind = {
2108 NORMAL_FRAME,
2109 arm_prologue_this_id,
2110 arm_prologue_prev_register,
2111 NULL,
2112 default_frame_sniffer
2113 };
2114
2115 static struct arm_prologue_cache *
2116 arm_make_stub_cache (struct frame_info *this_frame)
2117 {
2118 struct arm_prologue_cache *cache;
2119
2120 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2121 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2122
2123 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2124
2125 return cache;
2126 }
2127
2128 /* Our frame ID for a stub frame is the current SP and LR. */
2129
2130 static void
2131 arm_stub_this_id (struct frame_info *this_frame,
2132 void **this_cache,
2133 struct frame_id *this_id)
2134 {
2135 struct arm_prologue_cache *cache;
2136
2137 if (*this_cache == NULL)
2138 *this_cache = arm_make_stub_cache (this_frame);
2139 cache = *this_cache;
2140
2141 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2142 }
2143
2144 static int
2145 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2146 struct frame_info *this_frame,
2147 void **this_prologue_cache)
2148 {
2149 CORE_ADDR addr_in_block;
2150 char dummy[4];
2151
2152 addr_in_block = get_frame_address_in_block (this_frame);
2153 if (in_plt_section (addr_in_block, NULL)
2154 /* We also use the stub winder if the target memory is unreadable
2155 to avoid having the prologue unwinder trying to read it. */
2156 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2157 return 1;
2158
2159 return 0;
2160 }
2161
2162 struct frame_unwind arm_stub_unwind = {
2163 NORMAL_FRAME,
2164 arm_stub_this_id,
2165 arm_prologue_prev_register,
2166 NULL,
2167 arm_stub_unwind_sniffer
2168 };
2169
2170 static CORE_ADDR
2171 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2172 {
2173 struct arm_prologue_cache *cache;
2174
2175 if (*this_cache == NULL)
2176 *this_cache = arm_make_prologue_cache (this_frame);
2177 cache = *this_cache;
2178
2179 return cache->prev_sp - cache->framesize;
2180 }
2181
2182 struct frame_base arm_normal_base = {
2183 &arm_prologue_unwind,
2184 arm_normal_frame_base,
2185 arm_normal_frame_base,
2186 arm_normal_frame_base
2187 };
2188
2189 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2190 dummy frame. The frame ID's base needs to match the TOS value
2191 saved by save_dummy_frame_tos() and returned from
2192 arm_push_dummy_call, and the PC needs to match the dummy frame's
2193 breakpoint. */
2194
2195 static struct frame_id
2196 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2197 {
2198 return frame_id_build (get_frame_register_unsigned (this_frame,
2199 ARM_SP_REGNUM),
2200 get_frame_pc (this_frame));
2201 }
2202
2203 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2204 be used to construct the previous frame's ID, after looking up the
2205 containing function). */
2206
2207 static CORE_ADDR
2208 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2209 {
2210 CORE_ADDR pc;
2211 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2212 return arm_addr_bits_remove (gdbarch, pc);
2213 }
2214
2215 static CORE_ADDR
2216 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2217 {
2218 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2219 }
2220
2221 static struct value *
2222 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2223 int regnum)
2224 {
2225 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2226 CORE_ADDR lr, cpsr;
2227 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2228
2229 switch (regnum)
2230 {
2231 case ARM_PC_REGNUM:
2232 /* The PC is normally copied from the return column, which
2233 describes saves of LR. However, that version may have an
2234 extra bit set to indicate Thumb state. The bit is not
2235 part of the PC. */
2236 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2237 return frame_unwind_got_constant (this_frame, regnum,
2238 arm_addr_bits_remove (gdbarch, lr));
2239
2240 case ARM_PS_REGNUM:
2241 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2242 cpsr = get_frame_register_unsigned (this_frame, regnum);
2243 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2244 if (IS_THUMB_ADDR (lr))
2245 cpsr |= t_bit;
2246 else
2247 cpsr &= ~t_bit;
2248 return frame_unwind_got_constant (this_frame, regnum, cpsr);
2249
2250 default:
2251 internal_error (__FILE__, __LINE__,
2252 _("Unexpected register %d"), regnum);
2253 }
2254 }
2255
2256 static void
2257 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
2258 struct dwarf2_frame_state_reg *reg,
2259 struct frame_info *this_frame)
2260 {
2261 switch (regnum)
2262 {
2263 case ARM_PC_REGNUM:
2264 case ARM_PS_REGNUM:
2265 reg->how = DWARF2_FRAME_REG_FN;
2266 reg->loc.fn = arm_dwarf2_prev_register;
2267 break;
2268 case ARM_SP_REGNUM:
2269 reg->how = DWARF2_FRAME_REG_CFA;
2270 break;
2271 }
2272 }
2273
2274 /* Return true if we are in the function's epilogue, i.e. after the
2275 instruction that destroyed the function's stack frame. */
2276
2277 static int
2278 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2279 {
2280 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2281 unsigned int insn, insn2;
2282 int found_return = 0, found_stack_adjust = 0;
2283 CORE_ADDR func_start, func_end;
2284 CORE_ADDR scan_pc;
2285 gdb_byte buf[4];
2286
2287 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2288 return 0;
2289
2290 /* The epilogue is a sequence of instructions along the following lines:
2291
2292 - add stack frame size to SP or FP
2293 - [if frame pointer used] restore SP from FP
2294 - restore registers from SP [may include PC]
2295 - a return-type instruction [if PC wasn't already restored]
2296
2297 In a first pass, we scan forward from the current PC and verify the
2298 instructions we find as compatible with this sequence, ending in a
2299 return instruction.
2300
2301 However, this is not sufficient to distinguish indirect function calls
2302 within a function from indirect tail calls in the epilogue in some cases.
2303 Therefore, if we didn't already find any SP-changing instruction during
2304 forward scan, we add a backward scanning heuristic to ensure we actually
2305 are in the epilogue. */
2306
2307 scan_pc = pc;
2308 while (scan_pc < func_end && !found_return)
2309 {
2310 if (target_read_memory (scan_pc, buf, 2))
2311 break;
2312
2313 scan_pc += 2;
2314 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2315
2316 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2317 found_return = 1;
2318 else if (insn == 0x46f7) /* mov pc, lr */
2319 found_return = 1;
2320 else if (insn == 0x46bd) /* mov sp, r7 */
2321 found_stack_adjust = 1;
2322 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2323 found_stack_adjust = 1;
2324 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
2325 {
2326 found_stack_adjust = 1;
2327 if (insn & 0x0100) /* <registers> include PC. */
2328 found_return = 1;
2329 }
2330 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
2331 {
2332 if (target_read_memory (scan_pc, buf, 2))
2333 break;
2334
2335 scan_pc += 2;
2336 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2337
2338 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2339 {
2340 found_stack_adjust = 1;
2341 if (insn2 & 0x8000) /* <registers> include PC. */
2342 found_return = 1;
2343 }
2344 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2345 && (insn2 & 0x0fff) == 0x0b04)
2346 {
2347 found_stack_adjust = 1;
2348 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
2349 found_return = 1;
2350 }
2351 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2352 && (insn2 & 0x0e00) == 0x0a00)
2353 found_stack_adjust = 1;
2354 else
2355 break;
2356 }
2357 else
2358 break;
2359 }
2360
2361 if (!found_return)
2362 return 0;
2363
2364 /* Since any instruction in the epilogue sequence, with the possible
2365 exception of return itself, updates the stack pointer, we need to
2366 scan backwards for at most one instruction. Try either a 16-bit or
2367 a 32-bit instruction. This is just a heuristic, so we do not worry
2368 too much about false positives. */
2369
2370 if (!found_stack_adjust)
2371 {
2372 if (pc - 4 < func_start)
2373 return 0;
2374 if (target_read_memory (pc - 4, buf, 4))
2375 return 0;
2376
2377 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2378 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
2379
2380 if (insn2 == 0x46bd) /* mov sp, r7 */
2381 found_stack_adjust = 1;
2382 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2383 found_stack_adjust = 1;
2384 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
2385 found_stack_adjust = 1;
2386 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2387 found_stack_adjust = 1;
2388 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2389 && (insn2 & 0x0fff) == 0x0b04)
2390 found_stack_adjust = 1;
2391 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2392 && (insn2 & 0x0e00) == 0x0a00)
2393 found_stack_adjust = 1;
2394 }
2395
2396 return found_stack_adjust;
2397 }
2398
2399 /* Return true if we are in the function's epilogue, i.e. after the
2400 instruction that destroyed the function's stack frame. */
2401
2402 static int
2403 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2404 {
2405 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2406 unsigned int insn;
2407 int found_return, found_stack_adjust;
2408 CORE_ADDR func_start, func_end;
2409
2410 if (arm_pc_is_thumb (gdbarch, pc))
2411 return thumb_in_function_epilogue_p (gdbarch, pc);
2412
2413 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2414 return 0;
2415
2416 /* We are in the epilogue if the previous instruction was a stack
2417 adjustment and the next instruction is a possible return (bx, mov
2418 pc, or pop). We could have to scan backwards to find the stack
2419 adjustment, or forwards to find the return, but this is a decent
2420 approximation. First scan forwards. */
2421
2422 found_return = 0;
2423 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
2424 if (bits (insn, 28, 31) != INST_NV)
2425 {
2426 if ((insn & 0x0ffffff0) == 0x012fff10)
2427 /* BX. */
2428 found_return = 1;
2429 else if ((insn & 0x0ffffff0) == 0x01a0f000)
2430 /* MOV PC. */
2431 found_return = 1;
2432 else if ((insn & 0x0fff0000) == 0x08bd0000
2433 && (insn & 0x0000c000) != 0)
2434 /* POP (LDMIA), including PC or LR. */
2435 found_return = 1;
2436 }
2437
2438 if (!found_return)
2439 return 0;
2440
2441 /* Scan backwards. This is just a heuristic, so do not worry about
2442 false positives from mode changes. */
2443
2444 if (pc < func_start + 4)
2445 return 0;
2446
2447 found_stack_adjust = 0;
2448 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
2449 if (bits (insn, 28, 31) != INST_NV)
2450 {
2451 if ((insn & 0x0df0f000) == 0x0080d000)
2452 /* ADD SP (register or immediate). */
2453 found_stack_adjust = 1;
2454 else if ((insn & 0x0df0f000) == 0x0040d000)
2455 /* SUB SP (register or immediate). */
2456 found_stack_adjust = 1;
2457 else if ((insn & 0x0ffffff0) == 0x01a0d000)
2458 /* MOV SP. */
2459 found_stack_adjust = 1;
2460 else if ((insn & 0x0fff0000) == 0x08bd0000)
2461 /* POP (LDMIA). */
2462 found_stack_adjust = 1;
2463 }
2464
2465 if (found_stack_adjust)
2466 return 1;
2467
2468 return 0;
2469 }
2470
2471
2472 /* When arguments must be pushed onto the stack, they go on in reverse
2473 order. The code below implements a FILO (stack) to do this. */
2474
2475 struct stack_item
2476 {
2477 int len;
2478 struct stack_item *prev;
2479 void *data;
2480 };
2481
2482 static struct stack_item *
2483 push_stack_item (struct stack_item *prev, const void *contents, int len)
2484 {
2485 struct stack_item *si;
2486 si = xmalloc (sizeof (struct stack_item));
2487 si->data = xmalloc (len);
2488 si->len = len;
2489 si->prev = prev;
2490 memcpy (si->data, contents, len);
2491 return si;
2492 }
2493
2494 static struct stack_item *
2495 pop_stack_item (struct stack_item *si)
2496 {
2497 struct stack_item *dead = si;
2498 si = si->prev;
2499 xfree (dead->data);
2500 xfree (dead);
2501 return si;
2502 }
2503
2504
2505 /* Return the alignment (in bytes) of the given type. */
2506
2507 static int
2508 arm_type_align (struct type *t)
2509 {
2510 int n;
2511 int align;
2512 int falign;
2513
2514 t = check_typedef (t);
2515 switch (TYPE_CODE (t))
2516 {
2517 default:
2518 /* Should never happen. */
2519 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
2520 return 4;
2521
2522 case TYPE_CODE_PTR:
2523 case TYPE_CODE_ENUM:
2524 case TYPE_CODE_INT:
2525 case TYPE_CODE_FLT:
2526 case TYPE_CODE_SET:
2527 case TYPE_CODE_RANGE:
2528 case TYPE_CODE_BITSTRING:
2529 case TYPE_CODE_REF:
2530 case TYPE_CODE_CHAR:
2531 case TYPE_CODE_BOOL:
2532 return TYPE_LENGTH (t);
2533
2534 case TYPE_CODE_ARRAY:
2535 case TYPE_CODE_COMPLEX:
2536 /* TODO: What about vector types? */
2537 return arm_type_align (TYPE_TARGET_TYPE (t));
2538
2539 case TYPE_CODE_STRUCT:
2540 case TYPE_CODE_UNION:
2541 align = 1;
2542 for (n = 0; n < TYPE_NFIELDS (t); n++)
2543 {
2544 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
2545 if (falign > align)
2546 align = falign;
2547 }
2548 return align;
2549 }
2550 }
2551
2552 /* Possible base types for a candidate for passing and returning in
2553 VFP registers. */
2554
2555 enum arm_vfp_cprc_base_type
2556 {
2557 VFP_CPRC_UNKNOWN,
2558 VFP_CPRC_SINGLE,
2559 VFP_CPRC_DOUBLE,
2560 VFP_CPRC_VEC64,
2561 VFP_CPRC_VEC128
2562 };
2563
2564 /* The length of one element of base type B. */
2565
2566 static unsigned
2567 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
2568 {
2569 switch (b)
2570 {
2571 case VFP_CPRC_SINGLE:
2572 return 4;
2573 case VFP_CPRC_DOUBLE:
2574 return 8;
2575 case VFP_CPRC_VEC64:
2576 return 8;
2577 case VFP_CPRC_VEC128:
2578 return 16;
2579 default:
2580 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2581 (int) b);
2582 }
2583 }
2584
2585 /* The character ('s', 'd' or 'q') for the type of VFP register used
2586 for passing base type B. */
2587
2588 static int
2589 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
2590 {
2591 switch (b)
2592 {
2593 case VFP_CPRC_SINGLE:
2594 return 's';
2595 case VFP_CPRC_DOUBLE:
2596 return 'd';
2597 case VFP_CPRC_VEC64:
2598 return 'd';
2599 case VFP_CPRC_VEC128:
2600 return 'q';
2601 default:
2602 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2603 (int) b);
2604 }
2605 }
2606
2607 /* Determine whether T may be part of a candidate for passing and
2608 returning in VFP registers, ignoring the limit on the total number
2609 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
2610 classification of the first valid component found; if it is not
2611 VFP_CPRC_UNKNOWN, all components must have the same classification
2612 as *BASE_TYPE. If it is found that T contains a type not permitted
2613 for passing and returning in VFP registers, a type differently
2614 classified from *BASE_TYPE, or two types differently classified
2615 from each other, return -1, otherwise return the total number of
2616 base-type elements found (possibly 0 in an empty structure or
2617 array). Vectors and complex types are not currently supported,
2618 matching the generic AAPCS support. */
2619
2620 static int
2621 arm_vfp_cprc_sub_candidate (struct type *t,
2622 enum arm_vfp_cprc_base_type *base_type)
2623 {
2624 t = check_typedef (t);
2625 switch (TYPE_CODE (t))
2626 {
2627 case TYPE_CODE_FLT:
2628 switch (TYPE_LENGTH (t))
2629 {
2630 case 4:
2631 if (*base_type == VFP_CPRC_UNKNOWN)
2632 *base_type = VFP_CPRC_SINGLE;
2633 else if (*base_type != VFP_CPRC_SINGLE)
2634 return -1;
2635 return 1;
2636
2637 case 8:
2638 if (*base_type == VFP_CPRC_UNKNOWN)
2639 *base_type = VFP_CPRC_DOUBLE;
2640 else if (*base_type != VFP_CPRC_DOUBLE)
2641 return -1;
2642 return 1;
2643
2644 default:
2645 return -1;
2646 }
2647 break;
2648
2649 case TYPE_CODE_ARRAY:
2650 {
2651 int count;
2652 unsigned unitlen;
2653 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
2654 if (count == -1)
2655 return -1;
2656 if (TYPE_LENGTH (t) == 0)
2657 {
2658 gdb_assert (count == 0);
2659 return 0;
2660 }
2661 else if (count == 0)
2662 return -1;
2663 unitlen = arm_vfp_cprc_unit_length (*base_type);
2664 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
2665 return TYPE_LENGTH (t) / unitlen;
2666 }
2667 break;
2668
2669 case TYPE_CODE_STRUCT:
2670 {
2671 int count = 0;
2672 unsigned unitlen;
2673 int i;
2674 for (i = 0; i < TYPE_NFIELDS (t); i++)
2675 {
2676 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2677 base_type);
2678 if (sub_count == -1)
2679 return -1;
2680 count += sub_count;
2681 }
2682 if (TYPE_LENGTH (t) == 0)
2683 {
2684 gdb_assert (count == 0);
2685 return 0;
2686 }
2687 else if (count == 0)
2688 return -1;
2689 unitlen = arm_vfp_cprc_unit_length (*base_type);
2690 if (TYPE_LENGTH (t) != unitlen * count)
2691 return -1;
2692 return count;
2693 }
2694
2695 case TYPE_CODE_UNION:
2696 {
2697 int count = 0;
2698 unsigned unitlen;
2699 int i;
2700 for (i = 0; i < TYPE_NFIELDS (t); i++)
2701 {
2702 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2703 base_type);
2704 if (sub_count == -1)
2705 return -1;
2706 count = (count > sub_count ? count : sub_count);
2707 }
2708 if (TYPE_LENGTH (t) == 0)
2709 {
2710 gdb_assert (count == 0);
2711 return 0;
2712 }
2713 else if (count == 0)
2714 return -1;
2715 unitlen = arm_vfp_cprc_unit_length (*base_type);
2716 if (TYPE_LENGTH (t) != unitlen * count)
2717 return -1;
2718 return count;
2719 }
2720
2721 default:
2722 break;
2723 }
2724
2725 return -1;
2726 }
2727
2728 /* Determine whether T is a VFP co-processor register candidate (CPRC)
2729 if passed to or returned from a non-variadic function with the VFP
2730 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
2731 *BASE_TYPE to the base type for T and *COUNT to the number of
2732 elements of that base type before returning. */
2733
2734 static int
2735 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
2736 int *count)
2737 {
2738 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
2739 int c = arm_vfp_cprc_sub_candidate (t, &b);
2740 if (c <= 0 || c > 4)
2741 return 0;
2742 *base_type = b;
2743 *count = c;
2744 return 1;
2745 }
2746
2747 /* Return 1 if the VFP ABI should be used for passing arguments to and
2748 returning values from a function of type FUNC_TYPE, 0
2749 otherwise. */
2750
2751 static int
2752 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
2753 {
2754 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2755 /* Variadic functions always use the base ABI. Assume that functions
2756 without debug info are not variadic. */
2757 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
2758 return 0;
2759 /* The VFP ABI is only supported as a variant of AAPCS. */
2760 if (tdep->arm_abi != ARM_ABI_AAPCS)
2761 return 0;
2762 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
2763 }
2764
2765 /* We currently only support passing parameters in integer registers, which
2766 conforms with GCC's default model, and VFP argument passing following
2767 the VFP variant of AAPCS. Several other variants exist and
2768 we should probably support some of them based on the selected ABI. */
2769
2770 static CORE_ADDR
2771 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
2772 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
2773 struct value **args, CORE_ADDR sp, int struct_return,
2774 CORE_ADDR struct_addr)
2775 {
2776 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2777 int argnum;
2778 int argreg;
2779 int nstack;
2780 struct stack_item *si = NULL;
2781 int use_vfp_abi;
2782 struct type *ftype;
2783 unsigned vfp_regs_free = (1 << 16) - 1;
2784
2785 /* Determine the type of this function and whether the VFP ABI
2786 applies. */
2787 ftype = check_typedef (value_type (function));
2788 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
2789 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
2790 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
2791
2792 /* Set the return address. For the ARM, the return breakpoint is
2793 always at BP_ADDR. */
2794 if (arm_pc_is_thumb (gdbarch, bp_addr))
2795 bp_addr |= 1;
2796 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
2797
2798 /* Walk through the list of args and determine how large a temporary
2799 stack is required. Need to take care here as structs may be
2800 passed on the stack, and we have to to push them. */
2801 nstack = 0;
2802
2803 argreg = ARM_A1_REGNUM;
2804 nstack = 0;
2805
2806 /* The struct_return pointer occupies the first parameter
2807 passing register. */
2808 if (struct_return)
2809 {
2810 if (arm_debug)
2811 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
2812 gdbarch_register_name (gdbarch, argreg),
2813 paddress (gdbarch, struct_addr));
2814 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
2815 argreg++;
2816 }
2817
2818 for (argnum = 0; argnum < nargs; argnum++)
2819 {
2820 int len;
2821 struct type *arg_type;
2822 struct type *target_type;
2823 enum type_code typecode;
2824 const bfd_byte *val;
2825 int align;
2826 enum arm_vfp_cprc_base_type vfp_base_type;
2827 int vfp_base_count;
2828 int may_use_core_reg = 1;
2829
2830 arg_type = check_typedef (value_type (args[argnum]));
2831 len = TYPE_LENGTH (arg_type);
2832 target_type = TYPE_TARGET_TYPE (arg_type);
2833 typecode = TYPE_CODE (arg_type);
2834 val = value_contents (args[argnum]);
2835
2836 align = arm_type_align (arg_type);
2837 /* Round alignment up to a whole number of words. */
2838 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
2839 /* Different ABIs have different maximum alignments. */
2840 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
2841 {
2842 /* The APCS ABI only requires word alignment. */
2843 align = INT_REGISTER_SIZE;
2844 }
2845 else
2846 {
2847 /* The AAPCS requires at most doubleword alignment. */
2848 if (align > INT_REGISTER_SIZE * 2)
2849 align = INT_REGISTER_SIZE * 2;
2850 }
2851
2852 if (use_vfp_abi
2853 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
2854 &vfp_base_count))
2855 {
2856 int regno;
2857 int unit_length;
2858 int shift;
2859 unsigned mask;
2860
2861 /* Because this is a CPRC it cannot go in a core register or
2862 cause a core register to be skipped for alignment.
2863 Either it goes in VFP registers and the rest of this loop
2864 iteration is skipped for this argument, or it goes on the
2865 stack (and the stack alignment code is correct for this
2866 case). */
2867 may_use_core_reg = 0;
2868
2869 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
2870 shift = unit_length / 4;
2871 mask = (1 << (shift * vfp_base_count)) - 1;
2872 for (regno = 0; regno < 16; regno += shift)
2873 if (((vfp_regs_free >> regno) & mask) == mask)
2874 break;
2875
2876 if (regno < 16)
2877 {
2878 int reg_char;
2879 int reg_scaled;
2880 int i;
2881
2882 vfp_regs_free &= ~(mask << regno);
2883 reg_scaled = regno / shift;
2884 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
2885 for (i = 0; i < vfp_base_count; i++)
2886 {
2887 char name_buf[4];
2888 int regnum;
2889 if (reg_char == 'q')
2890 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
2891 val + i * unit_length);
2892 else
2893 {
2894 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
2895 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
2896 strlen (name_buf));
2897 regcache_cooked_write (regcache, regnum,
2898 val + i * unit_length);
2899 }
2900 }
2901 continue;
2902 }
2903 else
2904 {
2905 /* This CPRC could not go in VFP registers, so all VFP
2906 registers are now marked as used. */
2907 vfp_regs_free = 0;
2908 }
2909 }
2910
2911 /* Push stack padding for dowubleword alignment. */
2912 if (nstack & (align - 1))
2913 {
2914 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2915 nstack += INT_REGISTER_SIZE;
2916 }
2917
2918 /* Doubleword aligned quantities must go in even register pairs. */
2919 if (may_use_core_reg
2920 && argreg <= ARM_LAST_ARG_REGNUM
2921 && align > INT_REGISTER_SIZE
2922 && argreg & 1)
2923 argreg++;
2924
2925 /* If the argument is a pointer to a function, and it is a
2926 Thumb function, create a LOCAL copy of the value and set
2927 the THUMB bit in it. */
2928 if (TYPE_CODE_PTR == typecode
2929 && target_type != NULL
2930 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
2931 {
2932 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
2933 if (arm_pc_is_thumb (gdbarch, regval))
2934 {
2935 bfd_byte *copy = alloca (len);
2936 store_unsigned_integer (copy, len, byte_order,
2937 MAKE_THUMB_ADDR (regval));
2938 val = copy;
2939 }
2940 }
2941
2942 /* Copy the argument to general registers or the stack in
2943 register-sized pieces. Large arguments are split between
2944 registers and stack. */
2945 while (len > 0)
2946 {
2947 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
2948
2949 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
2950 {
2951 /* The argument is being passed in a general purpose
2952 register. */
2953 CORE_ADDR regval
2954 = extract_unsigned_integer (val, partial_len, byte_order);
2955 if (byte_order == BFD_ENDIAN_BIG)
2956 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
2957 if (arm_debug)
2958 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
2959 argnum,
2960 gdbarch_register_name
2961 (gdbarch, argreg),
2962 phex (regval, INT_REGISTER_SIZE));
2963 regcache_cooked_write_unsigned (regcache, argreg, regval);
2964 argreg++;
2965 }
2966 else
2967 {
2968 /* Push the arguments onto the stack. */
2969 if (arm_debug)
2970 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
2971 argnum, nstack);
2972 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2973 nstack += INT_REGISTER_SIZE;
2974 }
2975
2976 len -= partial_len;
2977 val += partial_len;
2978 }
2979 }
2980 /* If we have an odd number of words to push, then decrement the stack
2981 by one word now, so first stack argument will be dword aligned. */
2982 if (nstack & 4)
2983 sp -= 4;
2984
2985 while (si)
2986 {
2987 sp -= si->len;
2988 write_memory (sp, si->data, si->len);
2989 si = pop_stack_item (si);
2990 }
2991
2992 /* Finally, update teh SP register. */
2993 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
2994
2995 return sp;
2996 }
2997
2998
2999 /* Always align the frame to an 8-byte boundary. This is required on
3000 some platforms and harmless on the rest. */
3001
3002 static CORE_ADDR
3003 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3004 {
3005 /* Align the stack to eight bytes. */
3006 return sp & ~ (CORE_ADDR) 7;
3007 }
3008
3009 static void
3010 print_fpu_flags (int flags)
3011 {
3012 if (flags & (1 << 0))
3013 fputs ("IVO ", stdout);
3014 if (flags & (1 << 1))
3015 fputs ("DVZ ", stdout);
3016 if (flags & (1 << 2))
3017 fputs ("OFL ", stdout);
3018 if (flags & (1 << 3))
3019 fputs ("UFL ", stdout);
3020 if (flags & (1 << 4))
3021 fputs ("INX ", stdout);
3022 putchar ('\n');
3023 }
3024
3025 /* Print interesting information about the floating point processor
3026 (if present) or emulator. */
3027 static void
3028 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3029 struct frame_info *frame, const char *args)
3030 {
3031 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3032 int type;
3033
3034 type = (status >> 24) & 127;
3035 if (status & (1 << 31))
3036 printf (_("Hardware FPU type %d\n"), type);
3037 else
3038 printf (_("Software FPU type %d\n"), type);
3039 /* i18n: [floating point unit] mask */
3040 fputs (_("mask: "), stdout);
3041 print_fpu_flags (status >> 16);
3042 /* i18n: [floating point unit] flags */
3043 fputs (_("flags: "), stdout);
3044 print_fpu_flags (status);
3045 }
3046
3047 /* Construct the ARM extended floating point type. */
3048 static struct type *
3049 arm_ext_type (struct gdbarch *gdbarch)
3050 {
3051 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3052
3053 if (!tdep->arm_ext_type)
3054 tdep->arm_ext_type
3055 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3056 floatformats_arm_ext);
3057
3058 return tdep->arm_ext_type;
3059 }
3060
3061 static struct type *
3062 arm_neon_double_type (struct gdbarch *gdbarch)
3063 {
3064 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3065
3066 if (tdep->neon_double_type == NULL)
3067 {
3068 struct type *t, *elem;
3069
3070 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3071 TYPE_CODE_UNION);
3072 elem = builtin_type (gdbarch)->builtin_uint8;
3073 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3074 elem = builtin_type (gdbarch)->builtin_uint16;
3075 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3076 elem = builtin_type (gdbarch)->builtin_uint32;
3077 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3078 elem = builtin_type (gdbarch)->builtin_uint64;
3079 append_composite_type_field (t, "u64", elem);
3080 elem = builtin_type (gdbarch)->builtin_float;
3081 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3082 elem = builtin_type (gdbarch)->builtin_double;
3083 append_composite_type_field (t, "f64", elem);
3084
3085 TYPE_VECTOR (t) = 1;
3086 TYPE_NAME (t) = "neon_d";
3087 tdep->neon_double_type = t;
3088 }
3089
3090 return tdep->neon_double_type;
3091 }
3092
3093 /* FIXME: The vector types are not correctly ordered on big-endian
3094 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3095 bits of d0 - regardless of what unit size is being held in d0. So
3096 the offset of the first uint8 in d0 is 7, but the offset of the
3097 first float is 4. This code works as-is for little-endian
3098 targets. */
3099
3100 static struct type *
3101 arm_neon_quad_type (struct gdbarch *gdbarch)
3102 {
3103 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3104
3105 if (tdep->neon_quad_type == NULL)
3106 {
3107 struct type *t, *elem;
3108
3109 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3110 TYPE_CODE_UNION);
3111 elem = builtin_type (gdbarch)->builtin_uint8;
3112 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3113 elem = builtin_type (gdbarch)->builtin_uint16;
3114 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3115 elem = builtin_type (gdbarch)->builtin_uint32;
3116 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3117 elem = builtin_type (gdbarch)->builtin_uint64;
3118 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3119 elem = builtin_type (gdbarch)->builtin_float;
3120 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3121 elem = builtin_type (gdbarch)->builtin_double;
3122 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3123
3124 TYPE_VECTOR (t) = 1;
3125 TYPE_NAME (t) = "neon_q";
3126 tdep->neon_quad_type = t;
3127 }
3128
3129 return tdep->neon_quad_type;
3130 }
3131
3132 /* Return the GDB type object for the "standard" data type of data in
3133 register N. */
3134
3135 static struct type *
3136 arm_register_type (struct gdbarch *gdbarch, int regnum)
3137 {
3138 int num_regs = gdbarch_num_regs (gdbarch);
3139
3140 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3141 && regnum >= num_regs && regnum < num_regs + 32)
3142 return builtin_type (gdbarch)->builtin_float;
3143
3144 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3145 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3146 return arm_neon_quad_type (gdbarch);
3147
3148 /* If the target description has register information, we are only
3149 in this function so that we can override the types of
3150 double-precision registers for NEON. */
3151 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3152 {
3153 struct type *t = tdesc_register_type (gdbarch, regnum);
3154
3155 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3156 && TYPE_CODE (t) == TYPE_CODE_FLT
3157 && gdbarch_tdep (gdbarch)->have_neon)
3158 return arm_neon_double_type (gdbarch);
3159 else
3160 return t;
3161 }
3162
3163 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3164 {
3165 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3166 return builtin_type (gdbarch)->builtin_void;
3167
3168 return arm_ext_type (gdbarch);
3169 }
3170 else if (regnum == ARM_SP_REGNUM)
3171 return builtin_type (gdbarch)->builtin_data_ptr;
3172 else if (regnum == ARM_PC_REGNUM)
3173 return builtin_type (gdbarch)->builtin_func_ptr;
3174 else if (regnum >= ARRAY_SIZE (arm_register_names))
3175 /* These registers are only supported on targets which supply
3176 an XML description. */
3177 return builtin_type (gdbarch)->builtin_int0;
3178 else
3179 return builtin_type (gdbarch)->builtin_uint32;
3180 }
3181
3182 /* Map a DWARF register REGNUM onto the appropriate GDB register
3183 number. */
3184
3185 static int
3186 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3187 {
3188 /* Core integer regs. */
3189 if (reg >= 0 && reg <= 15)
3190 return reg;
3191
3192 /* Legacy FPA encoding. These were once used in a way which
3193 overlapped with VFP register numbering, so their use is
3194 discouraged, but GDB doesn't support the ARM toolchain
3195 which used them for VFP. */
3196 if (reg >= 16 && reg <= 23)
3197 return ARM_F0_REGNUM + reg - 16;
3198
3199 /* New assignments for the FPA registers. */
3200 if (reg >= 96 && reg <= 103)
3201 return ARM_F0_REGNUM + reg - 96;
3202
3203 /* WMMX register assignments. */
3204 if (reg >= 104 && reg <= 111)
3205 return ARM_WCGR0_REGNUM + reg - 104;
3206
3207 if (reg >= 112 && reg <= 127)
3208 return ARM_WR0_REGNUM + reg - 112;
3209
3210 if (reg >= 192 && reg <= 199)
3211 return ARM_WC0_REGNUM + reg - 192;
3212
3213 /* VFP v2 registers. A double precision value is actually
3214 in d1 rather than s2, but the ABI only defines numbering
3215 for the single precision registers. This will "just work"
3216 in GDB for little endian targets (we'll read eight bytes,
3217 starting in s0 and then progressing to s1), but will be
3218 reversed on big endian targets with VFP. This won't
3219 be a problem for the new Neon quad registers; you're supposed
3220 to use DW_OP_piece for those. */
3221 if (reg >= 64 && reg <= 95)
3222 {
3223 char name_buf[4];
3224
3225 sprintf (name_buf, "s%d", reg - 64);
3226 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3227 strlen (name_buf));
3228 }
3229
3230 /* VFP v3 / Neon registers. This range is also used for VFP v2
3231 registers, except that it now describes d0 instead of s0. */
3232 if (reg >= 256 && reg <= 287)
3233 {
3234 char name_buf[4];
3235
3236 sprintf (name_buf, "d%d", reg - 256);
3237 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3238 strlen (name_buf));
3239 }
3240
3241 return -1;
3242 }
3243
3244 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
3245 static int
3246 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
3247 {
3248 int reg = regnum;
3249 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
3250
3251 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
3252 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
3253
3254 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
3255 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
3256
3257 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
3258 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
3259
3260 if (reg < NUM_GREGS)
3261 return SIM_ARM_R0_REGNUM + reg;
3262 reg -= NUM_GREGS;
3263
3264 if (reg < NUM_FREGS)
3265 return SIM_ARM_FP0_REGNUM + reg;
3266 reg -= NUM_FREGS;
3267
3268 if (reg < NUM_SREGS)
3269 return SIM_ARM_FPS_REGNUM + reg;
3270 reg -= NUM_SREGS;
3271
3272 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
3273 }
3274
3275 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
3276 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
3277 It is thought that this is is the floating-point register format on
3278 little-endian systems. */
3279
3280 static void
3281 convert_from_extended (const struct floatformat *fmt, const void *ptr,
3282 void *dbl, int endianess)
3283 {
3284 DOUBLEST d;
3285
3286 if (endianess == BFD_ENDIAN_BIG)
3287 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
3288 else
3289 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
3290 ptr, &d);
3291 floatformat_from_doublest (fmt, &d, dbl);
3292 }
3293
3294 static void
3295 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
3296 int endianess)
3297 {
3298 DOUBLEST d;
3299
3300 floatformat_to_doublest (fmt, ptr, &d);
3301 if (endianess == BFD_ENDIAN_BIG)
3302 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
3303 else
3304 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
3305 &d, dbl);
3306 }
3307
3308 static int
3309 condition_true (unsigned long cond, unsigned long status_reg)
3310 {
3311 if (cond == INST_AL || cond == INST_NV)
3312 return 1;
3313
3314 switch (cond)
3315 {
3316 case INST_EQ:
3317 return ((status_reg & FLAG_Z) != 0);
3318 case INST_NE:
3319 return ((status_reg & FLAG_Z) == 0);
3320 case INST_CS:
3321 return ((status_reg & FLAG_C) != 0);
3322 case INST_CC:
3323 return ((status_reg & FLAG_C) == 0);
3324 case INST_MI:
3325 return ((status_reg & FLAG_N) != 0);
3326 case INST_PL:
3327 return ((status_reg & FLAG_N) == 0);
3328 case INST_VS:
3329 return ((status_reg & FLAG_V) != 0);
3330 case INST_VC:
3331 return ((status_reg & FLAG_V) == 0);
3332 case INST_HI:
3333 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
3334 case INST_LS:
3335 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
3336 case INST_GE:
3337 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
3338 case INST_LT:
3339 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
3340 case INST_GT:
3341 return (((status_reg & FLAG_Z) == 0)
3342 && (((status_reg & FLAG_N) == 0)
3343 == ((status_reg & FLAG_V) == 0)));
3344 case INST_LE:
3345 return (((status_reg & FLAG_Z) != 0)
3346 || (((status_reg & FLAG_N) == 0)
3347 != ((status_reg & FLAG_V) == 0)));
3348 }
3349 return 1;
3350 }
3351
3352 static unsigned long
3353 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
3354 unsigned long pc_val, unsigned long status_reg)
3355 {
3356 unsigned long res, shift;
3357 int rm = bits (inst, 0, 3);
3358 unsigned long shifttype = bits (inst, 5, 6);
3359
3360 if (bit (inst, 4))
3361 {
3362 int rs = bits (inst, 8, 11);
3363 shift = (rs == 15 ? pc_val + 8
3364 : get_frame_register_unsigned (frame, rs)) & 0xFF;
3365 }
3366 else
3367 shift = bits (inst, 7, 11);
3368
3369 res = (rm == 15
3370 ? (pc_val + (bit (inst, 4) ? 12 : 8))
3371 : get_frame_register_unsigned (frame, rm));
3372
3373 switch (shifttype)
3374 {
3375 case 0: /* LSL */
3376 res = shift >= 32 ? 0 : res << shift;
3377 break;
3378
3379 case 1: /* LSR */
3380 res = shift >= 32 ? 0 : res >> shift;
3381 break;
3382
3383 case 2: /* ASR */
3384 if (shift >= 32)
3385 shift = 31;
3386 res = ((res & 0x80000000L)
3387 ? ~((~res) >> shift) : res >> shift);
3388 break;
3389
3390 case 3: /* ROR/RRX */
3391 shift &= 31;
3392 if (shift == 0)
3393 res = (res >> 1) | (carry ? 0x80000000L : 0);
3394 else
3395 res = (res >> shift) | (res << (32 - shift));
3396 break;
3397 }
3398
3399 return res & 0xffffffff;
3400 }
3401
3402 /* Return number of 1-bits in VAL. */
3403
3404 static int
3405 bitcount (unsigned long val)
3406 {
3407 int nbits;
3408 for (nbits = 0; val != 0; nbits++)
3409 val &= val - 1; /* Delete rightmost 1-bit in val. */
3410 return nbits;
3411 }
3412
3413 /* Return the size in bytes of the complete Thumb instruction whose
3414 first halfword is INST1. */
3415
3416 static int
3417 thumb_insn_size (unsigned short inst1)
3418 {
3419 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3420 return 4;
3421 else
3422 return 2;
3423 }
3424
3425 static int
3426 thumb_advance_itstate (unsigned int itstate)
3427 {
3428 /* Preserve IT[7:5], the first three bits of the condition. Shift
3429 the upcoming condition flags left by one bit. */
3430 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
3431
3432 /* If we have finished the IT block, clear the state. */
3433 if ((itstate & 0x0f) == 0)
3434 itstate = 0;
3435
3436 return itstate;
3437 }
3438
3439 /* Find the next PC after the current instruction executes. In some
3440 cases we can not statically determine the answer (see the IT state
3441 handling in this function); in that case, a breakpoint may be
3442 inserted in addition to the returned PC, which will be used to set
3443 another breakpoint by our caller. */
3444
3445 static CORE_ADDR
3446 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3447 {
3448 struct gdbarch *gdbarch = get_frame_arch (frame);
3449 struct address_space *aspace = get_frame_address_space (frame);
3450 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3451 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3452 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
3453 unsigned short inst1;
3454 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
3455 unsigned long offset;
3456 ULONGEST status, itstate;
3457
3458 nextpc = MAKE_THUMB_ADDR (nextpc);
3459 pc_val = MAKE_THUMB_ADDR (pc_val);
3460
3461 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3462
3463 /* Thumb-2 conditional execution support. There are eight bits in
3464 the CPSR which describe conditional execution state. Once
3465 reconstructed (they're in a funny order), the low five bits
3466 describe the low bit of the condition for each instruction and
3467 how many instructions remain. The high three bits describe the
3468 base condition. One of the low four bits will be set if an IT
3469 block is active. These bits read as zero on earlier
3470 processors. */
3471 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3472 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
3473
3474 /* If-Then handling. On GNU/Linux, where this routine is used, we
3475 use an undefined instruction as a breakpoint. Unlike BKPT, IT
3476 can disable execution of the undefined instruction. So we might
3477 miss the breakpoint if we set it on a skipped conditional
3478 instruction. Because conditional instructions can change the
3479 flags, affecting the execution of further instructions, we may
3480 need to set two breakpoints. */
3481
3482 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
3483 {
3484 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3485 {
3486 /* An IT instruction. Because this instruction does not
3487 modify the flags, we can accurately predict the next
3488 executed instruction. */
3489 itstate = inst1 & 0x00ff;
3490 pc += thumb_insn_size (inst1);
3491
3492 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3493 {
3494 inst1 = read_memory_unsigned_integer (pc, 2,
3495 byte_order_for_code);
3496 pc += thumb_insn_size (inst1);
3497 itstate = thumb_advance_itstate (itstate);
3498 }
3499
3500 return MAKE_THUMB_ADDR (pc);
3501 }
3502 else if (itstate != 0)
3503 {
3504 /* We are in a conditional block. Check the condition. */
3505 if (! condition_true (itstate >> 4, status))
3506 {
3507 /* Advance to the next executed instruction. */
3508 pc += thumb_insn_size (inst1);
3509 itstate = thumb_advance_itstate (itstate);
3510
3511 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3512 {
3513 inst1 = read_memory_unsigned_integer (pc, 2,
3514 byte_order_for_code);
3515 pc += thumb_insn_size (inst1);
3516 itstate = thumb_advance_itstate (itstate);
3517 }
3518
3519 return MAKE_THUMB_ADDR (pc);
3520 }
3521 else if ((itstate & 0x0f) == 0x08)
3522 {
3523 /* This is the last instruction of the conditional
3524 block, and it is executed. We can handle it normally
3525 because the following instruction is not conditional,
3526 and we must handle it normally because it is
3527 permitted to branch. Fall through. */
3528 }
3529 else
3530 {
3531 int cond_negated;
3532
3533 /* There are conditional instructions after this one.
3534 If this instruction modifies the flags, then we can
3535 not predict what the next executed instruction will
3536 be. Fortunately, this instruction is architecturally
3537 forbidden to branch; we know it will fall through.
3538 Start by skipping past it. */
3539 pc += thumb_insn_size (inst1);
3540 itstate = thumb_advance_itstate (itstate);
3541
3542 /* Set a breakpoint on the following instruction. */
3543 gdb_assert ((itstate & 0x0f) != 0);
3544 if (insert_bkpt)
3545 insert_single_step_breakpoint (gdbarch, aspace, pc);
3546 cond_negated = (itstate >> 4) & 1;
3547
3548 /* Skip all following instructions with the same
3549 condition. If there is a later instruction in the IT
3550 block with the opposite condition, set the other
3551 breakpoint there. If not, then set a breakpoint on
3552 the instruction after the IT block. */
3553 do
3554 {
3555 inst1 = read_memory_unsigned_integer (pc, 2,
3556 byte_order_for_code);
3557 pc += thumb_insn_size (inst1);
3558 itstate = thumb_advance_itstate (itstate);
3559 }
3560 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
3561
3562 return MAKE_THUMB_ADDR (pc);
3563 }
3564 }
3565 }
3566 else if (itstate & 0x0f)
3567 {
3568 /* We are in a conditional block. Check the condition. */
3569 int cond = itstate >> 4;
3570
3571 if (! condition_true (cond, status))
3572 {
3573 /* Advance to the next instruction. All the 32-bit
3574 instructions share a common prefix. */
3575 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3576 return MAKE_THUMB_ADDR (pc + 4);
3577 else
3578 return MAKE_THUMB_ADDR (pc + 2);
3579 }
3580
3581 /* Otherwise, handle the instruction normally. */
3582 }
3583
3584 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
3585 {
3586 CORE_ADDR sp;
3587
3588 /* Fetch the saved PC from the stack. It's stored above
3589 all of the other registers. */
3590 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
3591 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
3592 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
3593 }
3594 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
3595 {
3596 unsigned long cond = bits (inst1, 8, 11);
3597 if (cond == 0x0f) /* 0x0f = SWI */
3598 {
3599 struct gdbarch_tdep *tdep;
3600 tdep = gdbarch_tdep (gdbarch);
3601
3602 if (tdep->syscall_next_pc != NULL)
3603 nextpc = tdep->syscall_next_pc (frame);
3604
3605 }
3606 else if (cond != 0x0f && condition_true (cond, status))
3607 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
3608 }
3609 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
3610 {
3611 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
3612 }
3613 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
3614 {
3615 unsigned short inst2;
3616 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
3617
3618 /* Default to the next instruction. */
3619 nextpc = pc + 4;
3620 nextpc = MAKE_THUMB_ADDR (nextpc);
3621
3622 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
3623 {
3624 /* Branches and miscellaneous control instructions. */
3625
3626 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
3627 {
3628 /* B, BL, BLX. */
3629 int j1, j2, imm1, imm2;
3630
3631 imm1 = sbits (inst1, 0, 10);
3632 imm2 = bits (inst2, 0, 10);
3633 j1 = bit (inst2, 13);
3634 j2 = bit (inst2, 11);
3635
3636 offset = ((imm1 << 12) + (imm2 << 1));
3637 offset ^= ((!j2) << 22) | ((!j1) << 23);
3638
3639 nextpc = pc_val + offset;
3640 /* For BLX make sure to clear the low bits. */
3641 if (bit (inst2, 12) == 0)
3642 nextpc = nextpc & 0xfffffffc;
3643 }
3644 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
3645 {
3646 /* SUBS PC, LR, #imm8. */
3647 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
3648 nextpc -= inst2 & 0x00ff;
3649 }
3650 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
3651 {
3652 /* Conditional branch. */
3653 if (condition_true (bits (inst1, 6, 9), status))
3654 {
3655 int sign, j1, j2, imm1, imm2;
3656
3657 sign = sbits (inst1, 10, 10);
3658 imm1 = bits (inst1, 0, 5);
3659 imm2 = bits (inst2, 0, 10);
3660 j1 = bit (inst2, 13);
3661 j2 = bit (inst2, 11);
3662
3663 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
3664 offset += (imm1 << 12) + (imm2 << 1);
3665
3666 nextpc = pc_val + offset;
3667 }
3668 }
3669 }
3670 else if ((inst1 & 0xfe50) == 0xe810)
3671 {
3672 /* Load multiple or RFE. */
3673 int rn, offset, load_pc = 1;
3674
3675 rn = bits (inst1, 0, 3);
3676 if (bit (inst1, 7) && !bit (inst1, 8))
3677 {
3678 /* LDMIA or POP */
3679 if (!bit (inst2, 15))
3680 load_pc = 0;
3681 offset = bitcount (inst2) * 4 - 4;
3682 }
3683 else if (!bit (inst1, 7) && bit (inst1, 8))
3684 {
3685 /* LDMDB */
3686 if (!bit (inst2, 15))
3687 load_pc = 0;
3688 offset = -4;
3689 }
3690 else if (bit (inst1, 7) && bit (inst1, 8))
3691 {
3692 /* RFEIA */
3693 offset = 0;
3694 }
3695 else if (!bit (inst1, 7) && !bit (inst1, 8))
3696 {
3697 /* RFEDB */
3698 offset = -8;
3699 }
3700 else
3701 load_pc = 0;
3702
3703 if (load_pc)
3704 {
3705 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
3706 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
3707 }
3708 }
3709 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
3710 {
3711 /* MOV PC or MOVS PC. */
3712 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3713 nextpc = MAKE_THUMB_ADDR (nextpc);
3714 }
3715 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
3716 {
3717 /* LDR PC. */
3718 CORE_ADDR base;
3719 int rn, load_pc = 1;
3720
3721 rn = bits (inst1, 0, 3);
3722 base = get_frame_register_unsigned (frame, rn);
3723 if (rn == 15)
3724 {
3725 base = (base + 4) & ~(CORE_ADDR) 0x3;
3726 if (bit (inst1, 7))
3727 base += bits (inst2, 0, 11);
3728 else
3729 base -= bits (inst2, 0, 11);
3730 }
3731 else if (bit (inst1, 7))
3732 base += bits (inst2, 0, 11);
3733 else if (bit (inst2, 11))
3734 {
3735 if (bit (inst2, 10))
3736 {
3737 if (bit (inst2, 9))
3738 base += bits (inst2, 0, 7);
3739 else
3740 base -= bits (inst2, 0, 7);
3741 }
3742 }
3743 else if ((inst2 & 0x0fc0) == 0x0000)
3744 {
3745 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
3746 base += get_frame_register_unsigned (frame, rm) << shift;
3747 }
3748 else
3749 /* Reserved. */
3750 load_pc = 0;
3751
3752 if (load_pc)
3753 nextpc = get_frame_memory_unsigned (frame, base, 4);
3754 }
3755 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
3756 {
3757 /* TBB. */
3758 CORE_ADDR tbl_reg, table, offset, length;
3759
3760 tbl_reg = bits (inst1, 0, 3);
3761 if (tbl_reg == 0x0f)
3762 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3763 else
3764 table = get_frame_register_unsigned (frame, tbl_reg);
3765
3766 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3767 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
3768 nextpc = pc_val + length;
3769 }
3770 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
3771 {
3772 /* TBH. */
3773 CORE_ADDR tbl_reg, table, offset, length;
3774
3775 tbl_reg = bits (inst1, 0, 3);
3776 if (tbl_reg == 0x0f)
3777 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3778 else
3779 table = get_frame_register_unsigned (frame, tbl_reg);
3780
3781 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3782 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
3783 nextpc = pc_val + length;
3784 }
3785 }
3786 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
3787 {
3788 if (bits (inst1, 3, 6) == 0x0f)
3789 nextpc = pc_val;
3790 else
3791 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3792 }
3793 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
3794 {
3795 if (bits (inst1, 3, 6) == 0x0f)
3796 nextpc = pc_val;
3797 else
3798 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3799
3800 nextpc = MAKE_THUMB_ADDR (nextpc);
3801 }
3802 else if ((inst1 & 0xf500) == 0xb100)
3803 {
3804 /* CBNZ or CBZ. */
3805 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
3806 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
3807
3808 if (bit (inst1, 11) && reg != 0)
3809 nextpc = pc_val + imm;
3810 else if (!bit (inst1, 11) && reg == 0)
3811 nextpc = pc_val + imm;
3812 }
3813 return nextpc;
3814 }
3815
3816 /* Get the raw next address. PC is the current program counter, in
3817 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
3818 the alternative next instruction if there are two options.
3819
3820 The value returned has the execution state of the next instruction
3821 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
3822 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
3823 address. */
3824
3825 static CORE_ADDR
3826 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3827 {
3828 struct gdbarch *gdbarch = get_frame_arch (frame);
3829 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3830 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3831 unsigned long pc_val;
3832 unsigned long this_instr;
3833 unsigned long status;
3834 CORE_ADDR nextpc;
3835
3836 if (arm_frame_is_thumb (frame))
3837 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
3838
3839 pc_val = (unsigned long) pc;
3840 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3841
3842 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3843 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
3844
3845 if (bits (this_instr, 28, 31) == INST_NV)
3846 switch (bits (this_instr, 24, 27))
3847 {
3848 case 0xa:
3849 case 0xb:
3850 {
3851 /* Branch with Link and change to Thumb. */
3852 nextpc = BranchDest (pc, this_instr);
3853 nextpc |= bit (this_instr, 24) << 1;
3854 nextpc = MAKE_THUMB_ADDR (nextpc);
3855 break;
3856 }
3857 case 0xc:
3858 case 0xd:
3859 case 0xe:
3860 /* Coprocessor register transfer. */
3861 if (bits (this_instr, 12, 15) == 15)
3862 error (_("Invalid update to pc in instruction"));
3863 break;
3864 }
3865 else if (condition_true (bits (this_instr, 28, 31), status))
3866 {
3867 switch (bits (this_instr, 24, 27))
3868 {
3869 case 0x0:
3870 case 0x1: /* data processing */
3871 case 0x2:
3872 case 0x3:
3873 {
3874 unsigned long operand1, operand2, result = 0;
3875 unsigned long rn;
3876 int c;
3877
3878 if (bits (this_instr, 12, 15) != 15)
3879 break;
3880
3881 if (bits (this_instr, 22, 25) == 0
3882 && bits (this_instr, 4, 7) == 9) /* multiply */
3883 error (_("Invalid update to pc in instruction"));
3884
3885 /* BX <reg>, BLX <reg> */
3886 if (bits (this_instr, 4, 27) == 0x12fff1
3887 || bits (this_instr, 4, 27) == 0x12fff3)
3888 {
3889 rn = bits (this_instr, 0, 3);
3890 nextpc = (rn == 15) ? pc_val + 8
3891 : get_frame_register_unsigned (frame, rn);
3892 return nextpc;
3893 }
3894
3895 /* Multiply into PC. */
3896 c = (status & FLAG_C) ? 1 : 0;
3897 rn = bits (this_instr, 16, 19);
3898 operand1 = (rn == 15) ? pc_val + 8
3899 : get_frame_register_unsigned (frame, rn);
3900
3901 if (bit (this_instr, 25))
3902 {
3903 unsigned long immval = bits (this_instr, 0, 7);
3904 unsigned long rotate = 2 * bits (this_instr, 8, 11);
3905 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
3906 & 0xffffffff;
3907 }
3908 else /* operand 2 is a shifted register. */
3909 operand2 = shifted_reg_val (frame, this_instr, c,
3910 pc_val, status);
3911
3912 switch (bits (this_instr, 21, 24))
3913 {
3914 case 0x0: /*and */
3915 result = operand1 & operand2;
3916 break;
3917
3918 case 0x1: /*eor */
3919 result = operand1 ^ operand2;
3920 break;
3921
3922 case 0x2: /*sub */
3923 result = operand1 - operand2;
3924 break;
3925
3926 case 0x3: /*rsb */
3927 result = operand2 - operand1;
3928 break;
3929
3930 case 0x4: /*add */
3931 result = operand1 + operand2;
3932 break;
3933
3934 case 0x5: /*adc */
3935 result = operand1 + operand2 + c;
3936 break;
3937
3938 case 0x6: /*sbc */
3939 result = operand1 - operand2 + c;
3940 break;
3941
3942 case 0x7: /*rsc */
3943 result = operand2 - operand1 + c;
3944 break;
3945
3946 case 0x8:
3947 case 0x9:
3948 case 0xa:
3949 case 0xb: /* tst, teq, cmp, cmn */
3950 result = (unsigned long) nextpc;
3951 break;
3952
3953 case 0xc: /*orr */
3954 result = operand1 | operand2;
3955 break;
3956
3957 case 0xd: /*mov */
3958 /* Always step into a function. */
3959 result = operand2;
3960 break;
3961
3962 case 0xe: /*bic */
3963 result = operand1 & ~operand2;
3964 break;
3965
3966 case 0xf: /*mvn */
3967 result = ~operand2;
3968 break;
3969 }
3970
3971 /* In 26-bit APCS the bottom two bits of the result are
3972 ignored, and we always end up in ARM state. */
3973 if (!arm_apcs_32)
3974 nextpc = arm_addr_bits_remove (gdbarch, result);
3975 else
3976 nextpc = result;
3977
3978 break;
3979 }
3980
3981 case 0x4:
3982 case 0x5: /* data transfer */
3983 case 0x6:
3984 case 0x7:
3985 if (bit (this_instr, 20))
3986 {
3987 /* load */
3988 if (bits (this_instr, 12, 15) == 15)
3989 {
3990 /* rd == pc */
3991 unsigned long rn;
3992 unsigned long base;
3993
3994 if (bit (this_instr, 22))
3995 error (_("Invalid update to pc in instruction"));
3996
3997 /* byte write to PC */
3998 rn = bits (this_instr, 16, 19);
3999 base = (rn == 15) ? pc_val + 8
4000 : get_frame_register_unsigned (frame, rn);
4001 if (bit (this_instr, 24))
4002 {
4003 /* pre-indexed */
4004 int c = (status & FLAG_C) ? 1 : 0;
4005 unsigned long offset =
4006 (bit (this_instr, 25)
4007 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4008 : bits (this_instr, 0, 11));
4009
4010 if (bit (this_instr, 23))
4011 base += offset;
4012 else
4013 base -= offset;
4014 }
4015 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
4016 4, byte_order);
4017 }
4018 }
4019 break;
4020
4021 case 0x8:
4022 case 0x9: /* block transfer */
4023 if (bit (this_instr, 20))
4024 {
4025 /* LDM */
4026 if (bit (this_instr, 15))
4027 {
4028 /* loading pc */
4029 int offset = 0;
4030
4031 if (bit (this_instr, 23))
4032 {
4033 /* up */
4034 unsigned long reglist = bits (this_instr, 0, 14);
4035 offset = bitcount (reglist) * 4;
4036 if (bit (this_instr, 24)) /* pre */
4037 offset += 4;
4038 }
4039 else if (bit (this_instr, 24))
4040 offset = -4;
4041
4042 {
4043 unsigned long rn_val =
4044 get_frame_register_unsigned (frame,
4045 bits (this_instr, 16, 19));
4046 nextpc =
4047 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
4048 + offset),
4049 4, byte_order);
4050 }
4051 }
4052 }
4053 break;
4054
4055 case 0xb: /* branch & link */
4056 case 0xa: /* branch */
4057 {
4058 nextpc = BranchDest (pc, this_instr);
4059 break;
4060 }
4061
4062 case 0xc:
4063 case 0xd:
4064 case 0xe: /* coproc ops */
4065 break;
4066 case 0xf: /* SWI */
4067 {
4068 struct gdbarch_tdep *tdep;
4069 tdep = gdbarch_tdep (gdbarch);
4070
4071 if (tdep->syscall_next_pc != NULL)
4072 nextpc = tdep->syscall_next_pc (frame);
4073
4074 }
4075 break;
4076
4077 default:
4078 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4079 return (pc);
4080 }
4081 }
4082
4083 return nextpc;
4084 }
4085
4086 CORE_ADDR
4087 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4088 {
4089 struct gdbarch *gdbarch = get_frame_arch (frame);
4090 CORE_ADDR nextpc =
4091 gdbarch_addr_bits_remove (gdbarch,
4092 arm_get_next_pc_raw (frame, pc, TRUE));
4093 if (nextpc == pc)
4094 error (_("Infinite loop detected"));
4095 return nextpc;
4096 }
4097
4098 /* single_step() is called just before we want to resume the inferior,
4099 if we want to single-step it but there is no hardware or kernel
4100 single-step support. We find the target of the coming instruction
4101 and breakpoint it. */
4102
4103 int
4104 arm_software_single_step (struct frame_info *frame)
4105 {
4106 struct gdbarch *gdbarch = get_frame_arch (frame);
4107 struct address_space *aspace = get_frame_address_space (frame);
4108
4109 /* NOTE: This may insert the wrong breakpoint instruction when
4110 single-stepping over a mode-changing instruction, if the
4111 CPSR heuristics are used. */
4112
4113 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4114 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4115
4116 return 1;
4117 }
4118
4119 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4120 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4121 NULL if an error occurs. BUF is freed. */
4122
4123 static gdb_byte *
4124 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4125 int old_len, int new_len)
4126 {
4127 gdb_byte *new_buf, *middle;
4128 int bytes_to_read = new_len - old_len;
4129
4130 new_buf = xmalloc (new_len);
4131 memcpy (new_buf + bytes_to_read, buf, old_len);
4132 xfree (buf);
4133 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4134 {
4135 xfree (new_buf);
4136 return NULL;
4137 }
4138 return new_buf;
4139 }
4140
4141 /* An IT block is at most the 2-byte IT instruction followed by
4142 four 4-byte instructions. The furthest back we must search to
4143 find an IT block that affects the current instruction is thus
4144 2 + 3 * 4 == 14 bytes. */
4145 #define MAX_IT_BLOCK_PREFIX 14
4146
4147 /* Use a quick scan if there are more than this many bytes of
4148 code. */
4149 #define IT_SCAN_THRESHOLD 32
4150
4151 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4152 A breakpoint in an IT block may not be hit, depending on the
4153 condition flags. */
4154 static CORE_ADDR
4155 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4156 {
4157 gdb_byte *buf;
4158 char map_type;
4159 CORE_ADDR boundary, func_start;
4160 int buf_len, buf2_len;
4161 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4162 int i, any, last_it, last_it_count;
4163
4164 /* If we are using BKPT breakpoints, none of this is necessary. */
4165 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4166 return bpaddr;
4167
4168 /* ARM mode does not have this problem. */
4169 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4170 return bpaddr;
4171
4172 /* We are setting a breakpoint in Thumb code that could potentially
4173 contain an IT block. The first step is to find how much Thumb
4174 code there is; we do not need to read outside of known Thumb
4175 sequences. */
4176 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4177 if (map_type == 0)
4178 /* Thumb-2 code must have mapping symbols to have a chance. */
4179 return bpaddr;
4180
4181 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4182
4183 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4184 && func_start > boundary)
4185 boundary = func_start;
4186
4187 /* Search for a candidate IT instruction. We have to do some fancy
4188 footwork to distinguish a real IT instruction from the second
4189 half of a 32-bit instruction, but there is no need for that if
4190 there's no candidate. */
4191 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4192 if (buf_len == 0)
4193 /* No room for an IT instruction. */
4194 return bpaddr;
4195
4196 buf = xmalloc (buf_len);
4197 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4198 return bpaddr;
4199 any = 0;
4200 for (i = 0; i < buf_len; i += 2)
4201 {
4202 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4203 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4204 {
4205 any = 1;
4206 break;
4207 }
4208 }
4209 if (any == 0)
4210 {
4211 xfree (buf);
4212 return bpaddr;
4213 }
4214
4215 /* OK, the code bytes before this instruction contain at least one
4216 halfword which resembles an IT instruction. We know that it's
4217 Thumb code, but there are still two possibilities. Either the
4218 halfword really is an IT instruction, or it is the second half of
4219 a 32-bit Thumb instruction. The only way we can tell is to
4220 scan forwards from a known instruction boundary. */
4221 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
4222 {
4223 int definite;
4224
4225 /* There's a lot of code before this instruction. Start with an
4226 optimistic search; it's easy to recognize halfwords that can
4227 not be the start of a 32-bit instruction, and use that to
4228 lock on to the instruction boundaries. */
4229 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4230 if (buf == NULL)
4231 return bpaddr;
4232 buf_len = IT_SCAN_THRESHOLD;
4233
4234 definite = 0;
4235 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
4236 {
4237 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4238 if (thumb_insn_size (inst1) == 2)
4239 {
4240 definite = 1;
4241 break;
4242 }
4243 }
4244
4245 /* At this point, if DEFINITE, BUF[I] is the first place we
4246 are sure that we know the instruction boundaries, and it is far
4247 enough from BPADDR that we could not miss an IT instruction
4248 affecting BPADDR. If ! DEFINITE, give up - start from a
4249 known boundary. */
4250 if (! definite)
4251 {
4252 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
4253 bpaddr - boundary);
4254 if (buf == NULL)
4255 return bpaddr;
4256 buf_len = bpaddr - boundary;
4257 i = 0;
4258 }
4259 }
4260 else
4261 {
4262 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4263 if (buf == NULL)
4264 return bpaddr;
4265 buf_len = bpaddr - boundary;
4266 i = 0;
4267 }
4268
4269 /* Scan forwards. Find the last IT instruction before BPADDR. */
4270 last_it = -1;
4271 last_it_count = 0;
4272 while (i < buf_len)
4273 {
4274 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4275 last_it_count--;
4276 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4277 {
4278 last_it = i;
4279 if (inst1 & 0x0001)
4280 last_it_count = 4;
4281 else if (inst1 & 0x0002)
4282 last_it_count = 3;
4283 else if (inst1 & 0x0004)
4284 last_it_count = 2;
4285 else
4286 last_it_count = 1;
4287 }
4288 i += thumb_insn_size (inst1);
4289 }
4290
4291 xfree (buf);
4292
4293 if (last_it == -1)
4294 /* There wasn't really an IT instruction after all. */
4295 return bpaddr;
4296
4297 if (last_it_count < 1)
4298 /* It was too far away. */
4299 return bpaddr;
4300
4301 /* This really is a trouble spot. Move the breakpoint to the IT
4302 instruction. */
4303 return bpaddr - buf_len + last_it;
4304 }
4305
4306 /* ARM displaced stepping support.
4307
4308 Generally ARM displaced stepping works as follows:
4309
4310 1. When an instruction is to be single-stepped, it is first decoded by
4311 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
4312 Depending on the type of instruction, it is then copied to a scratch
4313 location, possibly in a modified form. The copy_* set of functions
4314 performs such modification, as necessary. A breakpoint is placed after
4315 the modified instruction in the scratch space to return control to GDB.
4316 Note in particular that instructions which modify the PC will no longer
4317 do so after modification.
4318
4319 2. The instruction is single-stepped, by setting the PC to the scratch
4320 location address, and resuming. Control returns to GDB when the
4321 breakpoint is hit.
4322
4323 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4324 function used for the current instruction. This function's job is to
4325 put the CPU/memory state back to what it would have been if the
4326 instruction had been executed unmodified in its original location. */
4327
4328 /* NOP instruction (mov r0, r0). */
4329 #define ARM_NOP 0xe1a00000
4330
4331 /* Helper for register reads for displaced stepping. In particular, this
4332 returns the PC as it would be seen by the instruction at its original
4333 location. */
4334
4335 ULONGEST
4336 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
4337 {
4338 ULONGEST ret;
4339
4340 if (regno == 15)
4341 {
4342 if (debug_displaced)
4343 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
4344 (unsigned long) from + 8);
4345 return (ULONGEST) from + 8; /* Pipeline offset. */
4346 }
4347 else
4348 {
4349 regcache_cooked_read_unsigned (regs, regno, &ret);
4350 if (debug_displaced)
4351 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
4352 regno, (unsigned long) ret);
4353 return ret;
4354 }
4355 }
4356
4357 static int
4358 displaced_in_arm_mode (struct regcache *regs)
4359 {
4360 ULONGEST ps;
4361 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4362
4363 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4364
4365 return (ps & t_bit) == 0;
4366 }
4367
4368 /* Write to the PC as from a branch instruction. */
4369
4370 static void
4371 branch_write_pc (struct regcache *regs, ULONGEST val)
4372 {
4373 if (displaced_in_arm_mode (regs))
4374 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4375 architecture versions < 6. */
4376 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4377 val & ~(ULONGEST) 0x3);
4378 else
4379 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4380 val & ~(ULONGEST) 0x1);
4381 }
4382
4383 /* Write to the PC as from a branch-exchange instruction. */
4384
4385 static void
4386 bx_write_pc (struct regcache *regs, ULONGEST val)
4387 {
4388 ULONGEST ps;
4389 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4390
4391 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4392
4393 if ((val & 1) == 1)
4394 {
4395 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4396 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4397 }
4398 else if ((val & 2) == 0)
4399 {
4400 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4401 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4402 }
4403 else
4404 {
4405 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4406 mode, align dest to 4 bytes). */
4407 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4408 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4409 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4410 }
4411 }
4412
4413 /* Write to the PC as if from a load instruction. */
4414
4415 static void
4416 load_write_pc (struct regcache *regs, ULONGEST val)
4417 {
4418 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4419 bx_write_pc (regs, val);
4420 else
4421 branch_write_pc (regs, val);
4422 }
4423
4424 /* Write to the PC as if from an ALU instruction. */
4425
4426 static void
4427 alu_write_pc (struct regcache *regs, ULONGEST val)
4428 {
4429 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
4430 bx_write_pc (regs, val);
4431 else
4432 branch_write_pc (regs, val);
4433 }
4434
4435 /* Helper for writing to registers for displaced stepping. Writing to the PC
4436 has a varying effects depending on the instruction which does the write:
4437 this is controlled by the WRITE_PC argument. */
4438
4439 void
4440 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
4441 int regno, ULONGEST val, enum pc_write_style write_pc)
4442 {
4443 if (regno == 15)
4444 {
4445 if (debug_displaced)
4446 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
4447 (unsigned long) val);
4448 switch (write_pc)
4449 {
4450 case BRANCH_WRITE_PC:
4451 branch_write_pc (regs, val);
4452 break;
4453
4454 case BX_WRITE_PC:
4455 bx_write_pc (regs, val);
4456 break;
4457
4458 case LOAD_WRITE_PC:
4459 load_write_pc (regs, val);
4460 break;
4461
4462 case ALU_WRITE_PC:
4463 alu_write_pc (regs, val);
4464 break;
4465
4466 case CANNOT_WRITE_PC:
4467 warning (_("Instruction wrote to PC in an unexpected way when "
4468 "single-stepping"));
4469 break;
4470
4471 default:
4472 internal_error (__FILE__, __LINE__,
4473 _("Invalid argument to displaced_write_reg"));
4474 }
4475
4476 dsc->wrote_to_pc = 1;
4477 }
4478 else
4479 {
4480 if (debug_displaced)
4481 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
4482 regno, (unsigned long) val);
4483 regcache_cooked_write_unsigned (regs, regno, val);
4484 }
4485 }
4486
4487 /* This function is used to concisely determine if an instruction INSN
4488 references PC. Register fields of interest in INSN should have the
4489 corresponding fields of BITMASK set to 0b1111. The function
4490 returns return 1 if any of these fields in INSN reference the PC
4491 (also 0b1111, r15), else it returns 0. */
4492
4493 static int
4494 insn_references_pc (uint32_t insn, uint32_t bitmask)
4495 {
4496 uint32_t lowbit = 1;
4497
4498 while (bitmask != 0)
4499 {
4500 uint32_t mask;
4501
4502 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
4503 ;
4504
4505 if (!lowbit)
4506 break;
4507
4508 mask = lowbit * 0xf;
4509
4510 if ((insn & mask) == mask)
4511 return 1;
4512
4513 bitmask &= ~mask;
4514 }
4515
4516 return 0;
4517 }
4518
4519 /* The simplest copy function. Many instructions have the same effect no
4520 matter what address they are executed at: in those cases, use this. */
4521
4522 static int
4523 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
4524 const char *iname, struct displaced_step_closure *dsc)
4525 {
4526 if (debug_displaced)
4527 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
4528 "opcode/class '%s' unmodified\n", (unsigned long) insn,
4529 iname);
4530
4531 dsc->modinsn[0] = insn;
4532
4533 return 0;
4534 }
4535
4536 /* Preload instructions with immediate offset. */
4537
4538 static void
4539 cleanup_preload (struct gdbarch *gdbarch,
4540 struct regcache *regs, struct displaced_step_closure *dsc)
4541 {
4542 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4543 if (!dsc->u.preload.immed)
4544 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4545 }
4546
4547 static int
4548 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4549 struct displaced_step_closure *dsc)
4550 {
4551 unsigned int rn = bits (insn, 16, 19);
4552 ULONGEST rn_val;
4553 CORE_ADDR from = dsc->insn_addr;
4554
4555 if (!insn_references_pc (insn, 0x000f0000ul))
4556 return copy_unmodified (gdbarch, insn, "preload", dsc);
4557
4558 if (debug_displaced)
4559 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4560 (unsigned long) insn);
4561
4562 /* Preload instructions:
4563
4564 {pli/pld} [rn, #+/-imm]
4565 ->
4566 {pli/pld} [r0, #+/-imm]. */
4567
4568 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4569 rn_val = displaced_read_reg (regs, from, rn);
4570 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4571
4572 dsc->u.preload.immed = 1;
4573
4574 dsc->modinsn[0] = insn & 0xfff0ffff;
4575
4576 dsc->cleanup = &cleanup_preload;
4577
4578 return 0;
4579 }
4580
4581 /* Preload instructions with register offset. */
4582
4583 static int
4584 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
4585 struct regcache *regs,
4586 struct displaced_step_closure *dsc)
4587 {
4588 unsigned int rn = bits (insn, 16, 19);
4589 unsigned int rm = bits (insn, 0, 3);
4590 ULONGEST rn_val, rm_val;
4591 CORE_ADDR from = dsc->insn_addr;
4592
4593 if (!insn_references_pc (insn, 0x000f000ful))
4594 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
4595
4596 if (debug_displaced)
4597 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4598 (unsigned long) insn);
4599
4600 /* Preload register-offset instructions:
4601
4602 {pli/pld} [rn, rm {, shift}]
4603 ->
4604 {pli/pld} [r0, r1 {, shift}]. */
4605
4606 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4607 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4608 rn_val = displaced_read_reg (regs, from, rn);
4609 rm_val = displaced_read_reg (regs, from, rm);
4610 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4611 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4612
4613 dsc->u.preload.immed = 0;
4614
4615 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4616
4617 dsc->cleanup = &cleanup_preload;
4618
4619 return 0;
4620 }
4621
4622 /* Copy/cleanup coprocessor load and store instructions. */
4623
4624 static void
4625 cleanup_copro_load_store (struct gdbarch *gdbarch,
4626 struct regcache *regs,
4627 struct displaced_step_closure *dsc)
4628 {
4629 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4630
4631 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4632
4633 if (dsc->u.ldst.writeback)
4634 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4635 }
4636
4637 static int
4638 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4639 struct regcache *regs,
4640 struct displaced_step_closure *dsc)
4641 {
4642 unsigned int rn = bits (insn, 16, 19);
4643 ULONGEST rn_val;
4644 CORE_ADDR from = dsc->insn_addr;
4645
4646 if (!insn_references_pc (insn, 0x000f0000ul))
4647 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4648
4649 if (debug_displaced)
4650 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
4651 "load/store insn %.8lx\n", (unsigned long) insn);
4652
4653 /* Coprocessor load/store instructions:
4654
4655 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4656 ->
4657 {stc/stc2} [r0, #+/-imm].
4658
4659 ldc/ldc2 are handled identically. */
4660
4661 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4662 rn_val = displaced_read_reg (regs, from, rn);
4663 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4664
4665 dsc->u.ldst.writeback = bit (insn, 25);
4666 dsc->u.ldst.rn = rn;
4667
4668 dsc->modinsn[0] = insn & 0xfff0ffff;
4669
4670 dsc->cleanup = &cleanup_copro_load_store;
4671
4672 return 0;
4673 }
4674
4675 /* Clean up branch instructions (actually perform the branch, by setting
4676 PC). */
4677
4678 static void
4679 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4680 struct displaced_step_closure *dsc)
4681 {
4682 ULONGEST from = dsc->insn_addr;
4683 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4684 int branch_taken = condition_true (dsc->u.branch.cond, status);
4685 enum pc_write_style write_pc = dsc->u.branch.exchange
4686 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4687
4688 if (!branch_taken)
4689 return;
4690
4691 if (dsc->u.branch.link)
4692 {
4693 ULONGEST pc = displaced_read_reg (regs, from, 15);
4694 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
4695 }
4696
4697 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
4698 }
4699
4700 /* Copy B/BL/BLX instructions with immediate destinations. */
4701
4702 static int
4703 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
4704 struct regcache *regs, struct displaced_step_closure *dsc)
4705 {
4706 unsigned int cond = bits (insn, 28, 31);
4707 int exchange = (cond == 0xf);
4708 int link = exchange || bit (insn, 24);
4709 CORE_ADDR from = dsc->insn_addr;
4710 long offset;
4711
4712 if (debug_displaced)
4713 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
4714 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
4715 (unsigned long) insn);
4716
4717 /* Implement "BL<cond> <label>" as:
4718
4719 Preparation: cond <- instruction condition
4720 Insn: mov r0, r0 (nop)
4721 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4722
4723 B<cond> similar, but don't set r14 in cleanup. */
4724
4725 if (exchange)
4726 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
4727 then arrange the switch into Thumb mode. */
4728 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
4729 else
4730 offset = bits (insn, 0, 23) << 2;
4731
4732 if (bit (offset, 25))
4733 offset = offset | ~0x3ffffff;
4734
4735 dsc->u.branch.cond = cond;
4736 dsc->u.branch.link = link;
4737 dsc->u.branch.exchange = exchange;
4738 dsc->u.branch.dest = from + 8 + offset;
4739
4740 dsc->modinsn[0] = ARM_NOP;
4741
4742 dsc->cleanup = &cleanup_branch;
4743
4744 return 0;
4745 }
4746
4747 /* Copy BX/BLX with register-specified destinations. */
4748
4749 static int
4750 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
4751 struct regcache *regs, struct displaced_step_closure *dsc)
4752 {
4753 unsigned int cond = bits (insn, 28, 31);
4754 /* BX: x12xxx1x
4755 BLX: x12xxx3x. */
4756 int link = bit (insn, 5);
4757 unsigned int rm = bits (insn, 0, 3);
4758 CORE_ADDR from = dsc->insn_addr;
4759
4760 if (debug_displaced)
4761 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
4762 "%.8lx\n", (link) ? "blx" : "bx",
4763 (unsigned long) insn);
4764
4765 /* Implement {BX,BLX}<cond> <reg>" as:
4766
4767 Preparation: cond <- instruction condition
4768 Insn: mov r0, r0 (nop)
4769 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
4770
4771 Don't set r14 in cleanup for BX. */
4772
4773 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
4774
4775 dsc->u.branch.cond = cond;
4776 dsc->u.branch.link = link;
4777 dsc->u.branch.exchange = 1;
4778
4779 dsc->modinsn[0] = ARM_NOP;
4780
4781 dsc->cleanup = &cleanup_branch;
4782
4783 return 0;
4784 }
4785
4786 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
4787
4788 static void
4789 cleanup_alu_imm (struct gdbarch *gdbarch,
4790 struct regcache *regs, struct displaced_step_closure *dsc)
4791 {
4792 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4793 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4794 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4795 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4796 }
4797
4798 static int
4799 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4800 struct displaced_step_closure *dsc)
4801 {
4802 unsigned int rn = bits (insn, 16, 19);
4803 unsigned int rd = bits (insn, 12, 15);
4804 unsigned int op = bits (insn, 21, 24);
4805 int is_mov = (op == 0xd);
4806 ULONGEST rd_val, rn_val;
4807 CORE_ADDR from = dsc->insn_addr;
4808
4809 if (!insn_references_pc (insn, 0x000ff000ul))
4810 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
4811
4812 if (debug_displaced)
4813 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
4814 "%.8lx\n", is_mov ? "move" : "ALU",
4815 (unsigned long) insn);
4816
4817 /* Instruction is of form:
4818
4819 <op><cond> rd, [rn,] #imm
4820
4821 Rewrite as:
4822
4823 Preparation: tmp1, tmp2 <- r0, r1;
4824 r0, r1 <- rd, rn
4825 Insn: <op><cond> r0, r1, #imm
4826 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
4827 */
4828
4829 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4830 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4831 rn_val = displaced_read_reg (regs, from, rn);
4832 rd_val = displaced_read_reg (regs, from, rd);
4833 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4834 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4835 dsc->rd = rd;
4836
4837 if (is_mov)
4838 dsc->modinsn[0] = insn & 0xfff00fff;
4839 else
4840 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
4841
4842 dsc->cleanup = &cleanup_alu_imm;
4843
4844 return 0;
4845 }
4846
4847 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4848
4849 static void
4850 cleanup_alu_reg (struct gdbarch *gdbarch,
4851 struct regcache *regs, struct displaced_step_closure *dsc)
4852 {
4853 ULONGEST rd_val;
4854 int i;
4855
4856 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4857
4858 for (i = 0; i < 3; i++)
4859 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4860
4861 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4862 }
4863
4864 static int
4865 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4866 struct displaced_step_closure *dsc)
4867 {
4868 unsigned int rn = bits (insn, 16, 19);
4869 unsigned int rm = bits (insn, 0, 3);
4870 unsigned int rd = bits (insn, 12, 15);
4871 unsigned int op = bits (insn, 21, 24);
4872 int is_mov = (op == 0xd);
4873 ULONGEST rd_val, rn_val, rm_val;
4874 CORE_ADDR from = dsc->insn_addr;
4875
4876 if (!insn_references_pc (insn, 0x000ff00ful))
4877 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
4878
4879 if (debug_displaced)
4880 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
4881 is_mov ? "move" : "ALU", (unsigned long) insn);
4882
4883 /* Instruction is of form:
4884
4885 <op><cond> rd, [rn,] rm [, <shift>]
4886
4887 Rewrite as:
4888
4889 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4890 r0, r1, r2 <- rd, rn, rm
4891 Insn: <op><cond> r0, r1, r2 [, <shift>]
4892 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4893 */
4894
4895 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4896 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4897 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4898 rd_val = displaced_read_reg (regs, from, rd);
4899 rn_val = displaced_read_reg (regs, from, rn);
4900 rm_val = displaced_read_reg (regs, from, rm);
4901 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4902 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4903 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4904 dsc->rd = rd;
4905
4906 if (is_mov)
4907 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
4908 else
4909 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
4910
4911 dsc->cleanup = &cleanup_alu_reg;
4912
4913 return 0;
4914 }
4915
4916 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4917
4918 static void
4919 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
4920 struct regcache *regs,
4921 struct displaced_step_closure *dsc)
4922 {
4923 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4924 int i;
4925
4926 for (i = 0; i < 4; i++)
4927 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4928
4929 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4930 }
4931
4932 static int
4933 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
4934 struct regcache *regs,
4935 struct displaced_step_closure *dsc)
4936 {
4937 unsigned int rn = bits (insn, 16, 19);
4938 unsigned int rm = bits (insn, 0, 3);
4939 unsigned int rd = bits (insn, 12, 15);
4940 unsigned int rs = bits (insn, 8, 11);
4941 unsigned int op = bits (insn, 21, 24);
4942 int is_mov = (op == 0xd), i;
4943 ULONGEST rd_val, rn_val, rm_val, rs_val;
4944 CORE_ADDR from = dsc->insn_addr;
4945
4946 if (!insn_references_pc (insn, 0x000fff0ful))
4947 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
4948
4949 if (debug_displaced)
4950 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
4951 "%.8lx\n", is_mov ? "move" : "ALU",
4952 (unsigned long) insn);
4953
4954 /* Instruction is of form:
4955
4956 <op><cond> rd, [rn,] rm, <shift> rs
4957
4958 Rewrite as:
4959
4960 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4961 r0, r1, r2, r3 <- rd, rn, rm, rs
4962 Insn: <op><cond> r0, r1, r2, <shift> r3
4963 Cleanup: tmp5 <- r0
4964 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4965 rd <- tmp5
4966 */
4967
4968 for (i = 0; i < 4; i++)
4969 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4970
4971 rd_val = displaced_read_reg (regs, from, rd);
4972 rn_val = displaced_read_reg (regs, from, rn);
4973 rm_val = displaced_read_reg (regs, from, rm);
4974 rs_val = displaced_read_reg (regs, from, rs);
4975 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4976 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4977 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4978 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
4979 dsc->rd = rd;
4980
4981 if (is_mov)
4982 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
4983 else
4984 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
4985
4986 dsc->cleanup = &cleanup_alu_shifted_reg;
4987
4988 return 0;
4989 }
4990
4991 /* Clean up load instructions. */
4992
4993 static void
4994 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
4995 struct displaced_step_closure *dsc)
4996 {
4997 ULONGEST rt_val, rt_val2 = 0, rn_val;
4998 CORE_ADDR from = dsc->insn_addr;
4999
5000 rt_val = displaced_read_reg (regs, from, 0);
5001 if (dsc->u.ldst.xfersize == 8)
5002 rt_val2 = displaced_read_reg (regs, from, 1);
5003 rn_val = displaced_read_reg (regs, from, 2);
5004
5005 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5006 if (dsc->u.ldst.xfersize > 4)
5007 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5008 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5009 if (!dsc->u.ldst.immed)
5010 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5011
5012 /* Handle register writeback. */
5013 if (dsc->u.ldst.writeback)
5014 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5015 /* Put result in right place. */
5016 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
5017 if (dsc->u.ldst.xfersize == 8)
5018 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
5019 }
5020
5021 /* Clean up store instructions. */
5022
5023 static void
5024 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5025 struct displaced_step_closure *dsc)
5026 {
5027 CORE_ADDR from = dsc->insn_addr;
5028 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
5029
5030 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5031 if (dsc->u.ldst.xfersize > 4)
5032 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5033 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5034 if (!dsc->u.ldst.immed)
5035 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5036 if (!dsc->u.ldst.restore_r4)
5037 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5038
5039 /* Writeback. */
5040 if (dsc->u.ldst.writeback)
5041 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5042 }
5043
5044 /* Copy "extra" load/store instructions. These are halfword/doubleword
5045 transfers, which have a different encoding to byte/word transfers. */
5046
5047 static int
5048 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
5049 struct regcache *regs, struct displaced_step_closure *dsc)
5050 {
5051 unsigned int op1 = bits (insn, 20, 24);
5052 unsigned int op2 = bits (insn, 5, 6);
5053 unsigned int rt = bits (insn, 12, 15);
5054 unsigned int rn = bits (insn, 16, 19);
5055 unsigned int rm = bits (insn, 0, 3);
5056 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5057 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5058 int immed = (op1 & 0x4) != 0;
5059 int opcode;
5060 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5061 CORE_ADDR from = dsc->insn_addr;
5062
5063 if (!insn_references_pc (insn, 0x000ff00ful))
5064 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5065
5066 if (debug_displaced)
5067 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
5068 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
5069 (unsigned long) insn);
5070
5071 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5072
5073 if (opcode < 0)
5074 internal_error (__FILE__, __LINE__,
5075 _("copy_extra_ld_st: instruction decode error"));
5076
5077 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5078 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
5079 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
5080 if (!immed)
5081 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
5082
5083 rt_val = displaced_read_reg (regs, from, rt);
5084 if (bytesize[opcode] == 8)
5085 rt_val2 = displaced_read_reg (regs, from, rt + 1);
5086 rn_val = displaced_read_reg (regs, from, rn);
5087 if (!immed)
5088 rm_val = displaced_read_reg (regs, from, rm);
5089
5090 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5091 if (bytesize[opcode] == 8)
5092 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5093 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5094 if (!immed)
5095 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5096
5097 dsc->rd = rt;
5098 dsc->u.ldst.xfersize = bytesize[opcode];
5099 dsc->u.ldst.rn = rn;
5100 dsc->u.ldst.immed = immed;
5101 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5102 dsc->u.ldst.restore_r4 = 0;
5103
5104 if (immed)
5105 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5106 ->
5107 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5108 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5109 else
5110 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5111 ->
5112 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5113 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5114
5115 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5116
5117 return 0;
5118 }
5119
5120 /* Copy byte/word loads and stores. */
5121
5122 static int
5123 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
5124 struct regcache *regs,
5125 struct displaced_step_closure *dsc, int load, int byte,
5126 int usermode)
5127 {
5128 int immed = !bit (insn, 25);
5129 unsigned int rt = bits (insn, 12, 15);
5130 unsigned int rn = bits (insn, 16, 19);
5131 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
5132 ULONGEST rt_val, rn_val, rm_val = 0;
5133 CORE_ADDR from = dsc->insn_addr;
5134
5135 if (!insn_references_pc (insn, 0x000ff00ful))
5136 return copy_unmodified (gdbarch, insn, "load/store", dsc);
5137
5138 if (debug_displaced)
5139 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
5140 load ? (byte ? "ldrb" : "ldr")
5141 : (byte ? "strb" : "str"), usermode ? "t" : "",
5142 (unsigned long) insn);
5143
5144 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
5145 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
5146 if (!immed)
5147 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
5148 if (!load)
5149 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
5150
5151 rt_val = displaced_read_reg (regs, from, rt);
5152 rn_val = displaced_read_reg (regs, from, rn);
5153 if (!immed)
5154 rm_val = displaced_read_reg (regs, from, rm);
5155
5156 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5157 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5158 if (!immed)
5159 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5160
5161 dsc->rd = rt;
5162 dsc->u.ldst.xfersize = byte ? 1 : 4;
5163 dsc->u.ldst.rn = rn;
5164 dsc->u.ldst.immed = immed;
5165 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5166
5167 /* To write PC we can do:
5168
5169 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
5170 scratch+4: ldr r4, temp
5171 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
5172 scratch+12: add r4, r4, #8 (r4 = offset)
5173 scratch+16: add r0, r0, r4
5174 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
5175 scratch+24: <temp>
5176
5177 Otherwise we don't know what value to write for PC, since the offset is
5178 architecture-dependent (sometimes PC+8, sometimes PC+12). */
5179
5180 if (load || rt != 15)
5181 {
5182 dsc->u.ldst.restore_r4 = 0;
5183
5184 if (immed)
5185 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
5186 ->
5187 {ldr,str}[b]<cond> r0, [r2, #imm]. */
5188 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5189 else
5190 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
5191 ->
5192 {ldr,str}[b]<cond> r0, [r2, r3]. */
5193 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5194 }
5195 else
5196 {
5197 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
5198 dsc->u.ldst.restore_r4 = 1;
5199
5200 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
5201 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
5202 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
5203 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
5204 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
5205
5206 /* As above. */
5207 if (immed)
5208 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
5209 else
5210 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
5211
5212 dsc->modinsn[6] = 0x0; /* breakpoint location. */
5213 dsc->modinsn[7] = 0x0; /* scratch space. */
5214
5215 dsc->numinsns = 6;
5216 }
5217
5218 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
5219
5220 return 0;
5221 }
5222
5223 /* Cleanup LDM instructions with fully-populated register list. This is an
5224 unfortunate corner case: it's impossible to implement correctly by modifying
5225 the instruction. The issue is as follows: we have an instruction,
5226
5227 ldm rN, {r0-r15}
5228
5229 which we must rewrite to avoid loading PC. A possible solution would be to
5230 do the load in two halves, something like (with suitable cleanup
5231 afterwards):
5232
5233 mov r8, rN
5234 ldm[id][ab] r8!, {r0-r7}
5235 str r7, <temp>
5236 ldm[id][ab] r8, {r7-r14}
5237 <bkpt>
5238
5239 but at present there's no suitable place for <temp>, since the scratch space
5240 is overwritten before the cleanup routine is called. For now, we simply
5241 emulate the instruction. */
5242
5243 static void
5244 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
5245 struct displaced_step_closure *dsc)
5246 {
5247 ULONGEST from = dsc->insn_addr;
5248 int inc = dsc->u.block.increment;
5249 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
5250 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
5251 uint32_t regmask = dsc->u.block.regmask;
5252 int regno = inc ? 0 : 15;
5253 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
5254 int exception_return = dsc->u.block.load && dsc->u.block.user
5255 && (regmask & 0x8000) != 0;
5256 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5257 int do_transfer = condition_true (dsc->u.block.cond, status);
5258 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5259
5260 if (!do_transfer)
5261 return;
5262
5263 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
5264 sensible we can do here. Complain loudly. */
5265 if (exception_return)
5266 error (_("Cannot single-step exception return"));
5267
5268 /* We don't handle any stores here for now. */
5269 gdb_assert (dsc->u.block.load != 0);
5270
5271 if (debug_displaced)
5272 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
5273 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
5274 dsc->u.block.increment ? "inc" : "dec",
5275 dsc->u.block.before ? "before" : "after");
5276
5277 while (regmask)
5278 {
5279 uint32_t memword;
5280
5281 if (inc)
5282 while (regno <= 15 && (regmask & (1 << regno)) == 0)
5283 regno++;
5284 else
5285 while (regno >= 0 && (regmask & (1 << regno)) == 0)
5286 regno--;
5287
5288 xfer_addr += bump_before;
5289
5290 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
5291 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
5292
5293 xfer_addr += bump_after;
5294
5295 regmask &= ~(1 << regno);
5296 }
5297
5298 if (dsc->u.block.writeback)
5299 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
5300 CANNOT_WRITE_PC);
5301 }
5302
5303 /* Clean up an STM which included the PC in the register list. */
5304
5305 static void
5306 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
5307 struct displaced_step_closure *dsc)
5308 {
5309 ULONGEST from = dsc->insn_addr;
5310 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5311 int store_executed = condition_true (dsc->u.block.cond, status);
5312 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
5313 CORE_ADDR stm_insn_addr;
5314 uint32_t pc_val;
5315 long offset;
5316 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5317
5318 /* If condition code fails, there's nothing else to do. */
5319 if (!store_executed)
5320 return;
5321
5322 if (dsc->u.block.increment)
5323 {
5324 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
5325
5326 if (dsc->u.block.before)
5327 pc_stored_at += 4;
5328 }
5329 else
5330 {
5331 pc_stored_at = dsc->u.block.xfer_addr;
5332
5333 if (dsc->u.block.before)
5334 pc_stored_at -= 4;
5335 }
5336
5337 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
5338 stm_insn_addr = dsc->scratch_base;
5339 offset = pc_val - stm_insn_addr;
5340
5341 if (debug_displaced)
5342 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
5343 "STM instruction\n", offset);
5344
5345 /* Rewrite the stored PC to the proper value for the non-displaced original
5346 instruction. */
5347 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
5348 dsc->insn_addr + offset);
5349 }
5350
5351 /* Clean up an LDM which includes the PC in the register list. We clumped all
5352 the registers in the transferred list into a contiguous range r0...rX (to
5353 avoid loading PC directly and losing control of the debugged program), so we
5354 must undo that here. */
5355
5356 static void
5357 cleanup_block_load_pc (struct gdbarch *gdbarch,
5358 struct regcache *regs,
5359 struct displaced_step_closure *dsc)
5360 {
5361 ULONGEST from = dsc->insn_addr;
5362 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5363 int load_executed = condition_true (dsc->u.block.cond, status), i;
5364 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
5365 unsigned int regs_loaded = bitcount (mask);
5366 unsigned int num_to_shuffle = regs_loaded, clobbered;
5367
5368 /* The method employed here will fail if the register list is fully populated
5369 (we need to avoid loading PC directly). */
5370 gdb_assert (num_to_shuffle < 16);
5371
5372 if (!load_executed)
5373 return;
5374
5375 clobbered = (1 << num_to_shuffle) - 1;
5376
5377 while (num_to_shuffle > 0)
5378 {
5379 if ((mask & (1 << write_reg)) != 0)
5380 {
5381 unsigned int read_reg = num_to_shuffle - 1;
5382
5383 if (read_reg != write_reg)
5384 {
5385 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
5386 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
5387 if (debug_displaced)
5388 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
5389 "loaded register r%d to r%d\n"), read_reg,
5390 write_reg);
5391 }
5392 else if (debug_displaced)
5393 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
5394 "r%d already in the right place\n"),
5395 write_reg);
5396
5397 clobbered &= ~(1 << write_reg);
5398
5399 num_to_shuffle--;
5400 }
5401
5402 write_reg--;
5403 }
5404
5405 /* Restore any registers we scribbled over. */
5406 for (write_reg = 0; clobbered != 0; write_reg++)
5407 {
5408 if ((clobbered & (1 << write_reg)) != 0)
5409 {
5410 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
5411 CANNOT_WRITE_PC);
5412 if (debug_displaced)
5413 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
5414 "clobbered register r%d\n"), write_reg);
5415 clobbered &= ~(1 << write_reg);
5416 }
5417 }
5418
5419 /* Perform register writeback manually. */
5420 if (dsc->u.block.writeback)
5421 {
5422 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
5423
5424 if (dsc->u.block.increment)
5425 new_rn_val += regs_loaded * 4;
5426 else
5427 new_rn_val -= regs_loaded * 4;
5428
5429 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
5430 CANNOT_WRITE_PC);
5431 }
5432 }
5433
5434 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5435 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5436
5437 static int
5438 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5439 struct displaced_step_closure *dsc)
5440 {
5441 int load = bit (insn, 20);
5442 int user = bit (insn, 22);
5443 int increment = bit (insn, 23);
5444 int before = bit (insn, 24);
5445 int writeback = bit (insn, 21);
5446 int rn = bits (insn, 16, 19);
5447 CORE_ADDR from = dsc->insn_addr;
5448
5449 /* Block transfers which don't mention PC can be run directly
5450 out-of-line. */
5451 if (rn != 15 && (insn & 0x8000) == 0)
5452 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
5453
5454 if (rn == 15)
5455 {
5456 warning (_("displaced: Unpredictable LDM or STM with "
5457 "base register r15"));
5458 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
5459 }
5460
5461 if (debug_displaced)
5462 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
5463 "%.8lx\n", (unsigned long) insn);
5464
5465 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
5466 dsc->u.block.rn = rn;
5467
5468 dsc->u.block.load = load;
5469 dsc->u.block.user = user;
5470 dsc->u.block.increment = increment;
5471 dsc->u.block.before = before;
5472 dsc->u.block.writeback = writeback;
5473 dsc->u.block.cond = bits (insn, 28, 31);
5474
5475 dsc->u.block.regmask = insn & 0xffff;
5476
5477 if (load)
5478 {
5479 if ((insn & 0xffff) == 0xffff)
5480 {
5481 /* LDM with a fully-populated register list. This case is
5482 particularly tricky. Implement for now by fully emulating the
5483 instruction (which might not behave perfectly in all cases, but
5484 these instructions should be rare enough for that not to matter
5485 too much). */
5486 dsc->modinsn[0] = ARM_NOP;
5487
5488 dsc->cleanup = &cleanup_block_load_all;
5489 }
5490 else
5491 {
5492 /* LDM of a list of registers which includes PC. Implement by
5493 rewriting the list of registers to be transferred into a
5494 contiguous chunk r0...rX before doing the transfer, then shuffling
5495 registers into the correct places in the cleanup routine. */
5496 unsigned int regmask = insn & 0xffff;
5497 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
5498 unsigned int to = 0, from = 0, i, new_rn;
5499
5500 for (i = 0; i < num_in_list; i++)
5501 dsc->tmp[i] = displaced_read_reg (regs, from, i);
5502
5503 /* Writeback makes things complicated. We need to avoid clobbering
5504 the base register with one of the registers in our modified
5505 register list, but just using a different register can't work in
5506 all cases, e.g.:
5507
5508 ldm r14!, {r0-r13,pc}
5509
5510 which would need to be rewritten as:
5511
5512 ldm rN!, {r0-r14}
5513
5514 but that can't work, because there's no free register for N.
5515
5516 Solve this by turning off the writeback bit, and emulating
5517 writeback manually in the cleanup routine. */
5518
5519 if (writeback)
5520 insn &= ~(1 << 21);
5521
5522 new_regmask = (1 << num_in_list) - 1;
5523
5524 if (debug_displaced)
5525 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
5526 "{..., pc}: original reg list %.4x, modified "
5527 "list %.4x\n"), rn, writeback ? "!" : "",
5528 (int) insn & 0xffff, new_regmask);
5529
5530 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
5531
5532 dsc->cleanup = &cleanup_block_load_pc;
5533 }
5534 }
5535 else
5536 {
5537 /* STM of a list of registers which includes PC. Run the instruction
5538 as-is, but out of line: this will store the wrong value for the PC,
5539 so we must manually fix up the memory in the cleanup routine.
5540 Doing things this way has the advantage that we can auto-detect
5541 the offset of the PC write (which is architecture-dependent) in
5542 the cleanup routine. */
5543 dsc->modinsn[0] = insn;
5544
5545 dsc->cleanup = &cleanup_block_store_pc;
5546 }
5547
5548 return 0;
5549 }
5550
5551 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
5552 for Linux, where some SVC instructions must be treated specially. */
5553
5554 static void
5555 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
5556 struct displaced_step_closure *dsc)
5557 {
5558 CORE_ADDR from = dsc->insn_addr;
5559 CORE_ADDR resume_addr = from + 4;
5560
5561 if (debug_displaced)
5562 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
5563 "%.8lx\n", (unsigned long) resume_addr);
5564
5565 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
5566 }
5567
5568 static int
5569 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5570 struct regcache *regs, struct displaced_step_closure *dsc)
5571 {
5572 CORE_ADDR from = dsc->insn_addr;
5573
5574 /* Allow OS-specific code to override SVC handling. */
5575 if (dsc->u.svc.copy_svc_os)
5576 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
5577
5578 if (debug_displaced)
5579 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
5580 (unsigned long) insn);
5581
5582 /* Preparation: none.
5583 Insn: unmodified svc.
5584 Cleanup: pc <- insn_addr + 4. */
5585
5586 dsc->modinsn[0] = insn;
5587
5588 dsc->cleanup = &cleanup_svc;
5589 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
5590 instruction. */
5591 dsc->wrote_to_pc = 1;
5592
5593 return 0;
5594 }
5595
5596 /* Copy undefined instructions. */
5597
5598 static int
5599 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
5600 struct displaced_step_closure *dsc)
5601 {
5602 if (debug_displaced)
5603 fprintf_unfiltered (gdb_stdlog,
5604 "displaced: copying undefined insn %.8lx\n",
5605 (unsigned long) insn);
5606
5607 dsc->modinsn[0] = insn;
5608
5609 return 0;
5610 }
5611
5612 /* Copy unpredictable instructions. */
5613
5614 static int
5615 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
5616 struct displaced_step_closure *dsc)
5617 {
5618 if (debug_displaced)
5619 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
5620 "%.8lx\n", (unsigned long) insn);
5621
5622 dsc->modinsn[0] = insn;
5623
5624 return 0;
5625 }
5626
5627 /* The decode_* functions are instruction decoding helpers. They mostly follow
5628 the presentation in the ARM ARM. */
5629
5630 static int
5631 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
5632 struct regcache *regs,
5633 struct displaced_step_closure *dsc)
5634 {
5635 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
5636 unsigned int rn = bits (insn, 16, 19);
5637
5638 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
5639 return copy_unmodified (gdbarch, insn, "cps", dsc);
5640 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
5641 return copy_unmodified (gdbarch, insn, "setend", dsc);
5642 else if ((op1 & 0x60) == 0x20)
5643 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
5644 else if ((op1 & 0x71) == 0x40)
5645 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
5646 else if ((op1 & 0x77) == 0x41)
5647 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5648 else if ((op1 & 0x77) == 0x45)
5649 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
5650 else if ((op1 & 0x77) == 0x51)
5651 {
5652 if (rn != 0xf)
5653 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5654 else
5655 return copy_unpred (gdbarch, insn, dsc);
5656 }
5657 else if ((op1 & 0x77) == 0x55)
5658 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5659 else if (op1 == 0x57)
5660 switch (op2)
5661 {
5662 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
5663 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
5664 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
5665 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
5666 default: return copy_unpred (gdbarch, insn, dsc);
5667 }
5668 else if ((op1 & 0x63) == 0x43)
5669 return copy_unpred (gdbarch, insn, dsc);
5670 else if ((op2 & 0x1) == 0x0)
5671 switch (op1 & ~0x80)
5672 {
5673 case 0x61:
5674 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5675 case 0x65:
5676 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
5677 case 0x71: case 0x75:
5678 /* pld/pldw reg. */
5679 return copy_preload_reg (gdbarch, insn, regs, dsc);
5680 case 0x63: case 0x67: case 0x73: case 0x77:
5681 return copy_unpred (gdbarch, insn, dsc);
5682 default:
5683 return copy_undef (gdbarch, insn, dsc);
5684 }
5685 else
5686 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
5687 }
5688
5689 static int
5690 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
5691 struct regcache *regs,
5692 struct displaced_step_closure *dsc)
5693 {
5694 if (bit (insn, 27) == 0)
5695 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
5696 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
5697 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
5698 {
5699 case 0x0: case 0x2:
5700 return copy_unmodified (gdbarch, insn, "srs", dsc);
5701
5702 case 0x1: case 0x3:
5703 return copy_unmodified (gdbarch, insn, "rfe", dsc);
5704
5705 case 0x4: case 0x5: case 0x6: case 0x7:
5706 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5707
5708 case 0x8:
5709 switch ((insn & 0xe00000) >> 21)
5710 {
5711 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
5712 /* stc/stc2. */
5713 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5714
5715 case 0x2:
5716 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5717
5718 default:
5719 return copy_undef (gdbarch, insn, dsc);
5720 }
5721
5722 case 0x9:
5723 {
5724 int rn_f = (bits (insn, 16, 19) == 0xf);
5725 switch ((insn & 0xe00000) >> 21)
5726 {
5727 case 0x1: case 0x3:
5728 /* ldc/ldc2 imm (undefined for rn == pc). */
5729 return rn_f ? copy_undef (gdbarch, insn, dsc)
5730 : copy_copro_load_store (gdbarch, insn, regs, dsc);
5731
5732 case 0x2:
5733 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5734
5735 case 0x4: case 0x5: case 0x6: case 0x7:
5736 /* ldc/ldc2 lit (undefined for rn != pc). */
5737 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
5738 : copy_undef (gdbarch, insn, dsc);
5739
5740 default:
5741 return copy_undef (gdbarch, insn, dsc);
5742 }
5743 }
5744
5745 case 0xa:
5746 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
5747
5748 case 0xb:
5749 if (bits (insn, 16, 19) == 0xf)
5750 /* ldc/ldc2 lit. */
5751 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5752 else
5753 return copy_undef (gdbarch, insn, dsc);
5754
5755 case 0xc:
5756 if (bit (insn, 4))
5757 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5758 else
5759 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5760
5761 case 0xd:
5762 if (bit (insn, 4))
5763 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5764 else
5765 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5766
5767 default:
5768 return copy_undef (gdbarch, insn, dsc);
5769 }
5770 }
5771
5772 /* Decode miscellaneous instructions in dp/misc encoding space. */
5773
5774 static int
5775 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
5776 struct regcache *regs,
5777 struct displaced_step_closure *dsc)
5778 {
5779 unsigned int op2 = bits (insn, 4, 6);
5780 unsigned int op = bits (insn, 21, 22);
5781 unsigned int op1 = bits (insn, 16, 19);
5782
5783 switch (op2)
5784 {
5785 case 0x0:
5786 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
5787
5788 case 0x1:
5789 if (op == 0x1) /* bx. */
5790 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
5791 else if (op == 0x3)
5792 return copy_unmodified (gdbarch, insn, "clz", dsc);
5793 else
5794 return copy_undef (gdbarch, insn, dsc);
5795
5796 case 0x2:
5797 if (op == 0x1)
5798 /* Not really supported. */
5799 return copy_unmodified (gdbarch, insn, "bxj", dsc);
5800 else
5801 return copy_undef (gdbarch, insn, dsc);
5802
5803 case 0x3:
5804 if (op == 0x1)
5805 return copy_bx_blx_reg (gdbarch, insn,
5806 regs, dsc); /* blx register. */
5807 else
5808 return copy_undef (gdbarch, insn, dsc);
5809
5810 case 0x5:
5811 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
5812
5813 case 0x7:
5814 if (op == 0x1)
5815 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
5816 else if (op == 0x3)
5817 /* Not really supported. */
5818 return copy_unmodified (gdbarch, insn, "smc", dsc);
5819
5820 default:
5821 return copy_undef (gdbarch, insn, dsc);
5822 }
5823 }
5824
5825 static int
5826 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5827 struct displaced_step_closure *dsc)
5828 {
5829 if (bit (insn, 25))
5830 switch (bits (insn, 20, 24))
5831 {
5832 case 0x10:
5833 return copy_unmodified (gdbarch, insn, "movw", dsc);
5834
5835 case 0x14:
5836 return copy_unmodified (gdbarch, insn, "movt", dsc);
5837
5838 case 0x12: case 0x16:
5839 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
5840
5841 default:
5842 return copy_alu_imm (gdbarch, insn, regs, dsc);
5843 }
5844 else
5845 {
5846 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
5847
5848 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
5849 return copy_alu_reg (gdbarch, insn, regs, dsc);
5850 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
5851 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
5852 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
5853 return decode_miscellaneous (gdbarch, insn, regs, dsc);
5854 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
5855 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
5856 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
5857 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
5858 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
5859 return copy_unmodified (gdbarch, insn, "synch", dsc);
5860 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
5861 /* 2nd arg means "unpriveleged". */
5862 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
5863 dsc);
5864 }
5865
5866 /* Should be unreachable. */
5867 return 1;
5868 }
5869
5870 static int
5871 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
5872 struct regcache *regs,
5873 struct displaced_step_closure *dsc)
5874 {
5875 int a = bit (insn, 25), b = bit (insn, 4);
5876 uint32_t op1 = bits (insn, 20, 24);
5877 int rn_f = bits (insn, 16, 19) == 0xf;
5878
5879 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
5880 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
5881 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
5882 else if ((!a && (op1 & 0x17) == 0x02)
5883 || (a && (op1 & 0x17) == 0x02 && !b))
5884 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
5885 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
5886 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
5887 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
5888 else if ((!a && (op1 & 0x17) == 0x03)
5889 || (a && (op1 & 0x17) == 0x03 && !b))
5890 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
5891 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
5892 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
5893 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
5894 else if ((!a && (op1 & 0x17) == 0x06)
5895 || (a && (op1 & 0x17) == 0x06 && !b))
5896 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
5897 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
5898 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
5899 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
5900 else if ((!a && (op1 & 0x17) == 0x07)
5901 || (a && (op1 & 0x17) == 0x07 && !b))
5902 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
5903
5904 /* Should be unreachable. */
5905 return 1;
5906 }
5907
5908 static int
5909 decode_media (struct gdbarch *gdbarch, uint32_t insn,
5910 struct displaced_step_closure *dsc)
5911 {
5912 switch (bits (insn, 20, 24))
5913 {
5914 case 0x00: case 0x01: case 0x02: case 0x03:
5915 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
5916
5917 case 0x04: case 0x05: case 0x06: case 0x07:
5918 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
5919
5920 case 0x08: case 0x09: case 0x0a: case 0x0b:
5921 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
5922 return copy_unmodified (gdbarch, insn,
5923 "decode/pack/unpack/saturate/reverse", dsc);
5924
5925 case 0x18:
5926 if (bits (insn, 5, 7) == 0) /* op2. */
5927 {
5928 if (bits (insn, 12, 15) == 0xf)
5929 return copy_unmodified (gdbarch, insn, "usad8", dsc);
5930 else
5931 return copy_unmodified (gdbarch, insn, "usada8", dsc);
5932 }
5933 else
5934 return copy_undef (gdbarch, insn, dsc);
5935
5936 case 0x1a: case 0x1b:
5937 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5938 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
5939 else
5940 return copy_undef (gdbarch, insn, dsc);
5941
5942 case 0x1c: case 0x1d:
5943 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
5944 {
5945 if (bits (insn, 0, 3) == 0xf)
5946 return copy_unmodified (gdbarch, insn, "bfc", dsc);
5947 else
5948 return copy_unmodified (gdbarch, insn, "bfi", dsc);
5949 }
5950 else
5951 return copy_undef (gdbarch, insn, dsc);
5952
5953 case 0x1e: case 0x1f:
5954 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5955 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
5956 else
5957 return copy_undef (gdbarch, insn, dsc);
5958 }
5959
5960 /* Should be unreachable. */
5961 return 1;
5962 }
5963
5964 static int
5965 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
5966 struct regcache *regs, struct displaced_step_closure *dsc)
5967 {
5968 if (bit (insn, 25))
5969 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5970 else
5971 return copy_block_xfer (gdbarch, insn, regs, dsc);
5972 }
5973
5974 static int
5975 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
5976 struct regcache *regs,
5977 struct displaced_step_closure *dsc)
5978 {
5979 unsigned int opcode = bits (insn, 20, 24);
5980
5981 switch (opcode)
5982 {
5983 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
5984 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
5985
5986 case 0x08: case 0x0a: case 0x0c: case 0x0e:
5987 case 0x12: case 0x16:
5988 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
5989
5990 case 0x09: case 0x0b: case 0x0d: case 0x0f:
5991 case 0x13: case 0x17:
5992 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
5993
5994 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
5995 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
5996 /* Note: no writeback for these instructions. Bit 25 will always be
5997 zero though (via caller), so the following works OK. */
5998 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5999 }
6000
6001 /* Should be unreachable. */
6002 return 1;
6003 }
6004
6005 static int
6006 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6007 struct regcache *regs, struct displaced_step_closure *dsc)
6008 {
6009 unsigned int op1 = bits (insn, 20, 25);
6010 int op = bit (insn, 4);
6011 unsigned int coproc = bits (insn, 8, 11);
6012 unsigned int rn = bits (insn, 16, 19);
6013
6014 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6015 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6016 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6017 && (coproc & 0xe) != 0xa)
6018 /* stc/stc2. */
6019 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6020 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6021 && (coproc & 0xe) != 0xa)
6022 /* ldc/ldc2 imm/lit. */
6023 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6024 else if ((op1 & 0x3e) == 0x00)
6025 return copy_undef (gdbarch, insn, dsc);
6026 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6027 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6028 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6029 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6030 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6031 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6032 else if ((op1 & 0x30) == 0x20 && !op)
6033 {
6034 if ((coproc & 0xe) == 0xa)
6035 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6036 else
6037 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6038 }
6039 else if ((op1 & 0x30) == 0x20 && op)
6040 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6041 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6042 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6043 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6044 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6045 else if ((op1 & 0x30) == 0x30)
6046 return copy_svc (gdbarch, insn, to, regs, dsc);
6047 else
6048 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6049 }
6050
6051 void
6052 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
6053 CORE_ADDR from, CORE_ADDR to,
6054 struct regcache *regs,
6055 struct displaced_step_closure *dsc)
6056 {
6057 int err = 0;
6058
6059 if (!displaced_in_arm_mode (regs))
6060 error (_("Displaced stepping is only supported in ARM mode"));
6061
6062 /* Most displaced instructions use a 1-instruction scratch space, so set this
6063 here and override below if/when necessary. */
6064 dsc->numinsns = 1;
6065 dsc->insn_addr = from;
6066 dsc->scratch_base = to;
6067 dsc->cleanup = NULL;
6068 dsc->wrote_to_pc = 0;
6069
6070 if ((insn & 0xf0000000) == 0xf0000000)
6071 err = decode_unconditional (gdbarch, insn, regs, dsc);
6072 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
6073 {
6074 case 0x0: case 0x1: case 0x2: case 0x3:
6075 err = decode_dp_misc (gdbarch, insn, regs, dsc);
6076 break;
6077
6078 case 0x4: case 0x5: case 0x6:
6079 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
6080 break;
6081
6082 case 0x7:
6083 err = decode_media (gdbarch, insn, dsc);
6084 break;
6085
6086 case 0x8: case 0x9: case 0xa: case 0xb:
6087 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
6088 break;
6089
6090 case 0xc: case 0xd: case 0xe: case 0xf:
6091 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
6092 break;
6093 }
6094
6095 if (err)
6096 internal_error (__FILE__, __LINE__,
6097 _("arm_process_displaced_insn: Instruction decode error"));
6098 }
6099
6100 /* Actually set up the scratch space for a displaced instruction. */
6101
6102 void
6103 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
6104 CORE_ADDR to, struct displaced_step_closure *dsc)
6105 {
6106 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6107 unsigned int i;
6108 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6109
6110 /* Poke modified instruction(s). */
6111 for (i = 0; i < dsc->numinsns; i++)
6112 {
6113 if (debug_displaced)
6114 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
6115 "%.8lx\n", (unsigned long) dsc->modinsn[i],
6116 (unsigned long) to + i * 4);
6117 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
6118 dsc->modinsn[i]);
6119 }
6120
6121 /* Put breakpoint afterwards. */
6122 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
6123 tdep->arm_breakpoint_size);
6124
6125 if (debug_displaced)
6126 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
6127 paddress (gdbarch, from), paddress (gdbarch, to));
6128 }
6129
6130 /* Entry point for copying an instruction into scratch space for displaced
6131 stepping. */
6132
6133 struct displaced_step_closure *
6134 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
6135 CORE_ADDR from, CORE_ADDR to,
6136 struct regcache *regs)
6137 {
6138 struct displaced_step_closure *dsc
6139 = xmalloc (sizeof (struct displaced_step_closure));
6140 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6141 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
6142
6143 if (debug_displaced)
6144 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
6145 "at %.8lx\n", (unsigned long) insn,
6146 (unsigned long) from);
6147
6148 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
6149 arm_displaced_init_closure (gdbarch, from, to, dsc);
6150
6151 return dsc;
6152 }
6153
6154 /* Entry point for cleaning things up after a displaced instruction has been
6155 single-stepped. */
6156
6157 void
6158 arm_displaced_step_fixup (struct gdbarch *gdbarch,
6159 struct displaced_step_closure *dsc,
6160 CORE_ADDR from, CORE_ADDR to,
6161 struct regcache *regs)
6162 {
6163 if (dsc->cleanup)
6164 dsc->cleanup (gdbarch, regs, dsc);
6165
6166 if (!dsc->wrote_to_pc)
6167 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
6168 }
6169
6170 #include "bfd-in2.h"
6171 #include "libcoff.h"
6172
6173 static int
6174 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
6175 {
6176 struct gdbarch *gdbarch = info->application_data;
6177
6178 if (arm_pc_is_thumb (gdbarch, memaddr))
6179 {
6180 static asymbol *asym;
6181 static combined_entry_type ce;
6182 static struct coff_symbol_struct csym;
6183 static struct bfd fake_bfd;
6184 static bfd_target fake_target;
6185
6186 if (csym.native == NULL)
6187 {
6188 /* Create a fake symbol vector containing a Thumb symbol.
6189 This is solely so that the code in print_insn_little_arm()
6190 and print_insn_big_arm() in opcodes/arm-dis.c will detect
6191 the presence of a Thumb symbol and switch to decoding
6192 Thumb instructions. */
6193
6194 fake_target.flavour = bfd_target_coff_flavour;
6195 fake_bfd.xvec = &fake_target;
6196 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
6197 csym.native = &ce;
6198 csym.symbol.the_bfd = &fake_bfd;
6199 csym.symbol.name = "fake";
6200 asym = (asymbol *) & csym;
6201 }
6202
6203 memaddr = UNMAKE_THUMB_ADDR (memaddr);
6204 info->symbols = &asym;
6205 }
6206 else
6207 info->symbols = NULL;
6208
6209 if (info->endian == BFD_ENDIAN_BIG)
6210 return print_insn_big_arm (memaddr, info);
6211 else
6212 return print_insn_little_arm (memaddr, info);
6213 }
6214
6215 /* The following define instruction sequences that will cause ARM
6216 cpu's to take an undefined instruction trap. These are used to
6217 signal a breakpoint to GDB.
6218
6219 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
6220 modes. A different instruction is required for each mode. The ARM
6221 cpu's can also be big or little endian. Thus four different
6222 instructions are needed to support all cases.
6223
6224 Note: ARMv4 defines several new instructions that will take the
6225 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
6226 not in fact add the new instructions. The new undefined
6227 instructions in ARMv4 are all instructions that had no defined
6228 behaviour in earlier chips. There is no guarantee that they will
6229 raise an exception, but may be treated as NOP's. In practice, it
6230 may only safe to rely on instructions matching:
6231
6232 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
6233 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
6234 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
6235
6236 Even this may only true if the condition predicate is true. The
6237 following use a condition predicate of ALWAYS so it is always TRUE.
6238
6239 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
6240 and NetBSD all use a software interrupt rather than an undefined
6241 instruction to force a trap. This can be handled by by the
6242 abi-specific code during establishment of the gdbarch vector. */
6243
6244 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
6245 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
6246 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
6247 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
6248
6249 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
6250 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
6251 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
6252 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
6253
6254 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
6255 the program counter value to determine whether a 16-bit or 32-bit
6256 breakpoint should be used. It returns a pointer to a string of
6257 bytes that encode a breakpoint instruction, stores the length of
6258 the string to *lenptr, and adjusts the program counter (if
6259 necessary) to point to the actual memory location where the
6260 breakpoint should be inserted. */
6261
6262 static const unsigned char *
6263 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
6264 {
6265 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6266 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6267
6268 if (arm_pc_is_thumb (gdbarch, *pcptr))
6269 {
6270 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
6271
6272 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
6273 check whether we are replacing a 32-bit instruction. */
6274 if (tdep->thumb2_breakpoint != NULL)
6275 {
6276 gdb_byte buf[2];
6277 if (target_read_memory (*pcptr, buf, 2) == 0)
6278 {
6279 unsigned short inst1;
6280 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
6281 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
6282 {
6283 *lenptr = tdep->thumb2_breakpoint_size;
6284 return tdep->thumb2_breakpoint;
6285 }
6286 }
6287 }
6288
6289 *lenptr = tdep->thumb_breakpoint_size;
6290 return tdep->thumb_breakpoint;
6291 }
6292 else
6293 {
6294 *lenptr = tdep->arm_breakpoint_size;
6295 return tdep->arm_breakpoint;
6296 }
6297 }
6298
6299 static void
6300 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
6301 int *kindptr)
6302 {
6303 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6304
6305 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
6306
6307 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
6308 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
6309 that this is not confused with a 32-bit ARM breakpoint. */
6310 *kindptr = 3;
6311 }
6312
6313 /* Extract from an array REGBUF containing the (raw) register state a
6314 function return value of type TYPE, and copy that, in virtual
6315 format, into VALBUF. */
6316
6317 static void
6318 arm_extract_return_value (struct type *type, struct regcache *regs,
6319 gdb_byte *valbuf)
6320 {
6321 struct gdbarch *gdbarch = get_regcache_arch (regs);
6322 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6323
6324 if (TYPE_CODE_FLT == TYPE_CODE (type))
6325 {
6326 switch (gdbarch_tdep (gdbarch)->fp_model)
6327 {
6328 case ARM_FLOAT_FPA:
6329 {
6330 /* The value is in register F0 in internal format. We need to
6331 extract the raw value and then convert it to the desired
6332 internal type. */
6333 bfd_byte tmpbuf[FP_REGISTER_SIZE];
6334
6335 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
6336 convert_from_extended (floatformat_from_type (type), tmpbuf,
6337 valbuf, gdbarch_byte_order (gdbarch));
6338 }
6339 break;
6340
6341 case ARM_FLOAT_SOFT_FPA:
6342 case ARM_FLOAT_SOFT_VFP:
6343 /* ARM_FLOAT_VFP can arise if this is a variadic function so
6344 not using the VFP ABI code. */
6345 case ARM_FLOAT_VFP:
6346 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
6347 if (TYPE_LENGTH (type) > 4)
6348 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
6349 valbuf + INT_REGISTER_SIZE);
6350 break;
6351
6352 default:
6353 internal_error (__FILE__, __LINE__,
6354 _("arm_extract_return_value: "
6355 "Floating point model not supported"));
6356 break;
6357 }
6358 }
6359 else if (TYPE_CODE (type) == TYPE_CODE_INT
6360 || TYPE_CODE (type) == TYPE_CODE_CHAR
6361 || TYPE_CODE (type) == TYPE_CODE_BOOL
6362 || TYPE_CODE (type) == TYPE_CODE_PTR
6363 || TYPE_CODE (type) == TYPE_CODE_REF
6364 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6365 {
6366 /* If the the type is a plain integer, then the access is
6367 straight-forward. Otherwise we have to play around a bit more. */
6368 int len = TYPE_LENGTH (type);
6369 int regno = ARM_A1_REGNUM;
6370 ULONGEST tmp;
6371
6372 while (len > 0)
6373 {
6374 /* By using store_unsigned_integer we avoid having to do
6375 anything special for small big-endian values. */
6376 regcache_cooked_read_unsigned (regs, regno++, &tmp);
6377 store_unsigned_integer (valbuf,
6378 (len > INT_REGISTER_SIZE
6379 ? INT_REGISTER_SIZE : len),
6380 byte_order, tmp);
6381 len -= INT_REGISTER_SIZE;
6382 valbuf += INT_REGISTER_SIZE;
6383 }
6384 }
6385 else
6386 {
6387 /* For a structure or union the behaviour is as if the value had
6388 been stored to word-aligned memory and then loaded into
6389 registers with 32-bit load instruction(s). */
6390 int len = TYPE_LENGTH (type);
6391 int regno = ARM_A1_REGNUM;
6392 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6393
6394 while (len > 0)
6395 {
6396 regcache_cooked_read (regs, regno++, tmpbuf);
6397 memcpy (valbuf, tmpbuf,
6398 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6399 len -= INT_REGISTER_SIZE;
6400 valbuf += INT_REGISTER_SIZE;
6401 }
6402 }
6403 }
6404
6405
6406 /* Will a function return an aggregate type in memory or in a
6407 register? Return 0 if an aggregate type can be returned in a
6408 register, 1 if it must be returned in memory. */
6409
6410 static int
6411 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
6412 {
6413 int nRc;
6414 enum type_code code;
6415
6416 CHECK_TYPEDEF (type);
6417
6418 /* In the ARM ABI, "integer" like aggregate types are returned in
6419 registers. For an aggregate type to be integer like, its size
6420 must be less than or equal to INT_REGISTER_SIZE and the
6421 offset of each addressable subfield must be zero. Note that bit
6422 fields are not addressable, and all addressable subfields of
6423 unions always start at offset zero.
6424
6425 This function is based on the behaviour of GCC 2.95.1.
6426 See: gcc/arm.c: arm_return_in_memory() for details.
6427
6428 Note: All versions of GCC before GCC 2.95.2 do not set up the
6429 parameters correctly for a function returning the following
6430 structure: struct { float f;}; This should be returned in memory,
6431 not a register. Richard Earnshaw sent me a patch, but I do not
6432 know of any way to detect if a function like the above has been
6433 compiled with the correct calling convention. */
6434
6435 /* All aggregate types that won't fit in a register must be returned
6436 in memory. */
6437 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
6438 {
6439 return 1;
6440 }
6441
6442 /* The AAPCS says all aggregates not larger than a word are returned
6443 in a register. */
6444 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
6445 return 0;
6446
6447 /* The only aggregate types that can be returned in a register are
6448 structs and unions. Arrays must be returned in memory. */
6449 code = TYPE_CODE (type);
6450 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
6451 {
6452 return 1;
6453 }
6454
6455 /* Assume all other aggregate types can be returned in a register.
6456 Run a check for structures, unions and arrays. */
6457 nRc = 0;
6458
6459 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
6460 {
6461 int i;
6462 /* Need to check if this struct/union is "integer" like. For
6463 this to be true, its size must be less than or equal to
6464 INT_REGISTER_SIZE and the offset of each addressable
6465 subfield must be zero. Note that bit fields are not
6466 addressable, and unions always start at offset zero. If any
6467 of the subfields is a floating point type, the struct/union
6468 cannot be an integer type. */
6469
6470 /* For each field in the object, check:
6471 1) Is it FP? --> yes, nRc = 1;
6472 2) Is it addressable (bitpos != 0) and
6473 not packed (bitsize == 0)?
6474 --> yes, nRc = 1
6475 */
6476
6477 for (i = 0; i < TYPE_NFIELDS (type); i++)
6478 {
6479 enum type_code field_type_code;
6480 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
6481 i)));
6482
6483 /* Is it a floating point type field? */
6484 if (field_type_code == TYPE_CODE_FLT)
6485 {
6486 nRc = 1;
6487 break;
6488 }
6489
6490 /* If bitpos != 0, then we have to care about it. */
6491 if (TYPE_FIELD_BITPOS (type, i) != 0)
6492 {
6493 /* Bitfields are not addressable. If the field bitsize is
6494 zero, then the field is not packed. Hence it cannot be
6495 a bitfield or any other packed type. */
6496 if (TYPE_FIELD_BITSIZE (type, i) == 0)
6497 {
6498 nRc = 1;
6499 break;
6500 }
6501 }
6502 }
6503 }
6504
6505 return nRc;
6506 }
6507
6508 /* Write into appropriate registers a function return value of type
6509 TYPE, given in virtual format. */
6510
6511 static void
6512 arm_store_return_value (struct type *type, struct regcache *regs,
6513 const gdb_byte *valbuf)
6514 {
6515 struct gdbarch *gdbarch = get_regcache_arch (regs);
6516 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6517
6518 if (TYPE_CODE (type) == TYPE_CODE_FLT)
6519 {
6520 char buf[MAX_REGISTER_SIZE];
6521
6522 switch (gdbarch_tdep (gdbarch)->fp_model)
6523 {
6524 case ARM_FLOAT_FPA:
6525
6526 convert_to_extended (floatformat_from_type (type), buf, valbuf,
6527 gdbarch_byte_order (gdbarch));
6528 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
6529 break;
6530
6531 case ARM_FLOAT_SOFT_FPA:
6532 case ARM_FLOAT_SOFT_VFP:
6533 /* ARM_FLOAT_VFP can arise if this is a variadic function so
6534 not using the VFP ABI code. */
6535 case ARM_FLOAT_VFP:
6536 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
6537 if (TYPE_LENGTH (type) > 4)
6538 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
6539 valbuf + INT_REGISTER_SIZE);
6540 break;
6541
6542 default:
6543 internal_error
6544 (__FILE__, __LINE__,
6545 _("arm_store_return_value: Floating point model not supported"));
6546 break;
6547 }
6548 }
6549 else if (TYPE_CODE (type) == TYPE_CODE_INT
6550 || TYPE_CODE (type) == TYPE_CODE_CHAR
6551 || TYPE_CODE (type) == TYPE_CODE_BOOL
6552 || TYPE_CODE (type) == TYPE_CODE_PTR
6553 || TYPE_CODE (type) == TYPE_CODE_REF
6554 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6555 {
6556 if (TYPE_LENGTH (type) <= 4)
6557 {
6558 /* Values of one word or less are zero/sign-extended and
6559 returned in r0. */
6560 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6561 LONGEST val = unpack_long (type, valbuf);
6562
6563 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
6564 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
6565 }
6566 else
6567 {
6568 /* Integral values greater than one word are stored in consecutive
6569 registers starting with r0. This will always be a multiple of
6570 the regiser size. */
6571 int len = TYPE_LENGTH (type);
6572 int regno = ARM_A1_REGNUM;
6573
6574 while (len > 0)
6575 {
6576 regcache_cooked_write (regs, regno++, valbuf);
6577 len -= INT_REGISTER_SIZE;
6578 valbuf += INT_REGISTER_SIZE;
6579 }
6580 }
6581 }
6582 else
6583 {
6584 /* For a structure or union the behaviour is as if the value had
6585 been stored to word-aligned memory and then loaded into
6586 registers with 32-bit load instruction(s). */
6587 int len = TYPE_LENGTH (type);
6588 int regno = ARM_A1_REGNUM;
6589 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6590
6591 while (len > 0)
6592 {
6593 memcpy (tmpbuf, valbuf,
6594 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6595 regcache_cooked_write (regs, regno++, tmpbuf);
6596 len -= INT_REGISTER_SIZE;
6597 valbuf += INT_REGISTER_SIZE;
6598 }
6599 }
6600 }
6601
6602
6603 /* Handle function return values. */
6604
6605 static enum return_value_convention
6606 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
6607 struct type *valtype, struct regcache *regcache,
6608 gdb_byte *readbuf, const gdb_byte *writebuf)
6609 {
6610 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6611 enum arm_vfp_cprc_base_type vfp_base_type;
6612 int vfp_base_count;
6613
6614 if (arm_vfp_abi_for_function (gdbarch, func_type)
6615 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
6616 {
6617 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
6618 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
6619 int i;
6620 for (i = 0; i < vfp_base_count; i++)
6621 {
6622 if (reg_char == 'q')
6623 {
6624 if (writebuf)
6625 arm_neon_quad_write (gdbarch, regcache, i,
6626 writebuf + i * unit_length);
6627
6628 if (readbuf)
6629 arm_neon_quad_read (gdbarch, regcache, i,
6630 readbuf + i * unit_length);
6631 }
6632 else
6633 {
6634 char name_buf[4];
6635 int regnum;
6636
6637 sprintf (name_buf, "%c%d", reg_char, i);
6638 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6639 strlen (name_buf));
6640 if (writebuf)
6641 regcache_cooked_write (regcache, regnum,
6642 writebuf + i * unit_length);
6643 if (readbuf)
6644 regcache_cooked_read (regcache, regnum,
6645 readbuf + i * unit_length);
6646 }
6647 }
6648 return RETURN_VALUE_REGISTER_CONVENTION;
6649 }
6650
6651 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
6652 || TYPE_CODE (valtype) == TYPE_CODE_UNION
6653 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
6654 {
6655 if (tdep->struct_return == pcc_struct_return
6656 || arm_return_in_memory (gdbarch, valtype))
6657 return RETURN_VALUE_STRUCT_CONVENTION;
6658 }
6659
6660 if (writebuf)
6661 arm_store_return_value (valtype, regcache, writebuf);
6662
6663 if (readbuf)
6664 arm_extract_return_value (valtype, regcache, readbuf);
6665
6666 return RETURN_VALUE_REGISTER_CONVENTION;
6667 }
6668
6669
6670 static int
6671 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
6672 {
6673 struct gdbarch *gdbarch = get_frame_arch (frame);
6674 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6675 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6676 CORE_ADDR jb_addr;
6677 char buf[INT_REGISTER_SIZE];
6678
6679 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
6680
6681 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
6682 INT_REGISTER_SIZE))
6683 return 0;
6684
6685 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
6686 return 1;
6687 }
6688
6689 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
6690 return the target PC. Otherwise return 0. */
6691
6692 CORE_ADDR
6693 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
6694 {
6695 char *name;
6696 int namelen;
6697 CORE_ADDR start_addr;
6698
6699 /* Find the starting address and name of the function containing the PC. */
6700 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
6701 return 0;
6702
6703 /* If PC is in a Thumb call or return stub, return the address of the
6704 target PC, which is in a register. The thunk functions are called
6705 _call_via_xx, where x is the register name. The possible names
6706 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
6707 functions, named __ARM_call_via_r[0-7]. */
6708 if (strncmp (name, "_call_via_", 10) == 0
6709 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
6710 {
6711 /* Use the name suffix to determine which register contains the
6712 target PC. */
6713 static char *table[15] =
6714 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
6715 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
6716 };
6717 int regno;
6718 int offset = strlen (name) - 2;
6719
6720 for (regno = 0; regno <= 14; regno++)
6721 if (strcmp (&name[offset], table[regno]) == 0)
6722 return get_frame_register_unsigned (frame, regno);
6723 }
6724
6725 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
6726 non-interworking calls to foo. We could decode the stubs
6727 to find the target but it's easier to use the symbol table. */
6728 namelen = strlen (name);
6729 if (name[0] == '_' && name[1] == '_'
6730 && ((namelen > 2 + strlen ("_from_thumb")
6731 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
6732 strlen ("_from_thumb")) == 0)
6733 || (namelen > 2 + strlen ("_from_arm")
6734 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
6735 strlen ("_from_arm")) == 0)))
6736 {
6737 char *target_name;
6738 int target_len = namelen - 2;
6739 struct minimal_symbol *minsym;
6740 struct objfile *objfile;
6741 struct obj_section *sec;
6742
6743 if (name[namelen - 1] == 'b')
6744 target_len -= strlen ("_from_thumb");
6745 else
6746 target_len -= strlen ("_from_arm");
6747
6748 target_name = alloca (target_len + 1);
6749 memcpy (target_name, name + 2, target_len);
6750 target_name[target_len] = '\0';
6751
6752 sec = find_pc_section (pc);
6753 objfile = (sec == NULL) ? NULL : sec->objfile;
6754 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
6755 if (minsym != NULL)
6756 return SYMBOL_VALUE_ADDRESS (minsym);
6757 else
6758 return 0;
6759 }
6760
6761 return 0; /* not a stub */
6762 }
6763
6764 static void
6765 set_arm_command (char *args, int from_tty)
6766 {
6767 printf_unfiltered (_("\
6768 \"set arm\" must be followed by an apporpriate subcommand.\n"));
6769 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
6770 }
6771
6772 static void
6773 show_arm_command (char *args, int from_tty)
6774 {
6775 cmd_show_list (showarmcmdlist, from_tty, "");
6776 }
6777
6778 static void
6779 arm_update_current_architecture (void)
6780 {
6781 struct gdbarch_info info;
6782
6783 /* If the current architecture is not ARM, we have nothing to do. */
6784 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
6785 return;
6786
6787 /* Update the architecture. */
6788 gdbarch_info_init (&info);
6789
6790 if (!gdbarch_update_p (info))
6791 internal_error (__FILE__, __LINE__, "could not update architecture");
6792 }
6793
6794 static void
6795 set_fp_model_sfunc (char *args, int from_tty,
6796 struct cmd_list_element *c)
6797 {
6798 enum arm_float_model fp_model;
6799
6800 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
6801 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
6802 {
6803 arm_fp_model = fp_model;
6804 break;
6805 }
6806
6807 if (fp_model == ARM_FLOAT_LAST)
6808 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
6809 current_fp_model);
6810
6811 arm_update_current_architecture ();
6812 }
6813
6814 static void
6815 show_fp_model (struct ui_file *file, int from_tty,
6816 struct cmd_list_element *c, const char *value)
6817 {
6818 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6819
6820 if (arm_fp_model == ARM_FLOAT_AUTO
6821 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6822 fprintf_filtered (file, _("\
6823 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
6824 fp_model_strings[tdep->fp_model]);
6825 else
6826 fprintf_filtered (file, _("\
6827 The current ARM floating point model is \"%s\".\n"),
6828 fp_model_strings[arm_fp_model]);
6829 }
6830
6831 static void
6832 arm_set_abi (char *args, int from_tty,
6833 struct cmd_list_element *c)
6834 {
6835 enum arm_abi_kind arm_abi;
6836
6837 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
6838 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
6839 {
6840 arm_abi_global = arm_abi;
6841 break;
6842 }
6843
6844 if (arm_abi == ARM_ABI_LAST)
6845 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
6846 arm_abi_string);
6847
6848 arm_update_current_architecture ();
6849 }
6850
6851 static void
6852 arm_show_abi (struct ui_file *file, int from_tty,
6853 struct cmd_list_element *c, const char *value)
6854 {
6855 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6856
6857 if (arm_abi_global == ARM_ABI_AUTO
6858 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6859 fprintf_filtered (file, _("\
6860 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
6861 arm_abi_strings[tdep->arm_abi]);
6862 else
6863 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
6864 arm_abi_string);
6865 }
6866
6867 static void
6868 arm_show_fallback_mode (struct ui_file *file, int from_tty,
6869 struct cmd_list_element *c, const char *value)
6870 {
6871 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6872
6873 fprintf_filtered (file,
6874 _("The current execution mode assumed "
6875 "(when symbols are unavailable) is \"%s\".\n"),
6876 arm_fallback_mode_string);
6877 }
6878
6879 static void
6880 arm_show_force_mode (struct ui_file *file, int from_tty,
6881 struct cmd_list_element *c, const char *value)
6882 {
6883 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6884
6885 fprintf_filtered (file,
6886 _("The current execution mode assumed "
6887 "(even when symbols are available) is \"%s\".\n"),
6888 arm_force_mode_string);
6889 }
6890
6891 /* If the user changes the register disassembly style used for info
6892 register and other commands, we have to also switch the style used
6893 in opcodes for disassembly output. This function is run in the "set
6894 arm disassembly" command, and does that. */
6895
6896 static void
6897 set_disassembly_style_sfunc (char *args, int from_tty,
6898 struct cmd_list_element *c)
6899 {
6900 set_disassembly_style ();
6901 }
6902 \f
6903 /* Return the ARM register name corresponding to register I. */
6904 static const char *
6905 arm_register_name (struct gdbarch *gdbarch, int i)
6906 {
6907 const int num_regs = gdbarch_num_regs (gdbarch);
6908
6909 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
6910 && i >= num_regs && i < num_regs + 32)
6911 {
6912 static const char *const vfp_pseudo_names[] = {
6913 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6914 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6915 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6916 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6917 };
6918
6919 return vfp_pseudo_names[i - num_regs];
6920 }
6921
6922 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
6923 && i >= num_regs + 32 && i < num_regs + 32 + 16)
6924 {
6925 static const char *const neon_pseudo_names[] = {
6926 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6927 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6928 };
6929
6930 return neon_pseudo_names[i - num_regs - 32];
6931 }
6932
6933 if (i >= ARRAY_SIZE (arm_register_names))
6934 /* These registers are only supported on targets which supply
6935 an XML description. */
6936 return "";
6937
6938 return arm_register_names[i];
6939 }
6940
6941 static void
6942 set_disassembly_style (void)
6943 {
6944 int current;
6945
6946 /* Find the style that the user wants. */
6947 for (current = 0; current < num_disassembly_options; current++)
6948 if (disassembly_style == valid_disassembly_styles[current])
6949 break;
6950 gdb_assert (current < num_disassembly_options);
6951
6952 /* Synchronize the disassembler. */
6953 set_arm_regname_option (current);
6954 }
6955
6956 /* Test whether the coff symbol specific value corresponds to a Thumb
6957 function. */
6958
6959 static int
6960 coff_sym_is_thumb (int val)
6961 {
6962 return (val == C_THUMBEXT
6963 || val == C_THUMBSTAT
6964 || val == C_THUMBEXTFUNC
6965 || val == C_THUMBSTATFUNC
6966 || val == C_THUMBLABEL);
6967 }
6968
6969 /* arm_coff_make_msymbol_special()
6970 arm_elf_make_msymbol_special()
6971
6972 These functions test whether the COFF or ELF symbol corresponds to
6973 an address in thumb code, and set a "special" bit in a minimal
6974 symbol to indicate that it does. */
6975
6976 static void
6977 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
6978 {
6979 /* Thumb symbols are of type STT_LOPROC, (synonymous with
6980 STT_ARM_TFUNC). */
6981 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
6982 == STT_LOPROC)
6983 MSYMBOL_SET_SPECIAL (msym);
6984 }
6985
6986 static void
6987 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
6988 {
6989 if (coff_sym_is_thumb (val))
6990 MSYMBOL_SET_SPECIAL (msym);
6991 }
6992
6993 static void
6994 arm_objfile_data_free (struct objfile *objfile, void *arg)
6995 {
6996 struct arm_per_objfile *data = arg;
6997 unsigned int i;
6998
6999 for (i = 0; i < objfile->obfd->section_count; i++)
7000 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
7001 }
7002
7003 static void
7004 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
7005 asymbol *sym)
7006 {
7007 const char *name = bfd_asymbol_name (sym);
7008 struct arm_per_objfile *data;
7009 VEC(arm_mapping_symbol_s) **map_p;
7010 struct arm_mapping_symbol new_map_sym;
7011
7012 gdb_assert (name[0] == '$');
7013 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
7014 return;
7015
7016 data = objfile_data (objfile, arm_objfile_data_key);
7017 if (data == NULL)
7018 {
7019 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
7020 struct arm_per_objfile);
7021 set_objfile_data (objfile, arm_objfile_data_key, data);
7022 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
7023 objfile->obfd->section_count,
7024 VEC(arm_mapping_symbol_s) *);
7025 }
7026 map_p = &data->section_maps[bfd_get_section (sym)->index];
7027
7028 new_map_sym.value = sym->value;
7029 new_map_sym.type = name[1];
7030
7031 /* Assume that most mapping symbols appear in order of increasing
7032 value. If they were randomly distributed, it would be faster to
7033 always push here and then sort at first use. */
7034 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
7035 {
7036 struct arm_mapping_symbol *prev_map_sym;
7037
7038 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
7039 if (prev_map_sym->value >= sym->value)
7040 {
7041 unsigned int idx;
7042 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
7043 arm_compare_mapping_symbols);
7044 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
7045 return;
7046 }
7047 }
7048
7049 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
7050 }
7051
7052 static void
7053 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7054 {
7055 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7056 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7057
7058 /* If necessary, set the T bit. */
7059 if (arm_apcs_32)
7060 {
7061 ULONGEST val, t_bit;
7062 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7063 t_bit = arm_psr_thumb_bit (gdbarch);
7064 if (arm_pc_is_thumb (gdbarch, pc))
7065 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7066 val | t_bit);
7067 else
7068 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7069 val & ~t_bit);
7070 }
7071 }
7072
7073 /* Read the contents of a NEON quad register, by reading from two
7074 double registers. This is used to implement the quad pseudo
7075 registers, and for argument passing in case the quad registers are
7076 missing; vectors are passed in quad registers when using the VFP
7077 ABI, even if a NEON unit is not present. REGNUM is the index of
7078 the quad register, in [0, 15]. */
7079
7080 static void
7081 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
7082 int regnum, gdb_byte *buf)
7083 {
7084 char name_buf[4];
7085 gdb_byte reg_buf[8];
7086 int offset, double_regnum;
7087
7088 sprintf (name_buf, "d%d", regnum << 1);
7089 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7090 strlen (name_buf));
7091
7092 /* d0 is always the least significant half of q0. */
7093 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7094 offset = 8;
7095 else
7096 offset = 0;
7097
7098 regcache_raw_read (regcache, double_regnum, reg_buf);
7099 memcpy (buf + offset, reg_buf, 8);
7100
7101 offset = 8 - offset;
7102 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
7103 memcpy (buf + offset, reg_buf, 8);
7104 }
7105
7106 static void
7107 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
7108 int regnum, gdb_byte *buf)
7109 {
7110 const int num_regs = gdbarch_num_regs (gdbarch);
7111 char name_buf[4];
7112 gdb_byte reg_buf[8];
7113 int offset, double_regnum;
7114
7115 gdb_assert (regnum >= num_regs);
7116 regnum -= num_regs;
7117
7118 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7119 /* Quad-precision register. */
7120 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
7121 else
7122 {
7123 /* Single-precision register. */
7124 gdb_assert (regnum < 32);
7125
7126 /* s0 is always the least significant half of d0. */
7127 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7128 offset = (regnum & 1) ? 0 : 4;
7129 else
7130 offset = (regnum & 1) ? 4 : 0;
7131
7132 sprintf (name_buf, "d%d", regnum >> 1);
7133 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7134 strlen (name_buf));
7135
7136 regcache_raw_read (regcache, double_regnum, reg_buf);
7137 memcpy (buf, reg_buf + offset, 4);
7138 }
7139 }
7140
7141 /* Store the contents of BUF to a NEON quad register, by writing to
7142 two double registers. This is used to implement the quad pseudo
7143 registers, and for argument passing in case the quad registers are
7144 missing; vectors are passed in quad registers when using the VFP
7145 ABI, even if a NEON unit is not present. REGNUM is the index
7146 of the quad register, in [0, 15]. */
7147
7148 static void
7149 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
7150 int regnum, const gdb_byte *buf)
7151 {
7152 char name_buf[4];
7153 gdb_byte reg_buf[8];
7154 int offset, double_regnum;
7155
7156 sprintf (name_buf, "d%d", regnum << 1);
7157 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7158 strlen (name_buf));
7159
7160 /* d0 is always the least significant half of q0. */
7161 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7162 offset = 8;
7163 else
7164 offset = 0;
7165
7166 regcache_raw_write (regcache, double_regnum, buf + offset);
7167 offset = 8 - offset;
7168 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
7169 }
7170
7171 static void
7172 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
7173 int regnum, const gdb_byte *buf)
7174 {
7175 const int num_regs = gdbarch_num_regs (gdbarch);
7176 char name_buf[4];
7177 gdb_byte reg_buf[8];
7178 int offset, double_regnum;
7179
7180 gdb_assert (regnum >= num_regs);
7181 regnum -= num_regs;
7182
7183 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7184 /* Quad-precision register. */
7185 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
7186 else
7187 {
7188 /* Single-precision register. */
7189 gdb_assert (regnum < 32);
7190
7191 /* s0 is always the least significant half of d0. */
7192 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7193 offset = (regnum & 1) ? 0 : 4;
7194 else
7195 offset = (regnum & 1) ? 4 : 0;
7196
7197 sprintf (name_buf, "d%d", regnum >> 1);
7198 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7199 strlen (name_buf));
7200
7201 regcache_raw_read (regcache, double_regnum, reg_buf);
7202 memcpy (reg_buf + offset, buf, 4);
7203 regcache_raw_write (regcache, double_regnum, reg_buf);
7204 }
7205 }
7206
7207 static struct value *
7208 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
7209 {
7210 const int *reg_p = baton;
7211 return value_of_register (*reg_p, frame);
7212 }
7213 \f
7214 static enum gdb_osabi
7215 arm_elf_osabi_sniffer (bfd *abfd)
7216 {
7217 unsigned int elfosabi;
7218 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
7219
7220 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
7221
7222 if (elfosabi == ELFOSABI_ARM)
7223 /* GNU tools use this value. Check note sections in this case,
7224 as well. */
7225 bfd_map_over_sections (abfd,
7226 generic_elf_osabi_sniff_abi_tag_sections,
7227 &osabi);
7228
7229 /* Anything else will be handled by the generic ELF sniffer. */
7230 return osabi;
7231 }
7232
7233 static int
7234 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
7235 struct reggroup *group)
7236 {
7237 /* FPS register's type is INT, but belongs to float_group. */
7238 if (regnum == ARM_FPS_REGNUM)
7239 return (group == float_reggroup);
7240 else
7241 return default_register_reggroup_p (gdbarch, regnum, group);
7242 }
7243
7244 \f
7245 /* Initialize the current architecture based on INFO. If possible,
7246 re-use an architecture from ARCHES, which is a list of
7247 architectures already created during this debugging session.
7248
7249 Called e.g. at program startup, when reading a core file, and when
7250 reading a binary file. */
7251
7252 static struct gdbarch *
7253 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
7254 {
7255 struct gdbarch_tdep *tdep;
7256 struct gdbarch *gdbarch;
7257 struct gdbarch_list *best_arch;
7258 enum arm_abi_kind arm_abi = arm_abi_global;
7259 enum arm_float_model fp_model = arm_fp_model;
7260 struct tdesc_arch_data *tdesc_data = NULL;
7261 int i, is_m = 0;
7262 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
7263 int have_neon = 0;
7264 int have_fpa_registers = 1;
7265 const struct target_desc *tdesc = info.target_desc;
7266
7267 /* If we have an object to base this architecture on, try to determine
7268 its ABI. */
7269
7270 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
7271 {
7272 int ei_osabi, e_flags;
7273
7274 switch (bfd_get_flavour (info.abfd))
7275 {
7276 case bfd_target_aout_flavour:
7277 /* Assume it's an old APCS-style ABI. */
7278 arm_abi = ARM_ABI_APCS;
7279 break;
7280
7281 case bfd_target_coff_flavour:
7282 /* Assume it's an old APCS-style ABI. */
7283 /* XXX WinCE? */
7284 arm_abi = ARM_ABI_APCS;
7285 break;
7286
7287 case bfd_target_elf_flavour:
7288 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
7289 e_flags = elf_elfheader (info.abfd)->e_flags;
7290
7291 if (ei_osabi == ELFOSABI_ARM)
7292 {
7293 /* GNU tools used to use this value, but do not for EABI
7294 objects. There's nowhere to tag an EABI version
7295 anyway, so assume APCS. */
7296 arm_abi = ARM_ABI_APCS;
7297 }
7298 else if (ei_osabi == ELFOSABI_NONE)
7299 {
7300 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
7301 int attr_arch, attr_profile;
7302
7303 switch (eabi_ver)
7304 {
7305 case EF_ARM_EABI_UNKNOWN:
7306 /* Assume GNU tools. */
7307 arm_abi = ARM_ABI_APCS;
7308 break;
7309
7310 case EF_ARM_EABI_VER4:
7311 case EF_ARM_EABI_VER5:
7312 arm_abi = ARM_ABI_AAPCS;
7313 /* EABI binaries default to VFP float ordering.
7314 They may also contain build attributes that can
7315 be used to identify if the VFP argument-passing
7316 ABI is in use. */
7317 if (fp_model == ARM_FLOAT_AUTO)
7318 {
7319 #ifdef HAVE_ELF
7320 switch (bfd_elf_get_obj_attr_int (info.abfd,
7321 OBJ_ATTR_PROC,
7322 Tag_ABI_VFP_args))
7323 {
7324 case 0:
7325 /* "The user intended FP parameter/result
7326 passing to conform to AAPCS, base
7327 variant". */
7328 fp_model = ARM_FLOAT_SOFT_VFP;
7329 break;
7330 case 1:
7331 /* "The user intended FP parameter/result
7332 passing to conform to AAPCS, VFP
7333 variant". */
7334 fp_model = ARM_FLOAT_VFP;
7335 break;
7336 case 2:
7337 /* "The user intended FP parameter/result
7338 passing to conform to tool chain-specific
7339 conventions" - we don't know any such
7340 conventions, so leave it as "auto". */
7341 break;
7342 default:
7343 /* Attribute value not mentioned in the
7344 October 2008 ABI, so leave it as
7345 "auto". */
7346 break;
7347 }
7348 #else
7349 fp_model = ARM_FLOAT_SOFT_VFP;
7350 #endif
7351 }
7352 break;
7353
7354 default:
7355 /* Leave it as "auto". */
7356 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
7357 break;
7358 }
7359
7360 #ifdef HAVE_ELF
7361 /* Detect M-profile programs. This only works if the
7362 executable file includes build attributes; GCC does
7363 copy them to the executable, but e.g. RealView does
7364 not. */
7365 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7366 Tag_CPU_arch);
7367 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
7368 OBJ_ATTR_PROC,
7369 Tag_CPU_arch_profile);
7370 /* GCC specifies the profile for v6-M; RealView only
7371 specifies the profile for architectures starting with
7372 V7 (as opposed to architectures with a tag
7373 numerically greater than TAG_CPU_ARCH_V7). */
7374 if (!tdesc_has_registers (tdesc)
7375 && (attr_arch == TAG_CPU_ARCH_V6_M
7376 || attr_arch == TAG_CPU_ARCH_V6S_M
7377 || attr_profile == 'M'))
7378 tdesc = tdesc_arm_with_m;
7379 #endif
7380 }
7381
7382 if (fp_model == ARM_FLOAT_AUTO)
7383 {
7384 int e_flags = elf_elfheader (info.abfd)->e_flags;
7385
7386 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
7387 {
7388 case 0:
7389 /* Leave it as "auto". Strictly speaking this case
7390 means FPA, but almost nobody uses that now, and
7391 many toolchains fail to set the appropriate bits
7392 for the floating-point model they use. */
7393 break;
7394 case EF_ARM_SOFT_FLOAT:
7395 fp_model = ARM_FLOAT_SOFT_FPA;
7396 break;
7397 case EF_ARM_VFP_FLOAT:
7398 fp_model = ARM_FLOAT_VFP;
7399 break;
7400 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
7401 fp_model = ARM_FLOAT_SOFT_VFP;
7402 break;
7403 }
7404 }
7405
7406 if (e_flags & EF_ARM_BE8)
7407 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
7408
7409 break;
7410
7411 default:
7412 /* Leave it as "auto". */
7413 break;
7414 }
7415 }
7416
7417 /* Check any target description for validity. */
7418 if (tdesc_has_registers (tdesc))
7419 {
7420 /* For most registers we require GDB's default names; but also allow
7421 the numeric names for sp / lr / pc, as a convenience. */
7422 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
7423 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
7424 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
7425
7426 const struct tdesc_feature *feature;
7427 int valid_p;
7428
7429 feature = tdesc_find_feature (tdesc,
7430 "org.gnu.gdb.arm.core");
7431 if (feature == NULL)
7432 {
7433 feature = tdesc_find_feature (tdesc,
7434 "org.gnu.gdb.arm.m-profile");
7435 if (feature == NULL)
7436 return NULL;
7437 else
7438 is_m = 1;
7439 }
7440
7441 tdesc_data = tdesc_data_alloc ();
7442
7443 valid_p = 1;
7444 for (i = 0; i < ARM_SP_REGNUM; i++)
7445 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7446 arm_register_names[i]);
7447 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7448 ARM_SP_REGNUM,
7449 arm_sp_names);
7450 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7451 ARM_LR_REGNUM,
7452 arm_lr_names);
7453 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7454 ARM_PC_REGNUM,
7455 arm_pc_names);
7456 if (is_m)
7457 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7458 ARM_PS_REGNUM, "xpsr");
7459 else
7460 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7461 ARM_PS_REGNUM, "cpsr");
7462
7463 if (!valid_p)
7464 {
7465 tdesc_data_cleanup (tdesc_data);
7466 return NULL;
7467 }
7468
7469 feature = tdesc_find_feature (tdesc,
7470 "org.gnu.gdb.arm.fpa");
7471 if (feature != NULL)
7472 {
7473 valid_p = 1;
7474 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
7475 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7476 arm_register_names[i]);
7477 if (!valid_p)
7478 {
7479 tdesc_data_cleanup (tdesc_data);
7480 return NULL;
7481 }
7482 }
7483 else
7484 have_fpa_registers = 0;
7485
7486 feature = tdesc_find_feature (tdesc,
7487 "org.gnu.gdb.xscale.iwmmxt");
7488 if (feature != NULL)
7489 {
7490 static const char *const iwmmxt_names[] = {
7491 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
7492 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
7493 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
7494 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
7495 };
7496
7497 valid_p = 1;
7498 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
7499 valid_p
7500 &= tdesc_numbered_register (feature, tdesc_data, i,
7501 iwmmxt_names[i - ARM_WR0_REGNUM]);
7502
7503 /* Check for the control registers, but do not fail if they
7504 are missing. */
7505 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
7506 tdesc_numbered_register (feature, tdesc_data, i,
7507 iwmmxt_names[i - ARM_WR0_REGNUM]);
7508
7509 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
7510 valid_p
7511 &= tdesc_numbered_register (feature, tdesc_data, i,
7512 iwmmxt_names[i - ARM_WR0_REGNUM]);
7513
7514 if (!valid_p)
7515 {
7516 tdesc_data_cleanup (tdesc_data);
7517 return NULL;
7518 }
7519 }
7520
7521 /* If we have a VFP unit, check whether the single precision registers
7522 are present. If not, then we will synthesize them as pseudo
7523 registers. */
7524 feature = tdesc_find_feature (tdesc,
7525 "org.gnu.gdb.arm.vfp");
7526 if (feature != NULL)
7527 {
7528 static const char *const vfp_double_names[] = {
7529 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
7530 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
7531 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
7532 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
7533 };
7534
7535 /* Require the double precision registers. There must be either
7536 16 or 32. */
7537 valid_p = 1;
7538 for (i = 0; i < 32; i++)
7539 {
7540 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7541 ARM_D0_REGNUM + i,
7542 vfp_double_names[i]);
7543 if (!valid_p)
7544 break;
7545 }
7546
7547 if (!valid_p && i != 16)
7548 {
7549 tdesc_data_cleanup (tdesc_data);
7550 return NULL;
7551 }
7552
7553 if (tdesc_unnumbered_register (feature, "s0") == 0)
7554 have_vfp_pseudos = 1;
7555
7556 have_vfp_registers = 1;
7557
7558 /* If we have VFP, also check for NEON. The architecture allows
7559 NEON without VFP (integer vector operations only), but GDB
7560 does not support that. */
7561 feature = tdesc_find_feature (tdesc,
7562 "org.gnu.gdb.arm.neon");
7563 if (feature != NULL)
7564 {
7565 /* NEON requires 32 double-precision registers. */
7566 if (i != 32)
7567 {
7568 tdesc_data_cleanup (tdesc_data);
7569 return NULL;
7570 }
7571
7572 /* If there are quad registers defined by the stub, use
7573 their type; otherwise (normally) provide them with
7574 the default type. */
7575 if (tdesc_unnumbered_register (feature, "q0") == 0)
7576 have_neon_pseudos = 1;
7577
7578 have_neon = 1;
7579 }
7580 }
7581 }
7582
7583 /* If there is already a candidate, use it. */
7584 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
7585 best_arch != NULL;
7586 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
7587 {
7588 if (arm_abi != ARM_ABI_AUTO
7589 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
7590 continue;
7591
7592 if (fp_model != ARM_FLOAT_AUTO
7593 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
7594 continue;
7595
7596 /* There are various other properties in tdep that we do not
7597 need to check here: those derived from a target description,
7598 since gdbarches with a different target description are
7599 automatically disqualified. */
7600
7601 /* Do check is_m, though, since it might come from the binary. */
7602 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
7603 continue;
7604
7605 /* Found a match. */
7606 break;
7607 }
7608
7609 if (best_arch != NULL)
7610 {
7611 if (tdesc_data != NULL)
7612 tdesc_data_cleanup (tdesc_data);
7613 return best_arch->gdbarch;
7614 }
7615
7616 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
7617 gdbarch = gdbarch_alloc (&info, tdep);
7618
7619 /* Record additional information about the architecture we are defining.
7620 These are gdbarch discriminators, like the OSABI. */
7621 tdep->arm_abi = arm_abi;
7622 tdep->fp_model = fp_model;
7623 tdep->is_m = is_m;
7624 tdep->have_fpa_registers = have_fpa_registers;
7625 tdep->have_vfp_registers = have_vfp_registers;
7626 tdep->have_vfp_pseudos = have_vfp_pseudos;
7627 tdep->have_neon_pseudos = have_neon_pseudos;
7628 tdep->have_neon = have_neon;
7629
7630 /* Breakpoints. */
7631 switch (info.byte_order_for_code)
7632 {
7633 case BFD_ENDIAN_BIG:
7634 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
7635 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
7636 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
7637 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
7638
7639 break;
7640
7641 case BFD_ENDIAN_LITTLE:
7642 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
7643 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
7644 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
7645 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
7646
7647 break;
7648
7649 default:
7650 internal_error (__FILE__, __LINE__,
7651 _("arm_gdbarch_init: bad byte order for float format"));
7652 }
7653
7654 /* On ARM targets char defaults to unsigned. */
7655 set_gdbarch_char_signed (gdbarch, 0);
7656
7657 /* Note: for displaced stepping, this includes the breakpoint, and one word
7658 of additional scratch space. This setting isn't used for anything beside
7659 displaced stepping at present. */
7660 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
7661
7662 /* This should be low enough for everything. */
7663 tdep->lowest_pc = 0x20;
7664 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
7665
7666 /* The default, for both APCS and AAPCS, is to return small
7667 structures in registers. */
7668 tdep->struct_return = reg_struct_return;
7669
7670 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
7671 set_gdbarch_frame_align (gdbarch, arm_frame_align);
7672
7673 set_gdbarch_write_pc (gdbarch, arm_write_pc);
7674
7675 /* Frame handling. */
7676 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
7677 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
7678 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
7679
7680 frame_base_set_default (gdbarch, &arm_normal_base);
7681
7682 /* Address manipulation. */
7683 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
7684 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
7685
7686 /* Advance PC across function entry code. */
7687 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
7688
7689 /* Detect whether PC is in function epilogue. */
7690 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
7691
7692 /* Skip trampolines. */
7693 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
7694
7695 /* The stack grows downward. */
7696 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
7697
7698 /* Breakpoint manipulation. */
7699 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
7700 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
7701 arm_remote_breakpoint_from_pc);
7702
7703 /* Information about registers, etc. */
7704 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
7705 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
7706 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
7707 set_gdbarch_register_type (gdbarch, arm_register_type);
7708 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
7709
7710 /* This "info float" is FPA-specific. Use the generic version if we
7711 do not have FPA. */
7712 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
7713 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
7714
7715 /* Internal <-> external register number maps. */
7716 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
7717 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
7718
7719 set_gdbarch_register_name (gdbarch, arm_register_name);
7720
7721 /* Returning results. */
7722 set_gdbarch_return_value (gdbarch, arm_return_value);
7723
7724 /* Disassembly. */
7725 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
7726
7727 /* Minsymbol frobbing. */
7728 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
7729 set_gdbarch_coff_make_msymbol_special (gdbarch,
7730 arm_coff_make_msymbol_special);
7731 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
7732
7733 /* Thumb-2 IT block support. */
7734 set_gdbarch_adjust_breakpoint_address (gdbarch,
7735 arm_adjust_breakpoint_address);
7736
7737 /* Virtual tables. */
7738 set_gdbarch_vbit_in_delta (gdbarch, 1);
7739
7740 /* Hook in the ABI-specific overrides, if they have been registered. */
7741 gdbarch_init_osabi (info, gdbarch);
7742
7743 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
7744
7745 /* Add some default predicates. */
7746 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
7747 dwarf2_append_unwinders (gdbarch);
7748 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
7749
7750 /* Now we have tuned the configuration, set a few final things,
7751 based on what the OS ABI has told us. */
7752
7753 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
7754 binaries are always marked. */
7755 if (tdep->arm_abi == ARM_ABI_AUTO)
7756 tdep->arm_abi = ARM_ABI_APCS;
7757
7758 /* We used to default to FPA for generic ARM, but almost nobody
7759 uses that now, and we now provide a way for the user to force
7760 the model. So default to the most useful variant. */
7761 if (tdep->fp_model == ARM_FLOAT_AUTO)
7762 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
7763
7764 if (tdep->jb_pc >= 0)
7765 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
7766
7767 /* Floating point sizes and format. */
7768 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
7769 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
7770 {
7771 set_gdbarch_double_format
7772 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7773 set_gdbarch_long_double_format
7774 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7775 }
7776 else
7777 {
7778 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
7779 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
7780 }
7781
7782 if (have_vfp_pseudos)
7783 {
7784 /* NOTE: These are the only pseudo registers used by
7785 the ARM target at the moment. If more are added, a
7786 little more care in numbering will be needed. */
7787
7788 int num_pseudos = 32;
7789 if (have_neon_pseudos)
7790 num_pseudos += 16;
7791 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
7792 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
7793 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
7794 }
7795
7796 if (tdesc_data)
7797 {
7798 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
7799
7800 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
7801
7802 /* Override tdesc_register_type to adjust the types of VFP
7803 registers for NEON. */
7804 set_gdbarch_register_type (gdbarch, arm_register_type);
7805 }
7806
7807 /* Add standard register aliases. We add aliases even for those
7808 nanes which are used by the current architecture - it's simpler,
7809 and does no harm, since nothing ever lists user registers. */
7810 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
7811 user_reg_add (gdbarch, arm_register_aliases[i].name,
7812 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
7813
7814 return gdbarch;
7815 }
7816
7817 static void
7818 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
7819 {
7820 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7821
7822 if (tdep == NULL)
7823 return;
7824
7825 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
7826 (unsigned long) tdep->lowest_pc);
7827 }
7828
7829 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
7830
7831 void
7832 _initialize_arm_tdep (void)
7833 {
7834 struct ui_file *stb;
7835 long length;
7836 struct cmd_list_element *new_set, *new_show;
7837 const char *setname;
7838 const char *setdesc;
7839 const char *const *regnames;
7840 int numregs, i, j;
7841 static char *helptext;
7842 char regdesc[1024], *rdptr = regdesc;
7843 size_t rest = sizeof (regdesc);
7844
7845 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
7846
7847 arm_objfile_data_key
7848 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
7849
7850 /* Register an ELF OS ABI sniffer for ARM binaries. */
7851 gdbarch_register_osabi_sniffer (bfd_arch_arm,
7852 bfd_target_elf_flavour,
7853 arm_elf_osabi_sniffer);
7854
7855 /* Initialize the standard target descriptions. */
7856 initialize_tdesc_arm_with_m ();
7857
7858 /* Get the number of possible sets of register names defined in opcodes. */
7859 num_disassembly_options = get_arm_regname_num_options ();
7860
7861 /* Add root prefix command for all "set arm"/"show arm" commands. */
7862 add_prefix_cmd ("arm", no_class, set_arm_command,
7863 _("Various ARM-specific commands."),
7864 &setarmcmdlist, "set arm ", 0, &setlist);
7865
7866 add_prefix_cmd ("arm", no_class, show_arm_command,
7867 _("Various ARM-specific commands."),
7868 &showarmcmdlist, "show arm ", 0, &showlist);
7869
7870 /* Sync the opcode insn printer with our register viewer. */
7871 parse_arm_disassembler_option ("reg-names-std");
7872
7873 /* Initialize the array that will be passed to
7874 add_setshow_enum_cmd(). */
7875 valid_disassembly_styles
7876 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
7877 for (i = 0; i < num_disassembly_options; i++)
7878 {
7879 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
7880 valid_disassembly_styles[i] = setname;
7881 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
7882 rdptr += length;
7883 rest -= length;
7884 /* When we find the default names, tell the disassembler to use
7885 them. */
7886 if (!strcmp (setname, "std"))
7887 {
7888 disassembly_style = setname;
7889 set_arm_regname_option (i);
7890 }
7891 }
7892 /* Mark the end of valid options. */
7893 valid_disassembly_styles[num_disassembly_options] = NULL;
7894
7895 /* Create the help text. */
7896 stb = mem_fileopen ();
7897 fprintf_unfiltered (stb, "%s%s%s",
7898 _("The valid values are:\n"),
7899 regdesc,
7900 _("The default is \"std\"."));
7901 helptext = ui_file_xstrdup (stb, NULL);
7902 ui_file_delete (stb);
7903
7904 add_setshow_enum_cmd("disassembler", no_class,
7905 valid_disassembly_styles, &disassembly_style,
7906 _("Set the disassembly style."),
7907 _("Show the disassembly style."),
7908 helptext,
7909 set_disassembly_style_sfunc,
7910 NULL, /* FIXME: i18n: The disassembly style is
7911 \"%s\". */
7912 &setarmcmdlist, &showarmcmdlist);
7913
7914 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
7915 _("Set usage of ARM 32-bit mode."),
7916 _("Show usage of ARM 32-bit mode."),
7917 _("When off, a 26-bit PC will be used."),
7918 NULL,
7919 NULL, /* FIXME: i18n: Usage of ARM 32-bit
7920 mode is %s. */
7921 &setarmcmdlist, &showarmcmdlist);
7922
7923 /* Add a command to allow the user to force the FPU model. */
7924 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
7925 _("Set the floating point type."),
7926 _("Show the floating point type."),
7927 _("auto - Determine the FP typefrom the OS-ABI.\n\
7928 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7929 fpa - FPA co-processor (GCC compiled).\n\
7930 softvfp - Software FP with pure-endian doubles.\n\
7931 vfp - VFP co-processor."),
7932 set_fp_model_sfunc, show_fp_model,
7933 &setarmcmdlist, &showarmcmdlist);
7934
7935 /* Add a command to allow the user to force the ABI. */
7936 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
7937 _("Set the ABI."),
7938 _("Show the ABI."),
7939 NULL, arm_set_abi, arm_show_abi,
7940 &setarmcmdlist, &showarmcmdlist);
7941
7942 /* Add two commands to allow the user to force the assumed
7943 execution mode. */
7944 add_setshow_enum_cmd ("fallback-mode", class_support,
7945 arm_mode_strings, &arm_fallback_mode_string,
7946 _("Set the mode assumed when symbols are unavailable."),
7947 _("Show the mode assumed when symbols are unavailable."),
7948 NULL, NULL, arm_show_fallback_mode,
7949 &setarmcmdlist, &showarmcmdlist);
7950 add_setshow_enum_cmd ("force-mode", class_support,
7951 arm_mode_strings, &arm_force_mode_string,
7952 _("Set the mode assumed even when symbols are available."),
7953 _("Show the mode assumed even when symbols are available."),
7954 NULL, NULL, arm_show_force_mode,
7955 &setarmcmdlist, &showarmcmdlist);
7956
7957 /* Debugging flag. */
7958 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
7959 _("Set ARM debugging."),
7960 _("Show ARM debugging."),
7961 _("When on, arm-specific debugging is enabled."),
7962 NULL,
7963 NULL, /* FIXME: i18n: "ARM debugging is %s. */
7964 &setdebuglist, &showdebuglist);
7965 }
This page took 0.266112 seconds and 4 git commands to generate.