gdb/arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper () */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
45
46 #include "arm-tdep.h"
47 #include "gdb/sim-arm.h"
48
49 #include "elf-bfd.h"
50 #include "coff/internal.h"
51 #include "elf/arm.h"
52
53 #include "gdb_assert.h"
54 #include "vec.h"
55
56 static int arm_debug;
57
58 /* Macros for setting and testing a bit in a minimal symbol that marks
59 it as Thumb function. The MSB of the minimal symbol's "info" field
60 is used for this purpose.
61
62 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
63 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
64
65 #define MSYMBOL_SET_SPECIAL(msym) \
66 MSYMBOL_TARGET_FLAG_1 (msym) = 1
67
68 #define MSYMBOL_IS_SPECIAL(msym) \
69 MSYMBOL_TARGET_FLAG_1 (msym)
70
71 /* Per-objfile data used for mapping symbols. */
72 static const struct objfile_data *arm_objfile_data_key;
73
74 struct arm_mapping_symbol
75 {
76 bfd_vma value;
77 char type;
78 };
79 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
80 DEF_VEC_O(arm_mapping_symbol_s);
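/* These entries follow the ARM ELF mapping symbol convention: a "$a"
   symbol marks the start of a run of ARM code, "$t" the start of Thumb
   code, and "$d" the start of literal data.  VALUE holds the symbol's
   section-relative address and TYPE the letter after the '$'; for
   example, a "$t" symbol at .text+0x40 records that the bytes from that
   offset up to the next mapping symbol are Thumb instructions.  */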
81
82 struct arm_per_objfile
83 {
84 VEC(arm_mapping_symbol_s) **section_maps;
85 };
86
87 /* The list of available "set arm ..." and "show arm ..." commands. */
88 static struct cmd_list_element *setarmcmdlist = NULL;
89 static struct cmd_list_element *showarmcmdlist = NULL;
90
91 /* The type of floating-point to use. Keep this in sync with enum
92 arm_float_model, and the help string in _initialize_arm_tdep. */
93 static const char *fp_model_strings[] =
94 {
95 "auto",
96 "softfpa",
97 "fpa",
98 "softvfp",
99 "vfp",
100 NULL
101 };
102
103 /* A variable that can be configured by the user. */
104 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
105 static const char *current_fp_model = "auto";
106
107 /* The ABI to use. Keep this in sync with arm_abi_kind. */
108 static const char *arm_abi_strings[] =
109 {
110 "auto",
111 "APCS",
112 "AAPCS",
113 NULL
114 };
115
116 /* A variable that can be configured by the user. */
117 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
118 static const char *arm_abi_string = "auto";
119
120 /* The execution mode to assume. */
121 static const char *arm_mode_strings[] =
122 {
123 "auto",
124 "arm",
125 "thumb"
126 };
127
128 static const char *arm_fallback_mode_string = "auto";
129 static const char *arm_force_mode_string = "auto";
130
131 /* Number of different reg name sets (options). */
132 static int num_disassembly_options;
133
134 /* The standard register names, and all the valid aliases for them. */
135 static const struct
136 {
137 const char *name;
138 int regnum;
139 } arm_register_aliases[] = {
140 /* Basic register numbers. */
141 { "r0", 0 },
142 { "r1", 1 },
143 { "r2", 2 },
144 { "r3", 3 },
145 { "r4", 4 },
146 { "r5", 5 },
147 { "r6", 6 },
148 { "r7", 7 },
149 { "r8", 8 },
150 { "r9", 9 },
151 { "r10", 10 },
152 { "r11", 11 },
153 { "r12", 12 },
154 { "r13", 13 },
155 { "r14", 14 },
156 { "r15", 15 },
157 /* Synonyms (argument and variable registers). */
158 { "a1", 0 },
159 { "a2", 1 },
160 { "a3", 2 },
161 { "a4", 3 },
162 { "v1", 4 },
163 { "v2", 5 },
164 { "v3", 6 },
165 { "v4", 7 },
166 { "v5", 8 },
167 { "v6", 9 },
168 { "v7", 10 },
169 { "v8", 11 },
170 /* Other platform-specific names for r9. */
171 { "sb", 9 },
172 { "tr", 9 },
173 /* Special names. */
174 { "ip", 12 },
175 { "sp", 13 },
176 { "lr", 14 },
177 { "pc", 15 },
178 /* Names used by GCC (not listed in the ARM EABI). */
179 { "sl", 10 },
180 { "fp", 11 },
181 /* A special name from the older ATPCS. */
182 { "wr", 7 },
183 };
184
185 static const char *const arm_register_names[] =
186 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
187 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
188 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
189 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
190 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
191 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
192 "fps", "cpsr" }; /* 24 25 */
193
194 /* Valid register name styles. */
195 static const char **valid_disassembly_styles;
196
197 /* Disassembly style to use. Default to "std" register names. */
198 static const char *disassembly_style;
199
200 /* This is used to keep the bfd arch_info in sync with the disassembly
201 style. */
202 static void set_disassembly_style_sfunc(char *, int,
203 struct cmd_list_element *);
204 static void set_disassembly_style (void);
205
206 static void convert_from_extended (const struct floatformat *, const void *,
207 void *, int);
208 static void convert_to_extended (const struct floatformat *, void *,
209 const void *, int);
210
211 static void arm_neon_quad_read (struct gdbarch *gdbarch,
212 struct regcache *regcache,
213 int regnum, gdb_byte *buf);
214 static void arm_neon_quad_write (struct gdbarch *gdbarch,
215 struct regcache *regcache,
216 int regnum, const gdb_byte *buf);
217
218 struct arm_prologue_cache
219 {
220 /* The stack pointer at the time this frame was created; i.e. the
221 caller's stack pointer when this function was called. It is used
222 to identify this frame. */
223 CORE_ADDR prev_sp;
224
225 /* The frame base for this frame is just prev_sp - frame size.
226 FRAMESIZE is the distance from the frame pointer to the
227 initial stack pointer. */
228
229 int framesize;
230
231 /* The register used to hold the frame pointer for this frame. */
232 int framereg;
233
234 /* Saved register offsets. */
235 struct trad_frame_saved_reg *saved_regs;
236 };
237
238 /* Architecture version for displaced stepping. This affects the behaviour of
239 certain instructions, and really should not be hard-wired. */
240
241 #define DISPLACED_STEPPING_ARCH_VERSION 5
242
243 /* Addresses for calling Thumb functions have bit 0 set.
244 Here are some macros to test, set, or clear bit 0 of addresses. */
245 #define IS_THUMB_ADDR(addr) ((addr) & 1)
246 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
247 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
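/* For example, a Thumb function whose first instruction is at 0x8000
   is described by the address 0x8001: IS_THUMB_ADDR (0x8001) is 1,
   UNMAKE_THUMB_ADDR (0x8001) recovers the real address 0x8000, and
   MAKE_THUMB_ADDR (0x8000) rebuilds the interworking address 0x8001.  */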
248
249 /* Set to true if the 32-bit mode is in use. */
250
251 int arm_apcs_32 = 1;
252
253 /* Determine if FRAME is executing in Thumb mode. */
254
255 static int
256 arm_frame_is_thumb (struct frame_info *frame)
257 {
258 CORE_ADDR cpsr;
259
260 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
261 directly (from a signal frame or dummy frame) or by interpreting
262 the saved LR (from a prologue or DWARF frame). So consult it and
263 trust the unwinders. */
264 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
265
266 return (cpsr & CPSR_T) != 0;
267 }
268
269 /* Callback for VEC_lower_bound. */
270
271 static inline int
272 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
273 const struct arm_mapping_symbol *rhs)
274 {
275 return lhs->value < rhs->value;
276 }
277
278 /* Determine if the program counter specified in MEMADDR is in a Thumb
279 function. This function should be called for addresses unrelated to
280 any executing frame; otherwise, prefer arm_frame_is_thumb. */
281
282 static int
283 arm_pc_is_thumb (CORE_ADDR memaddr)
284 {
285 struct obj_section *sec;
286 struct minimal_symbol *sym;
287
288 /* If bit 0 of the address is set, assume this is a Thumb address. */
289 if (IS_THUMB_ADDR (memaddr))
290 return 1;
291
292 /* If the user wants to override the symbol table, let them. */
293 if (strcmp (arm_force_mode_string, "arm") == 0)
294 return 0;
295 if (strcmp (arm_force_mode_string, "thumb") == 0)
296 return 1;
297
298 /* If there are mapping symbols, consult them. */
299 sec = find_pc_section (memaddr);
300 if (sec != NULL)
301 {
302 struct arm_per_objfile *data;
303 VEC(arm_mapping_symbol_s) *map;
304 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
305 0 };
306 unsigned int idx;
307
308 data = objfile_data (sec->objfile, arm_objfile_data_key);
309 if (data != NULL)
310 {
311 map = data->section_maps[sec->the_bfd_section->index];
312 if (!VEC_empty (arm_mapping_symbol_s, map))
313 {
314 struct arm_mapping_symbol *map_sym;
315
316 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
317 arm_compare_mapping_symbols);
318
319 /* VEC_lower_bound finds the earliest ordered insertion
320 point. If the following symbol starts at this exact
321 address, we use that; otherwise, the preceding
322 mapping symbol covers this address. */
323 if (idx < VEC_length (arm_mapping_symbol_s, map))
324 {
325 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
326 if (map_sym->value == map_key.value)
327 return map_sym->type == 't';
328 }
329
330 if (idx > 0)
331 {
332 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
333 return map_sym->type == 't';
334 }
335 }
336 }
337 }
338
339 /* Thumb functions have a "special" bit set in minimal symbols. */
340 sym = lookup_minimal_symbol_by_pc (memaddr);
341 if (sym)
342 return (MSYMBOL_IS_SPECIAL (sym));
343
344 /* If the user wants to override the fallback mode, let them. */
345 if (strcmp (arm_fallback_mode_string, "arm") == 0)
346 return 0;
347 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
348 return 1;
349
350 /* If we couldn't find any symbol, but we're talking to a running
351 target, then trust the current value of $cpsr. This lets
352 "display/i $pc" always show the correct mode (though if there is
353 a symbol table we will not reach here, so it still may not be
354 displayed in the mode it will be executed). */
355 if (target_has_registers)
356 return arm_frame_is_thumb (get_current_frame ());
357
358 /* Otherwise we're out of luck; we assume ARM. */
359 return 0;
360 }
361
362 /* Remove useless bits from addresses in a running program. */
363 static CORE_ADDR
364 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
365 {
366 if (arm_apcs_32)
367 return UNMAKE_THUMB_ADDR (val);
368 else
369 return (val & 0x03fffffc);
370 }
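/* In 26-bit mode the PC register packs the condition flags, interrupt
   bits and processor mode around a word-aligned 26-bit address, so the
   0x03fffffc mask above keeps only address bits 25:2; for example a raw
   value of 0xf0008004 (all flags set) reduces to 0x8004.  */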
371
372 /* When reading symbols, we need to zap the low bit of the address,
373 which may be set to 1 for Thumb functions. */
374 static CORE_ADDR
375 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
376 {
377 return val & ~1;
378 }
379
380 /* Analyze a Thumb prologue, looking for a recognizable stack frame
381 and frame pointer. Scan until we encounter a store that could
382 clobber the stack frame unexpectedly, or an unknown instruction. */
383
384 static CORE_ADDR
385 thumb_analyze_prologue (struct gdbarch *gdbarch,
386 CORE_ADDR start, CORE_ADDR limit,
387 struct arm_prologue_cache *cache)
388 {
389 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
390 int i;
391 pv_t regs[16];
392 struct pv_area *stack;
393 struct cleanup *back_to;
394 CORE_ADDR offset;
395
396 for (i = 0; i < 16; i++)
397 regs[i] = pv_register (i, 0);
398 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
399 back_to = make_cleanup_free_pv_area (stack);
400
401 while (start < limit)
402 {
403 unsigned short insn;
404
405 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
406
407 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
408 {
409 int regno;
410 int mask;
411
412 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
413 break;
414
415 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
416 whether to save LR (R14). */
417 mask = (insn & 0xff) | ((insn & 0x100) << 6);
418
419 /* Calculate offsets of saved R0-R7 and LR. */
420 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
421 if (mask & (1 << regno))
422 {
423 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
424 -4);
425 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
426 }
427 }
428 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
429 sub sp, #simm */
430 {
431 offset = (insn & 0x7f) << 2; /* get scaled offset */
432 if (insn & 0x80) /* Check for SUB. */
433 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
434 -offset);
435 else
436 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
437 offset);
438 }
439 else if ((insn & 0xff00) == 0xaf00) /* add r7, sp, #imm */
440 regs[THUMB_FP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
441 (insn & 0xff) << 2);
442 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
443 {
444 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
445 int src_reg = (insn & 0x78) >> 3;
446 regs[dst_reg] = regs[src_reg];
447 }
448 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
449 {
450 /* Handle stores to the stack. Normally pushes are used,
451 but with GCC -mtpcs-frame, there may be other stores
452 in the prologue to create the frame. */
453 int regno = (insn >> 8) & 0x7;
454 pv_t addr;
455
456 offset = (insn & 0xff) << 2;
457 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
458
459 if (pv_area_store_would_trash (stack, addr))
460 break;
461
462 pv_area_store (stack, addr, 4, regs[regno]);
463 }
464 else
465 {
466 /* We don't know what this instruction is. We're finished
467 scanning. NOTE: Recognizing more safe-to-ignore
468 instructions here will improve support for optimized
469 code. */
470 break;
471 }
472
473 start += 2;
474 }
475
476 if (cache == NULL)
477 {
478 do_cleanups (back_to);
479 return start;
480 }
481
482 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
483 {
484 /* Frame pointer is fp. Frame size is constant. */
485 cache->framereg = ARM_FP_REGNUM;
486 cache->framesize = -regs[ARM_FP_REGNUM].k;
487 }
488 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
489 {
490 /* Frame pointer is r7. Frame size is constant. */
491 cache->framereg = THUMB_FP_REGNUM;
492 cache->framesize = -regs[THUMB_FP_REGNUM].k;
493 }
494 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
495 {
496 /* Try the stack pointer... this is a bit desperate. */
497 cache->framereg = ARM_SP_REGNUM;
498 cache->framesize = -regs[ARM_SP_REGNUM].k;
499 }
500 else
501 {
502 /* We're just out of luck. We don't know where the frame is. */
503 cache->framereg = -1;
504 cache->framesize = 0;
505 }
506
507 for (i = 0; i < 16; i++)
508 if (pv_area_find_reg (stack, gdbarch, i, &offset))
509 cache->saved_regs[i].addr = offset;
510
511 do_cleanups (back_to);
512 return start;
513 }
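/* Two worked examples of the decoding above: "push {r4, r7, lr}"
   encodes as 0xb590, so mask becomes (0x90 | (0x100 << 6)) = 0x4090
   (bits 4, 7 and 14) and three 4-byte stores are recorded below the
   incoming SP; "mov r7, sp" encodes as 0x466f, giving dst_reg = 7 and
   src_reg = 13, so r7 is tracked as a copy of the stack pointer.  */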
514
515 /* Advance the PC across any function entry prologue instructions to
516 reach some "real" code.
517
518 The APCS (ARM Procedure Call Standard) defines the following
519 prologue:
520
521 mov ip, sp
522 [stmfd sp!, {a1,a2,a3,a4}]
523 stmfd sp!, {...,fp,ip,lr,pc}
524 [stfe f7, [sp, #-12]!]
525 [stfe f6, [sp, #-12]!]
526 [stfe f5, [sp, #-12]!]
527 [stfe f4, [sp, #-12]!]
528 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
529
530 static CORE_ADDR
531 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
532 {
533 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
534 unsigned long inst;
535 CORE_ADDR skip_pc;
536 CORE_ADDR func_addr, limit_pc;
537 struct symtab_and_line sal;
538
539 /* If we're in a dummy frame, don't even try to skip the prologue. */
540 if (deprecated_pc_in_call_dummy (gdbarch, pc))
541 return pc;
542
543 /* See if we can determine the end of the prologue via the symbol table.
544 If so, then return either PC, or the PC after the prologue, whichever
545 is greater. */
546 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
547 {
548 CORE_ADDR post_prologue_pc
549 = skip_prologue_using_sal (gdbarch, func_addr);
550 if (post_prologue_pc != 0)
551 return max (pc, post_prologue_pc);
552 }
553
554 /* Can't determine prologue from the symbol table, need to examine
555 instructions. */
556
557 /* Find an upper limit on the function prologue using the debug
558 information. If the debug information could not be used to provide
559 that bound, then use an arbitrarily large number as the upper bound. */
560 /* Like arm_scan_prologue, stop no later than pc + 64. */
561 limit_pc = skip_prologue_using_sal (gdbarch, pc);
562 if (limit_pc == 0)
563 limit_pc = pc + 64; /* Magic. */
564
565
566 /* Check if this is Thumb code. */
567 if (arm_pc_is_thumb (pc))
568 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
569
570 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
571 {
572 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
573
574 /* "mov ip, sp" is no longer a required part of the prologue. */
575 if (inst == 0xe1a0c00d) /* mov ip, sp */
576 continue;
577
578 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
579 continue;
580
581 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
582 continue;
583
584 /* Some prologues begin with "str lr, [sp, #-4]!". */
585 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
586 continue;
587
588 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
589 continue;
590
591 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
592 continue;
593
594 /* Any insns after this point may float into the code, if it makes
595 for better instruction scheduling, so we skip them only if we
596 find them, but still consider the function to be frame-ful. */
597
598 /* We may have either one sfmfd instruction here, or several stfe
599 insns, depending on the version of floating point code we
600 support. */
601 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
602 continue;
603
604 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
605 continue;
606
607 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
608 continue;
609
610 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
611 continue;
612
613 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
614 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
615 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
616 continue;
617
618 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
619 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
620 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
621 continue;
622
623 /* Unrecognized instruction; stop scanning. */
624 break;
625 }
626
627 return skip_pc; /* End of prologue */
628 }
629
630 /* *INDENT-OFF* */
631 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
632 This function decodes a Thumb function prologue to determine:
633 1) the size of the stack frame
634 2) which registers are saved on it
635 3) the offsets of saved regs
636 4) the offset from the stack pointer to the frame pointer
637
638 A typical Thumb function prologue would create this stack frame
639 (offsets relative to FP)
640 old SP -> 24 stack parameters
641 20 LR
642 16 R7
643 R7 -> 0 local variables (16 bytes)
644 SP -> -12 additional stack space (12 bytes)
645 The frame size would thus be 36 bytes, and the frame offset would be
646 12 bytes. The frame register is R7.
647
648 The comments for thumb_analyze_prologue() describe the algorithm we use
649 to detect the end of the prologue. */
650 /* *INDENT-ON* */
651
652 static void
653 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
654 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
655 {
656 CORE_ADDR prologue_start;
657 CORE_ADDR prologue_end;
658 CORE_ADDR current_pc;
659
660 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
661 &prologue_end))
662 {
663 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
664
665 if (sal.line == 0) /* no line info, use current PC */
666 prologue_end = prev_pc;
667 else if (sal.end < prologue_end) /* next line begins after fn end */
668 prologue_end = sal.end; /* (probably means no prologue) */
669 }
670 else
671 /* We're in the boondocks: we have no idea where the start of the
672 function is. */
673 return;
674
675 prologue_end = min (prologue_end, prev_pc);
676
677 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
678 }
679
680 /* This function decodes an ARM function prologue to determine:
681 1) the size of the stack frame
682 2) which registers are saved on it
683 3) the offsets of saved regs
684 4) the offset from the stack pointer to the frame pointer
685 This information is stored in the "extra" fields of the frame_info.
686
687 There are two basic forms for the ARM prologue. The fixed argument
688 function call will look like:
689
690 mov ip, sp
691 stmfd sp!, {fp, ip, lr, pc}
692 sub fp, ip, #4
693 [sub sp, sp, #4]
694
695 Which would create this stack frame (offsets relative to FP):
696 IP -> 4 (caller's stack)
697 FP -> 0 PC (points to address of stmfd instruction + 8 in callee)
698 -4 LR (return address in caller)
699 -8 IP (copy of caller's SP)
700 -12 FP (caller's FP)
701 SP -> -28 Local variables
702
703 The frame size would thus be 32 bytes, and the frame offset would be
704 28 bytes. The stmfd call can also save any of the vN registers it
705 plans to use, which increases the frame size accordingly.
706
707 Note: The stored PC is 8 bytes past the STMFD instruction that stored it,
708 because reading the PC register during an ARM store instruction always
709 yields PC + 8.
710
711 A variable argument function call will look like:
712
713 mov ip, sp
714 stmfd sp!, {a1, a2, a3, a4}
715 stmfd sp!, {fp, ip, lr, pc}
716 sub fp, ip, #20
717
718 Which would create this stack frame (offsets relative to FP):
719 IP -> 20 (caller's stack)
720 16 A4
721 12 A3
722 8 A2
723 4 A1
724 FP -> 0 PC (points to address of stmfd instruction + 8 in callee)
725 -4 LR (return address in caller)
726 -8 IP (copy of caller's SP)
727 -12 FP (caller's FP)
728 SP -> -28 Local variables
729
730 The frame size would thus be 48 bytes, and the frame offset would be
731 28 bytes.
732
733 There is another potential complication, which is that the optimizer
734 will try to separate the store of fp in the "stmfd" instruction from
735 the "sub fp, ip, #NN" instruction. Almost anything can be there, so
736 we just key on the stmfd, and then scan for the "sub fp, ip, #NN"...
737
738 Also, note, the original version of the ARM toolchain claimed that there
739 should be an
740
741 instruction at the end of the prologue. I have never seen GCC produce
742 this, and the ARM docs don't mention it. We still test for it below in
743 case it happens...
744
745 */
746
747 static void
748 arm_scan_prologue (struct frame_info *this_frame,
749 struct arm_prologue_cache *cache)
750 {
751 struct gdbarch *gdbarch = get_frame_arch (this_frame);
752 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
753 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
754 int regno;
755 CORE_ADDR prologue_start, prologue_end, current_pc;
756 CORE_ADDR prev_pc = get_frame_pc (this_frame);
757 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
758 pv_t regs[ARM_FPS_REGNUM];
759 struct pv_area *stack;
760 struct cleanup *back_to;
761 CORE_ADDR offset;
762
763 /* Assume there is no frame until proven otherwise. */
764 cache->framereg = ARM_SP_REGNUM;
765 cache->framesize = 0;
766
767 /* Check for Thumb prologue. */
768 if (arm_frame_is_thumb (this_frame))
769 {
770 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
771 return;
772 }
773
774 /* Find the function prologue. If we can't find the function in
775 the symbol table, peek in the stack frame to find the PC. */
776 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
777 &prologue_end))
778 {
779 /* One way to find the end of the prologue (which works well
780 for unoptimized code) is to do the following:
781
782 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
783
784 if (sal.line == 0)
785 prologue_end = prev_pc;
786 else if (sal.end < prologue_end)
787 prologue_end = sal.end;
788
789 This mechanism is very accurate so long as the optimizer
790 doesn't move any instructions from the function body into the
791 prologue. If this happens, sal.end will be the last
792 instruction in the first hunk of prologue code just before
793 the first instruction that the scheduler has moved from
794 the body to the prologue.
795
796 In order to make sure that we scan all of the prologue
797 instructions, we use a slightly less accurate mechanism which
798 may scan more than necessary. To help compensate for this
799 lack of accuracy, the prologue scanning loop below contains
800 several clauses which will cause the loop to terminate early if
801 an implausible prologue instruction is encountered.
802
803 The expression
804
805 prologue_start + 64
806
807 is a suitable endpoint since it accounts for the largest
808 possible prologue plus up to five instructions inserted by
809 the scheduler. */
810
811 if (prologue_end > prologue_start + 64)
812 {
813 prologue_end = prologue_start + 64; /* See above. */
814 }
815 }
816 else
817 {
818 /* We have no symbol information. Our only option is to assume this
819 function has a standard stack frame and the normal frame register.
820 Then, we can find the value of our frame pointer on entrance to
821 the callee (or at the present moment if this is the innermost frame).
822 The value stored there should be the address of the stmfd + 8. */
823 CORE_ADDR frame_loc;
824 LONGEST return_value;
825
826 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
827 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
828 return;
829 else
830 {
831 prologue_start = gdbarch_addr_bits_remove
832 (gdbarch, return_value) - 8;
833 prologue_end = prologue_start + 64; /* See above. */
834 }
835 }
836
837 if (prev_pc < prologue_end)
838 prologue_end = prev_pc;
839
840 /* Now search the prologue looking for instructions that set up the
841 frame pointer, adjust the stack pointer, and save registers.
842
843 Be careful, however, and if it doesn't look like a prologue,
844 don't try to scan it. If, for instance, a frameless function
845 begins with stmfd sp!, then we will tell ourselves there is
846 a frame, which will confuse stack traceback, as well as "finish"
847 and other operations that rely on a knowledge of the stack
848 traceback.
849
850 In the APCS, the prologue should start with "mov ip, sp" so
851 if we don't see this as the first insn, we will stop.
852
853 [Note: This doesn't seem to be true any longer, so it's now an
854 optional part of the prologue. - Kevin Buettner, 2001-11-20]
855
856 [Note further: The "mov ip,sp" only seems to be missing in
857 frameless functions at optimization level "-O2" or above,
858 in which case it is often (but not always) replaced by
859 "str lr, [sp, #-4]!". - Michael Snyder, 2002-04-23] */
860
861 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
862 regs[regno] = pv_register (regno, 0);
863 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
864 back_to = make_cleanup_free_pv_area (stack);
865
866 for (current_pc = prologue_start;
867 current_pc < prologue_end;
868 current_pc += 4)
869 {
870 unsigned int insn
871 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
872
873 if (insn == 0xe1a0c00d) /* mov ip, sp */
874 {
875 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
876 continue;
877 }
878 else if ((insn & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
879 {
880 unsigned imm = insn & 0xff; /* immediate value */
881 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
882 imm = (imm >> rot) | (imm << (32 - rot));
883 regs[ARM_IP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], imm);
884 continue;
885 }
886 else if ((insn & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
887 {
888 unsigned imm = insn & 0xff; /* immediate value */
889 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
890 imm = (imm >> rot) | (imm << (32 - rot));
891 regs[ARM_IP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
892 continue;
893 }
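/* The two cases above decode the standard ARM immediate form: an 8-bit
   value rotated right by twice the 4-bit rotate field, which is what
   (imm >> rot) | (imm << (32 - rot)) computes.  For example,
   0xe28dc010 ("add ip, sp, #16") has imm = 0x10 and rot = 0, giving 16,
   while 0xe24dcb01 ("sub ip, sp, #1024") has imm = 0x01 and a rotate
   field of 0xb, so rot = 22 and the decoded value is 0x400.  */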
894 else if (insn == 0xe52de004) /* str lr, [sp, #-4]! */
895 {
896 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
897 break;
898 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
899 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[ARM_LR_REGNUM]);
900 continue;
901 }
902 else if ((insn & 0xffff0000) == 0xe92d0000)
903 /* stmfd sp!, {..., fp, ip, lr, pc}
904 or
905 stmfd sp!, {a1, a2, a3, a4} */
906 {
907 int mask = insn & 0xffff;
908
909 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
910 break;
911
912 /* Calculate offsets of saved registers. */
913 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
914 if (mask & (1 << regno))
915 {
916 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
917 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
918 }
919 }
920 else if ((insn & 0xffffc000) == 0xe54b0000 /* strb rx,[r11,#-n] */
921 || (insn & 0xffffc0f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
922 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
923 {
924 /* No need to add this to saved_regs -- it's just an arg reg. */
925 continue;
926 }
927 else if ((insn & 0xffffc000) == 0xe5cd0000 /* strb rx,[sp,#n] */
928 || (insn & 0xffffc0f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
929 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
930 {
931 /* No need to add this to saved_regs -- it's just an arg reg. */
932 continue;
933 }
934 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
935 {
936 unsigned imm = insn & 0xff; /* immediate value */
937 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
938 imm = (imm >> rot) | (imm << (32 - rot));
939 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
940 }
941 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
942 {
943 unsigned imm = insn & 0xff; /* immediate value */
944 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
945 imm = (imm >> rot) | (imm << (32 - rot));
946 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
947 }
948 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
949 && gdbarch_tdep (gdbarch)->have_fpa_registers)
950 {
951 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
952 break;
953
954 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
955 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
956 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
957 }
958 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
959 && gdbarch_tdep (gdbarch)->have_fpa_registers)
960 {
961 int n_saved_fp_regs;
962 unsigned int fp_start_reg, fp_bound_reg;
963
964 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
965 break;
966
967 if ((insn & 0x800) == 0x800) /* N0 is set */
968 {
969 if ((insn & 0x40000) == 0x40000) /* N1 is set */
970 n_saved_fp_regs = 3;
971 else
972 n_saved_fp_regs = 1;
973 }
974 else
975 {
976 if ((insn & 0x40000) == 0x40000) /* N1 is set */
977 n_saved_fp_regs = 2;
978 else
979 n_saved_fp_regs = 4;
980 }
981
982 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
983 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
984 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
985 {
986 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
987 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
988 regs[fp_start_reg]);
989 }
990 }
991 else if ((insn & 0xf0000000) != 0xe0000000)
992 break; /* Condition not true, exit early */
993 else if ((insn & 0xfe200000) == 0xe8200000) /* ldm? */
994 break; /* Don't scan past a block load */
995 else
996 /* The optimizer might shove anything into the prologue,
997 so we just skip what we don't recognize. */
998 continue;
999 }
1000
1001 /* The frame size is just the distance from the frame register
1002 to the original stack pointer. */
1003 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1004 {
1005 /* Frame pointer is fp. */
1006 cache->framereg = ARM_FP_REGNUM;
1007 cache->framesize = -regs[ARM_FP_REGNUM].k;
1008 }
1009 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1010 {
1011 /* Try the stack pointer... this is a bit desperate. */
1012 cache->framereg = ARM_SP_REGNUM;
1013 cache->framesize = -regs[ARM_SP_REGNUM].k;
1014 }
1015 else
1016 {
1017 /* We're just out of luck. We don't know where the frame is. */
1018 cache->framereg = -1;
1019 cache->framesize = 0;
1020 }
1021
1022 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1023 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1024 cache->saved_regs[regno].addr = offset;
1025
1026 do_cleanups (back_to);
1027 }
1028
1029 static struct arm_prologue_cache *
1030 arm_make_prologue_cache (struct frame_info *this_frame)
1031 {
1032 int reg;
1033 struct arm_prologue_cache *cache;
1034 CORE_ADDR unwound_fp;
1035
1036 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1037 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1038
1039 arm_scan_prologue (this_frame, cache);
1040
1041 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1042 if (unwound_fp == 0)
1043 return cache;
1044
1045 cache->prev_sp = unwound_fp + cache->framesize;
1046
1047 /* Calculate actual addresses of saved registers using offsets
1048 determined by arm_scan_prologue. */
1049 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1050 if (trad_frame_addr_p (cache->saved_regs, reg))
1051 cache->saved_regs[reg].addr += cache->prev_sp;
1052
1053 return cache;
1054 }
1055
1056 /* Our frame ID for a normal frame is the current function's starting PC
1057 and the caller's SP when we were called. */
1058
1059 static void
1060 arm_prologue_this_id (struct frame_info *this_frame,
1061 void **this_cache,
1062 struct frame_id *this_id)
1063 {
1064 struct arm_prologue_cache *cache;
1065 struct frame_id id;
1066 CORE_ADDR pc, func;
1067
1068 if (*this_cache == NULL)
1069 *this_cache = arm_make_prologue_cache (this_frame);
1070 cache = *this_cache;
1071
1072 /* This is meant to halt the backtrace at "_start". */
1073 pc = get_frame_pc (this_frame);
1074 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1075 return;
1076
1077 /* If we've hit a wall, stop. */
1078 if (cache->prev_sp == 0)
1079 return;
1080
1081 func = get_frame_func (this_frame);
1082 id = frame_id_build (cache->prev_sp, func);
1083 *this_id = id;
1084 }
1085
1086 static struct value *
1087 arm_prologue_prev_register (struct frame_info *this_frame,
1088 void **this_cache,
1089 int prev_regnum)
1090 {
1091 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1092 struct arm_prologue_cache *cache;
1093
1094 if (*this_cache == NULL)
1095 *this_cache = arm_make_prologue_cache (this_frame);
1096 cache = *this_cache;
1097
1098 /* If we are asked to unwind the PC, then we need to return the LR
1099 instead. The prologue may save PC, but it will point into this
1100 frame's prologue, not the next frame's resume location. Also
1101 strip the saved T bit. A valid LR may have the low bit set, but
1102 a valid PC never does. */
1103 if (prev_regnum == ARM_PC_REGNUM)
1104 {
1105 CORE_ADDR lr;
1106
1107 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1108 return frame_unwind_got_constant (this_frame, prev_regnum,
1109 arm_addr_bits_remove (gdbarch, lr));
1110 }
1111
1112 /* SP is generally not saved to the stack, but this frame is
1113 identified by the next frame's stack pointer at the time of the call.
1114 The value was already reconstructed into PREV_SP. */
1115 if (prev_regnum == ARM_SP_REGNUM)
1116 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1117
1118 /* The CPSR may have been changed by the call instruction and by the
1119 called function. The only bit we can reconstruct is the T bit,
1120 by checking the low bit of LR as of the call. This is a reliable
1121 indicator of Thumb-ness except for some ARM v4T pre-interworking
1122 Thumb code, which could get away with a clear low bit as long as
1123 the called function did not use bx. Guess that all other
1124 bits are unchanged; the condition flags are presumably lost,
1125 but the processor status is likely valid. */
1126 if (prev_regnum == ARM_PS_REGNUM)
1127 {
1128 CORE_ADDR lr, cpsr;
1129
1130 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1131 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1132 if (IS_THUMB_ADDR (lr))
1133 cpsr |= CPSR_T;
1134 else
1135 cpsr &= ~CPSR_T;
1136 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1137 }
1138
1139 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1140 prev_regnum);
1141 }
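/* For example, if the unwound LR is 0x8125 the low bit indicates that
   the caller was executing Thumb code, so CPSR_T is set in the CPSR
   value returned above; an LR of 0x8124 clears it instead.  */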
1142
1143 struct frame_unwind arm_prologue_unwind = {
1144 NORMAL_FRAME,
1145 arm_prologue_this_id,
1146 arm_prologue_prev_register,
1147 NULL,
1148 default_frame_sniffer
1149 };
1150
1151 static struct arm_prologue_cache *
1152 arm_make_stub_cache (struct frame_info *this_frame)
1153 {
1154 int reg;
1155 struct arm_prologue_cache *cache;
1156 CORE_ADDR unwound_fp;
1157
1158 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1159 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1160
1161 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
1162
1163 return cache;
1164 }
1165
1166 /* Our frame ID for a stub frame is the current SP and LR. */
1167
1168 static void
1169 arm_stub_this_id (struct frame_info *this_frame,
1170 void **this_cache,
1171 struct frame_id *this_id)
1172 {
1173 struct arm_prologue_cache *cache;
1174
1175 if (*this_cache == NULL)
1176 *this_cache = arm_make_stub_cache (this_frame);
1177 cache = *this_cache;
1178
1179 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1180 }
1181
1182 static int
1183 arm_stub_unwind_sniffer (const struct frame_unwind *self,
1184 struct frame_info *this_frame,
1185 void **this_prologue_cache)
1186 {
1187 CORE_ADDR addr_in_block;
1188 char dummy[4];
1189
1190 addr_in_block = get_frame_address_in_block (this_frame);
1191 if (in_plt_section (addr_in_block, NULL)
1192 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1193 return 1;
1194
1195 return 0;
1196 }
1197
1198 struct frame_unwind arm_stub_unwind = {
1199 NORMAL_FRAME,
1200 arm_stub_this_id,
1201 arm_prologue_prev_register,
1202 NULL,
1203 arm_stub_unwind_sniffer
1204 };
1205
1206 static CORE_ADDR
1207 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1208 {
1209 struct arm_prologue_cache *cache;
1210
1211 if (*this_cache == NULL)
1212 *this_cache = arm_make_prologue_cache (this_frame);
1213 cache = *this_cache;
1214
1215 return cache->prev_sp - cache->framesize;
1216 }
1217
1218 struct frame_base arm_normal_base = {
1219 &arm_prologue_unwind,
1220 arm_normal_frame_base,
1221 arm_normal_frame_base,
1222 arm_normal_frame_base
1223 };
1224
1225 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1226 dummy frame. The frame ID's base needs to match the TOS value
1227 saved by save_dummy_frame_tos() and returned from
1228 arm_push_dummy_call, and the PC needs to match the dummy frame's
1229 breakpoint. */
1230
1231 static struct frame_id
1232 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1233 {
1234 return frame_id_build (get_frame_register_unsigned (this_frame, ARM_SP_REGNUM),
1235 get_frame_pc (this_frame));
1236 }
1237
1238 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1239 be used to construct the previous frame's ID, after looking up the
1240 containing function). */
1241
1242 static CORE_ADDR
1243 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1244 {
1245 CORE_ADDR pc;
1246 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
1247 return arm_addr_bits_remove (gdbarch, pc);
1248 }
1249
1250 static CORE_ADDR
1251 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1252 {
1253 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
1254 }
1255
1256 static struct value *
1257 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
1258 int regnum)
1259 {
1260 struct gdbarch * gdbarch = get_frame_arch (this_frame);
1261 CORE_ADDR lr, cpsr;
1262
1263 switch (regnum)
1264 {
1265 case ARM_PC_REGNUM:
1266 /* The PC is normally copied from the return column, which
1267 describes saves of LR. However, that version may have an
1268 extra bit set to indicate Thumb state. The bit is not
1269 part of the PC. */
1270 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1271 return frame_unwind_got_constant (this_frame, regnum,
1272 arm_addr_bits_remove (gdbarch, lr));
1273
1274 case ARM_PS_REGNUM:
1275 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
1276 cpsr = get_frame_register_unsigned (this_frame, regnum);
1277 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1278 if (IS_THUMB_ADDR (lr))
1279 cpsr |= CPSR_T;
1280 else
1281 cpsr &= ~CPSR_T;
1282 return frame_unwind_got_constant (this_frame, regnum, cpsr);
1283
1284 default:
1285 internal_error (__FILE__, __LINE__,
1286 _("Unexpected register %d"), regnum);
1287 }
1288 }
1289
1290 static void
1291 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1292 struct dwarf2_frame_state_reg *reg,
1293 struct frame_info *this_frame)
1294 {
1295 switch (regnum)
1296 {
1297 case ARM_PC_REGNUM:
1298 case ARM_PS_REGNUM:
1299 reg->how = DWARF2_FRAME_REG_FN;
1300 reg->loc.fn = arm_dwarf2_prev_register;
1301 break;
1302 case ARM_SP_REGNUM:
1303 reg->how = DWARF2_FRAME_REG_CFA;
1304 break;
1305 }
1306 }
1307
1308 /* When arguments must be pushed onto the stack, they go on in reverse
1309 order. The code below implements a FILO (stack) to do this. */
1310
1311 struct stack_item
1312 {
1313 int len;
1314 struct stack_item *prev;
1315 void *data;
1316 };
1317
1318 static struct stack_item *
1319 push_stack_item (struct stack_item *prev, void *contents, int len)
1320 {
1321 struct stack_item *si;
1322 si = xmalloc (sizeof (struct stack_item));
1323 si->data = xmalloc (len);
1324 si->len = len;
1325 si->prev = prev;
1326 memcpy (si->data, contents, len);
1327 return si;
1328 }
1329
1330 static struct stack_item *
1331 pop_stack_item (struct stack_item *si)
1332 {
1333 struct stack_item *dead = si;
1334 si = si->prev;
1335 xfree (dead->data);
1336 xfree (dead);
1337 return si;
1338 }
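/* Typical use, as in arm_push_dummy_call below: arguments that do not
   fit in registers are pushed onto this list as they are encountered,
   and are later written out while the stack grows downwards, e.g.:

     si = push_stack_item (si, val, INT_REGISTER_SIZE);
     ...
     while (si)
       {
         sp -= si->len;
         write_memory (sp, si->data, si->len);
         si = pop_stack_item (si);
       }

   so the first item pushed ends up at the highest address.  */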
1339
1340
1341 /* Return the alignment (in bytes) of the given type. */
1342
1343 static int
1344 arm_type_align (struct type *t)
1345 {
1346 int n;
1347 int align;
1348 int falign;
1349
1350 t = check_typedef (t);
1351 switch (TYPE_CODE (t))
1352 {
1353 default:
1354 /* Should never happen. */
1355 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1356 return 4;
1357
1358 case TYPE_CODE_PTR:
1359 case TYPE_CODE_ENUM:
1360 case TYPE_CODE_INT:
1361 case TYPE_CODE_FLT:
1362 case TYPE_CODE_SET:
1363 case TYPE_CODE_RANGE:
1364 case TYPE_CODE_BITSTRING:
1365 case TYPE_CODE_REF:
1366 case TYPE_CODE_CHAR:
1367 case TYPE_CODE_BOOL:
1368 return TYPE_LENGTH (t);
1369
1370 case TYPE_CODE_ARRAY:
1371 case TYPE_CODE_COMPLEX:
1372 /* TODO: What about vector types? */
1373 return arm_type_align (TYPE_TARGET_TYPE (t));
1374
1375 case TYPE_CODE_STRUCT:
1376 case TYPE_CODE_UNION:
1377 align = 1;
1378 for (n = 0; n < TYPE_NFIELDS (t); n++)
1379 {
1380 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
1381 if (falign > align)
1382 align = falign;
1383 }
1384 return align;
1385 }
1386 }
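/* For example, struct { char c; int n; } yields 4 (the largest field
   alignment) and struct { char c; double d; } yields 8, while arrays
   and complex types take the alignment of their element type.  */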
1387
1388 /* Possible base types for a candidate for passing and returning in
1389 VFP registers. */
1390
1391 enum arm_vfp_cprc_base_type
1392 {
1393 VFP_CPRC_UNKNOWN,
1394 VFP_CPRC_SINGLE,
1395 VFP_CPRC_DOUBLE,
1396 VFP_CPRC_VEC64,
1397 VFP_CPRC_VEC128
1398 };
1399
1400 /* The length of one element of base type B. */
1401
1402 static unsigned
1403 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
1404 {
1405 switch (b)
1406 {
1407 case VFP_CPRC_SINGLE:
1408 return 4;
1409 case VFP_CPRC_DOUBLE:
1410 return 8;
1411 case VFP_CPRC_VEC64:
1412 return 8;
1413 case VFP_CPRC_VEC128:
1414 return 16;
1415 default:
1416 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
1417 (int) b);
1418 }
1419 }
1420
1421 /* The character ('s', 'd' or 'q') for the type of VFP register used
1422 for passing base type B. */
1423
1424 static int
1425 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
1426 {
1427 switch (b)
1428 {
1429 case VFP_CPRC_SINGLE:
1430 return 's';
1431 case VFP_CPRC_DOUBLE:
1432 return 'd';
1433 case VFP_CPRC_VEC64:
1434 return 'd';
1435 case VFP_CPRC_VEC128:
1436 return 'q';
1437 default:
1438 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
1439 (int) b);
1440 }
1441 }
1442
1443 /* Determine whether T may be part of a candidate for passing and
1444 returning in VFP registers, ignoring the limit on the total number
1445 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
1446 classification of the first valid component found; if it is not
1447 VFP_CPRC_UNKNOWN, all components must have the same classification
1448 as *BASE_TYPE. If it is found that T contains a type not permitted
1449 for passing and returning in VFP registers, a type differently
1450 classified from *BASE_TYPE, or two types differently classified
1451 from each other, return -1, otherwise return the total number of
1452 base-type elements found (possibly 0 in an empty structure or
1453 array). Vectors and complex types are not currently supported,
1454 matching the generic AAPCS support. */
1455
1456 static int
1457 arm_vfp_cprc_sub_candidate (struct type *t,
1458 enum arm_vfp_cprc_base_type *base_type)
1459 {
1460 t = check_typedef (t);
1461 switch (TYPE_CODE (t))
1462 {
1463 case TYPE_CODE_FLT:
1464 switch (TYPE_LENGTH (t))
1465 {
1466 case 4:
1467 if (*base_type == VFP_CPRC_UNKNOWN)
1468 *base_type = VFP_CPRC_SINGLE;
1469 else if (*base_type != VFP_CPRC_SINGLE)
1470 return -1;
1471 return 1;
1472
1473 case 8:
1474 if (*base_type == VFP_CPRC_UNKNOWN)
1475 *base_type = VFP_CPRC_DOUBLE;
1476 else if (*base_type != VFP_CPRC_DOUBLE)
1477 return -1;
1478 return 1;
1479
1480 default:
1481 return -1;
1482 }
1483 break;
1484
1485 case TYPE_CODE_ARRAY:
1486 {
1487 int count;
1488 unsigned unitlen;
1489 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
1490 if (count == -1)
1491 return -1;
1492 if (TYPE_LENGTH (t) == 0)
1493 {
1494 gdb_assert (count == 0);
1495 return 0;
1496 }
1497 else if (count == 0)
1498 return -1;
1499 unitlen = arm_vfp_cprc_unit_length (*base_type);
1500 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
1501 return TYPE_LENGTH (t) / unitlen;
1502 }
1503 break;
1504
1505 case TYPE_CODE_STRUCT:
1506 {
1507 int count = 0;
1508 unsigned unitlen;
1509 int i;
1510 for (i = 0; i < TYPE_NFIELDS (t); i++)
1511 {
1512 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
1513 base_type);
1514 if (sub_count == -1)
1515 return -1;
1516 count += sub_count;
1517 }
1518 if (TYPE_LENGTH (t) == 0)
1519 {
1520 gdb_assert (count == 0);
1521 return 0;
1522 }
1523 else if (count == 0)
1524 return -1;
1525 unitlen = arm_vfp_cprc_unit_length (*base_type);
1526 if (TYPE_LENGTH (t) != unitlen * count)
1527 return -1;
1528 return count;
1529 }
1530
1531 case TYPE_CODE_UNION:
1532 {
1533 int count = 0;
1534 unsigned unitlen;
1535 int i;
1536 for (i = 0; i < TYPE_NFIELDS (t); i++)
1537 {
1538 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
1539 base_type);
1540 if (sub_count == -1)
1541 return -1;
1542 count = (count > sub_count ? count : sub_count);
1543 }
1544 if (TYPE_LENGTH (t) == 0)
1545 {
1546 gdb_assert (count == 0);
1547 return 0;
1548 }
1549 else if (count == 0)
1550 return -1;
1551 unitlen = arm_vfp_cprc_unit_length (*base_type);
1552 if (TYPE_LENGTH (t) != unitlen * count)
1553 return -1;
1554 return count;
1555 }
1556
1557 default:
1558 break;
1559 }
1560
1561 return -1;
1562 }
1563
1564 /* Determine whether T is a VFP co-processor register candidate (CPRC)
1565 if passed to or returned from a non-variadic function with the VFP
1566 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
1567 *BASE_TYPE to the base type for T and *COUNT to the number of
1568 elements of that base type before returning. */
1569
1570 static int
1571 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
1572 int *count)
1573 {
1574 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
1575 int c = arm_vfp_cprc_sub_candidate (t, &b);
1576 if (c <= 0 || c > 4)
1577 return 0;
1578 *base_type = b;
1579 *count = c;
1580 return 1;
1581 }
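/* For example, struct { float x, y; } is a candidate with base type
   VFP_CPRC_SINGLE and a count of 2, and would typically be passed in
   s0 and s1; a structure of five floats (count > 4) or one mixing
   float and double members is not a candidate and falls back to the
   base AAPCS rules.  */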
1582
1583 /* Return 1 if the VFP ABI should be used for passing arguments to and
1584 returning values from a function of type FUNC_TYPE, 0
1585 otherwise. */
1586
1587 static int
1588 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
1589 {
1590 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1591 /* Variadic functions always use the base ABI. Assume that functions
1592 without debug info are not variadic. */
1593 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
1594 return 0;
1595 /* The VFP ABI is only supported as a variant of AAPCS. */
1596 if (tdep->arm_abi != ARM_ABI_AAPCS)
1597 return 0;
1598 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
1599 }
1600
1601 /* We currently only support passing parameters in integer registers, which
1602 conforms with GCC's default model, and VFP argument passing following
1603 the VFP variant of AAPCS. Several other variants exist and
1604 we should probably support some of them based on the selected ABI. */
1605
1606 static CORE_ADDR
1607 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1608 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
1609 struct value **args, CORE_ADDR sp, int struct_return,
1610 CORE_ADDR struct_addr)
1611 {
1612 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1613 int argnum;
1614 int argreg;
1615 int nstack;
1616 struct stack_item *si = NULL;
1617 int use_vfp_abi;
1618 struct type *ftype;
1619 unsigned vfp_regs_free = (1 << 16) - 1;
1620
1621 /* Determine the type of this function and whether the VFP ABI
1622 applies. */
1623 ftype = check_typedef (value_type (function));
1624 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
1625 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
1626 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
1627
1628 /* Set the return address. For the ARM, the return breakpoint is
1629 always at BP_ADDR. */
1630 if (arm_pc_is_thumb (bp_addr))
1631 bp_addr |= 1;
1632 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
1633
1634 /* Walk through the list of args and determine how large a temporary
1635 stack is required. Need to take care here as structs may be
1636 passed on the stack, and we have to push them. */
1637 nstack = 0;
1638
1639 argreg = ARM_A1_REGNUM;
1640 nstack = 0;
1641
1642 /* The struct_return pointer occupies the first parameter
1643 passing register. */
1644 if (struct_return)
1645 {
1646 if (arm_debug)
1647 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
1648 gdbarch_register_name (gdbarch, argreg),
1649 paddress (gdbarch, struct_addr));
1650 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
1651 argreg++;
1652 }
1653
1654 for (argnum = 0; argnum < nargs; argnum++)
1655 {
1656 int len;
1657 struct type *arg_type;
1658 struct type *target_type;
1659 enum type_code typecode;
1660 bfd_byte *val;
1661 int align;
1662 enum arm_vfp_cprc_base_type vfp_base_type;
1663 int vfp_base_count;
1664 int may_use_core_reg = 1;
1665
1666 arg_type = check_typedef (value_type (args[argnum]));
1667 len = TYPE_LENGTH (arg_type);
1668 target_type = TYPE_TARGET_TYPE (arg_type);
1669 typecode = TYPE_CODE (arg_type);
1670 val = value_contents_writeable (args[argnum]);
1671
1672 align = arm_type_align (arg_type);
1673 /* Round alignment up to a whole number of words. */
1674 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
1675 /* Different ABIs have different maximum alignments. */
1676 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
1677 {
1678 /* The APCS ABI only requires word alignment. */
1679 align = INT_REGISTER_SIZE;
1680 }
1681 else
1682 {
1683 /* The AAPCS requires at most doubleword alignment. */
1684 if (align > INT_REGISTER_SIZE * 2)
1685 align = INT_REGISTER_SIZE * 2;
1686 }
1687
1688 if (use_vfp_abi
1689 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
1690 &vfp_base_count))
1691 {
1692 int regno;
1693 int unit_length;
1694 int shift;
1695 unsigned mask;
1696
1697 /* Because this is a CPRC it cannot go in a core register or
1698 cause a core register to be skipped for alignment.
1699 Either it goes in VFP registers and the rest of this loop
1700 iteration is skipped for this argument, or it goes on the
1701 stack (and the stack alignment code is correct for this
1702 case). */
1703 may_use_core_reg = 0;
1704
1705 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
1706 shift = unit_length / 4;
1707 mask = (1 << (shift * vfp_base_count)) - 1;
1708 for (regno = 0; regno < 16; regno += shift)
1709 if (((vfp_regs_free >> regno) & mask) == mask)
1710 break;
1711
1712 if (regno < 16)
1713 {
1714 int reg_char;
1715 int reg_scaled;
1716 int i;
1717
1718 vfp_regs_free &= ~(mask << regno);
1719 reg_scaled = regno / shift;
1720 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
1721 for (i = 0; i < vfp_base_count; i++)
1722 {
1723 char name_buf[4];
1724 int regnum;
1725 if (reg_char == 'q')
1726 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
1727 val + i * unit_length);
1728 else
1729 {
1730 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
1731 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
1732 strlen (name_buf));
1733 regcache_cooked_write (regcache, regnum,
1734 val + i * unit_length);
1735 }
1736 }
1737 continue;
1738 }
1739 else
1740 {
1741 /* This CPRC could not go in VFP registers, so all VFP
1742 registers are now marked as used. */
1743 vfp_regs_free = 0;
1744 }
1745 }
1746
1747 /* Push stack padding for doubleword alignment. */
1748 if (nstack & (align - 1))
1749 {
1750 si = push_stack_item (si, val, INT_REGISTER_SIZE);
1751 nstack += INT_REGISTER_SIZE;
1752 }
1753
1754 /* Doubleword aligned quantities must go in even register pairs. */
1755 if (may_use_core_reg
1756 && argreg <= ARM_LAST_ARG_REGNUM
1757 && align > INT_REGISTER_SIZE
1758 && argreg & 1)
1759 argreg++;
1760
1761 /* If the argument is a pointer to a function, and it is a
1762 Thumb function, create a LOCAL copy of the value and set
1763 the THUMB bit in it. */
1764 if (TYPE_CODE_PTR == typecode
1765 && target_type != NULL
1766 && TYPE_CODE_FUNC == TYPE_CODE (target_type))
1767 {
1768 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
1769 if (arm_pc_is_thumb (regval))
1770 {
1771 val = alloca (len);
1772 store_unsigned_integer (val, len, byte_order,
1773 MAKE_THUMB_ADDR (regval));
1774 }
1775 }
1776
1777 /* Copy the argument to general registers or the stack in
1778 register-sized pieces. Large arguments are split between
1779 registers and stack. */
1780 while (len > 0)
1781 {
1782 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
1783
1784 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
1785 {
1786 /* The argument is being passed in a general purpose
1787 register. */
1788 CORE_ADDR regval
1789 = extract_unsigned_integer (val, partial_len, byte_order);
1790 if (byte_order == BFD_ENDIAN_BIG)
1791 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
1792 if (arm_debug)
1793 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
1794 argnum,
1795 gdbarch_register_name
1796 (gdbarch, argreg),
1797 phex (regval, INT_REGISTER_SIZE));
1798 regcache_cooked_write_unsigned (regcache, argreg, regval);
1799 argreg++;
1800 }
1801 else
1802 {
1803 /* Push the arguments onto the stack. */
1804 if (arm_debug)
1805 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
1806 argnum, nstack);
1807 si = push_stack_item (si, val, INT_REGISTER_SIZE);
1808 nstack += INT_REGISTER_SIZE;
1809 }
1810
1811 len -= partial_len;
1812 val += partial_len;
1813 }
1814 }
1815   /* If we have an odd number of words to push, then decrement the stack
1816      by one word now, so that the first stack argument will be dword aligned.  */
1817 if (nstack & 4)
1818 sp -= 4;
1819
1820 while (si)
1821 {
1822 sp -= si->len;
1823 write_memory (sp, si->data, si->len);
1824 si = pop_stack_item (si);
1825 }
1826
1827   /* Finally, update the SP register.  */
1828 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
1829
1830 return sp;
1831 }
1832
1833
1834 /* Always align the frame to an 8-byte boundary. This is required on
1835 some platforms and harmless on the rest. */
1836
1837 static CORE_ADDR
1838 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1839 {
1840 /* Align the stack to eight bytes. */
1841 return sp & ~ (CORE_ADDR) 7;
1842 }
1843
1844 static void
1845 print_fpu_flags (int flags)
1846 {
1847 if (flags & (1 << 0))
1848 fputs ("IVO ", stdout);
1849 if (flags & (1 << 1))
1850 fputs ("DVZ ", stdout);
1851 if (flags & (1 << 2))
1852 fputs ("OFL ", stdout);
1853 if (flags & (1 << 3))
1854 fputs ("UFL ", stdout);
1855 if (flags & (1 << 4))
1856 fputs ("INX ", stdout);
1857 putchar ('\n');
1858 }
1859
1860 /* Print interesting information about the floating point processor
1861 (if present) or emulator. */
1862 static void
1863 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
1864 struct frame_info *frame, const char *args)
1865 {
1866 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
1867 int type;
1868
1869 type = (status >> 24) & 127;
1870 if (status & (1 << 31))
1871 printf (_("Hardware FPU type %d\n"), type);
1872 else
1873 printf (_("Software FPU type %d\n"), type);
1874 /* i18n: [floating point unit] mask */
1875 fputs (_("mask: "), stdout);
1876 print_fpu_flags (status >> 16);
1877 /* i18n: [floating point unit] flags */
1878 fputs (_("flags: "), stdout);
1879 print_fpu_flags (status);
1880 }
1881
1882 /* Construct the ARM extended floating point type. */
1883 static struct type *
1884 arm_ext_type (struct gdbarch *gdbarch)
1885 {
1886 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1887
1888 if (!tdep->arm_ext_type)
1889 tdep->arm_ext_type
1890 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
1891 floatformats_arm_ext);
1892
1893 return tdep->arm_ext_type;
1894 }
1895
1896 static struct type *
1897 arm_neon_double_type (struct gdbarch *gdbarch)
1898 {
1899 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1900
1901 if (tdep->neon_double_type == NULL)
1902 {
1903 struct type *t, *elem;
1904
1905 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
1906 TYPE_CODE_UNION);
1907 elem = builtin_type (gdbarch)->builtin_uint8;
1908 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
1909 elem = builtin_type (gdbarch)->builtin_uint16;
1910 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
1911 elem = builtin_type (gdbarch)->builtin_uint32;
1912 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
1913 elem = builtin_type (gdbarch)->builtin_uint64;
1914 append_composite_type_field (t, "u64", elem);
1915 elem = builtin_type (gdbarch)->builtin_float;
1916 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
1917 elem = builtin_type (gdbarch)->builtin_double;
1918 append_composite_type_field (t, "f64", elem);
1919
1920 TYPE_VECTOR (t) = 1;
1921 TYPE_NAME (t) = "neon_d";
1922 tdep->neon_double_type = t;
1923 }
1924
1925 return tdep->neon_double_type;
1926 }
1927
1928 /* FIXME: The vector types are not correctly ordered on big-endian
1929 targets. Just as s0 is the low bits of d0, d0[0] is also the low
1930 bits of d0 - regardless of what unit size is being held in d0. So
1931 the offset of the first uint8 in d0 is 7, but the offset of the
1932 first float is 4. This code works as-is for little-endian
1933 targets. */
1934
1935 static struct type *
1936 arm_neon_quad_type (struct gdbarch *gdbarch)
1937 {
1938 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1939
1940 if (tdep->neon_quad_type == NULL)
1941 {
1942 struct type *t, *elem;
1943
1944 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
1945 TYPE_CODE_UNION);
1946 elem = builtin_type (gdbarch)->builtin_uint8;
1947 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
1948 elem = builtin_type (gdbarch)->builtin_uint16;
1949 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
1950 elem = builtin_type (gdbarch)->builtin_uint32;
1951 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
1952 elem = builtin_type (gdbarch)->builtin_uint64;
1953 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
1954 elem = builtin_type (gdbarch)->builtin_float;
1955 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
1956 elem = builtin_type (gdbarch)->builtin_double;
1957 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
1958
1959 TYPE_VECTOR (t) = 1;
1960 TYPE_NAME (t) = "neon_q";
1961 tdep->neon_quad_type = t;
1962 }
1963
1964 return tdep->neon_quad_type;
1965 }
1966
1967 /* Return the GDB type object for the "standard" data type of data in
1968 register N. */
1969
1970 static struct type *
1971 arm_register_type (struct gdbarch *gdbarch, int regnum)
1972 {
1973 int num_regs = gdbarch_num_regs (gdbarch);
1974
1975 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
1976 && regnum >= num_regs && regnum < num_regs + 32)
1977 return builtin_type (gdbarch)->builtin_float;
1978
1979 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
1980 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
1981 return arm_neon_quad_type (gdbarch);
1982
1983 /* If the target description has register information, we are only
1984 in this function so that we can override the types of
1985 double-precision registers for NEON. */
1986 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
1987 {
1988 struct type *t = tdesc_register_type (gdbarch, regnum);
1989
1990 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
1991 && TYPE_CODE (t) == TYPE_CODE_FLT
1992 && gdbarch_tdep (gdbarch)->have_neon)
1993 return arm_neon_double_type (gdbarch);
1994 else
1995 return t;
1996 }
1997
1998 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
1999 {
2000 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
2001 return builtin_type (gdbarch)->builtin_void;
2002
2003 return arm_ext_type (gdbarch);
2004 }
2005 else if (regnum == ARM_SP_REGNUM)
2006 return builtin_type (gdbarch)->builtin_data_ptr;
2007 else if (regnum == ARM_PC_REGNUM)
2008 return builtin_type (gdbarch)->builtin_func_ptr;
2009 else if (regnum >= ARRAY_SIZE (arm_register_names))
2010 /* These registers are only supported on targets which supply
2011 an XML description. */
2012 return builtin_type (gdbarch)->builtin_int0;
2013 else
2014 return builtin_type (gdbarch)->builtin_uint32;
2015 }
2016
2017 /* Map a DWARF register REGNUM onto the appropriate GDB register
2018 number. */
2019
2020 static int
2021 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2022 {
2023 /* Core integer regs. */
2024 if (reg >= 0 && reg <= 15)
2025 return reg;
2026
2027 /* Legacy FPA encoding. These were once used in a way which
2028 overlapped with VFP register numbering, so their use is
2029 discouraged, but GDB doesn't support the ARM toolchain
2030 which used them for VFP. */
2031 if (reg >= 16 && reg <= 23)
2032 return ARM_F0_REGNUM + reg - 16;
2033
2034 /* New assignments for the FPA registers. */
2035 if (reg >= 96 && reg <= 103)
2036 return ARM_F0_REGNUM + reg - 96;
2037
2038 /* WMMX register assignments. */
2039 if (reg >= 104 && reg <= 111)
2040 return ARM_WCGR0_REGNUM + reg - 104;
2041
2042 if (reg >= 112 && reg <= 127)
2043 return ARM_WR0_REGNUM + reg - 112;
2044
2045 if (reg >= 192 && reg <= 199)
2046 return ARM_WC0_REGNUM + reg - 192;
2047
2048 /* VFP v2 registers. A double precision value is actually
2049 in d1 rather than s2, but the ABI only defines numbering
2050 for the single precision registers. This will "just work"
2051 in GDB for little endian targets (we'll read eight bytes,
2052 starting in s0 and then progressing to s1), but will be
2053 reversed on big endian targets with VFP. This won't
2054 be a problem for the new Neon quad registers; you're supposed
2055 to use DW_OP_piece for those. */
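  /* For example, DWARF register 66 is mapped to whatever GDB register
     is named "s2".  */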
2056 if (reg >= 64 && reg <= 95)
2057 {
2058 char name_buf[4];
2059
2060 sprintf (name_buf, "s%d", reg - 64);
2061 return user_reg_map_name_to_regnum (gdbarch, name_buf,
2062 strlen (name_buf));
2063 }
2064
2065 /* VFP v3 / Neon registers. This range is also used for VFP v2
2066 registers, except that it now describes d0 instead of s0. */
2067 if (reg >= 256 && reg <= 287)
2068 {
2069 char name_buf[4];
2070
2071 sprintf (name_buf, "d%d", reg - 256);
2072 return user_reg_map_name_to_regnum (gdbarch, name_buf,
2073 strlen (name_buf));
2074 }
2075
2076 return -1;
2077 }
2078
2079 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
2080 static int
2081 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
2082 {
2083 int reg = regnum;
2084 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
2085
2086 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
2087 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
2088
2089 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
2090 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
2091
2092 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
2093 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
2094
2095 if (reg < NUM_GREGS)
2096 return SIM_ARM_R0_REGNUM + reg;
2097 reg -= NUM_GREGS;
2098
2099 if (reg < NUM_FREGS)
2100 return SIM_ARM_FP0_REGNUM + reg;
2101 reg -= NUM_FREGS;
2102
2103 if (reg < NUM_SREGS)
2104 return SIM_ARM_FPS_REGNUM + reg;
2105 reg -= NUM_SREGS;
2106
2107 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
2108 }
2109
2110 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
2111 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
2112    It is thought that this is the floating-point register format on
2113 little-endian systems. */
2114
2115 static void
2116 convert_from_extended (const struct floatformat *fmt, const void *ptr,
2117 void *dbl, int endianess)
2118 {
2119 DOUBLEST d;
2120
2121 if (endianess == BFD_ENDIAN_BIG)
2122 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
2123 else
2124 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
2125 ptr, &d);
2126 floatformat_from_doublest (fmt, &d, dbl);
2127 }
2128
2129 static void
2130 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
2131 int endianess)
2132 {
2133 DOUBLEST d;
2134
2135 floatformat_to_doublest (fmt, ptr, &d);
2136 if (endianess == BFD_ENDIAN_BIG)
2137 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
2138 else
2139 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
2140 &d, dbl);
2141 }
2142
2143 static int
2144 condition_true (unsigned long cond, unsigned long status_reg)
2145 {
2146 if (cond == INST_AL || cond == INST_NV)
2147 return 1;
2148
2149 switch (cond)
2150 {
2151 case INST_EQ:
2152 return ((status_reg & FLAG_Z) != 0);
2153 case INST_NE:
2154 return ((status_reg & FLAG_Z) == 0);
2155 case INST_CS:
2156 return ((status_reg & FLAG_C) != 0);
2157 case INST_CC:
2158 return ((status_reg & FLAG_C) == 0);
2159 case INST_MI:
2160 return ((status_reg & FLAG_N) != 0);
2161 case INST_PL:
2162 return ((status_reg & FLAG_N) == 0);
2163 case INST_VS:
2164 return ((status_reg & FLAG_V) != 0);
2165 case INST_VC:
2166 return ((status_reg & FLAG_V) == 0);
2167 case INST_HI:
2168 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
2169 case INST_LS:
2170 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
2171 case INST_GE:
2172 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
2173 case INST_LT:
2174 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
2175 case INST_GT:
2176 return (((status_reg & FLAG_Z) == 0)
2177 && (((status_reg & FLAG_N) == 0)
2178 == ((status_reg & FLAG_V) == 0)));
2179 case INST_LE:
2180 return (((status_reg & FLAG_Z) != 0)
2181 || (((status_reg & FLAG_N) == 0)
2182 != ((status_reg & FLAG_V) == 0)));
2183 }
2184 return 1;
2185 }
2186
2187 /* Support routines for single stepping. Calculate the next PC value. */
2188 #define submask(x) ((1L << ((x) + 1)) - 1)
2189 #define bit(obj,st) (((obj) >> (st)) & 1)
2190 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
2191 #define sbits(obj,st,fn) \
2192 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
2193 #define BranchDest(addr,instr) \
2194 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
2195 #define ARM_PC_32 1
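/* For example, the ARM instruction 0xeafffffe ("b .") has bits (instr, 0, 23)
   equal to 0xfffffe, so sbits (instr, 0, 23) is -2 and BranchDest yields
   addr + 8 + (-2 << 2) == addr, i.e. a branch to itself.  */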
2196
2197 static unsigned long
2198 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
2199 unsigned long pc_val, unsigned long status_reg)
2200 {
2201 unsigned long res, shift;
2202 int rm = bits (inst, 0, 3);
2203 unsigned long shifttype = bits (inst, 5, 6);
2204
2205 if (bit (inst, 4))
2206 {
2207 int rs = bits (inst, 8, 11);
2208 shift = (rs == 15 ? pc_val + 8
2209 : get_frame_register_unsigned (frame, rs)) & 0xFF;
2210 }
2211 else
2212 shift = bits (inst, 7, 11);
2213
2214 res = (rm == 15
2215 ? ((pc_val | (ARM_PC_32 ? 0 : status_reg))
2216 + (bit (inst, 4) ? 12 : 8))
2217 : get_frame_register_unsigned (frame, rm));
2218
2219 switch (shifttype)
2220 {
2221 case 0: /* LSL */
2222 res = shift >= 32 ? 0 : res << shift;
2223 break;
2224
2225 case 1: /* LSR */
2226 res = shift >= 32 ? 0 : res >> shift;
2227 break;
2228
2229 case 2: /* ASR */
2230 if (shift >= 32)
2231 shift = 31;
2232 res = ((res & 0x80000000L)
2233 ? ~((~res) >> shift) : res >> shift);
2234 break;
2235
2236 case 3: /* ROR/RRX */
2237 shift &= 31;
2238 if (shift == 0)
2239 res = (res >> 1) | (carry ? 0x80000000L : 0);
2240 else
2241 res = (res >> shift) | (res << (32 - shift));
2242 break;
2243 }
2244
2245 return res & 0xffffffff;
2246 }
2247
2248 /* Return number of 1-bits in VAL. */
2249
2250 static int
2251 bitcount (unsigned long val)
2252 {
2253 int nbits;
2254 for (nbits = 0; val != 0; nbits++)
2255 val &= val - 1; /* delete rightmost 1-bit in val */
2256 return nbits;
2257 }
2258
2259 static CORE_ADDR
2260 thumb_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
2261 {
2262 struct gdbarch *gdbarch = get_frame_arch (frame);
2263 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2264 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2265 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
2266 unsigned short inst1;
2267 CORE_ADDR nextpc = pc + 2; /* default is next instruction */
2268 unsigned long offset;
2269 ULONGEST status, it;
2270
2271 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2272
2273 /* Thumb-2 conditional execution support. There are eight bits in
2274 the CPSR which describe conditional execution state. Once
2275 reconstructed (they're in a funny order), the low five bits
2276 describe the low bit of the condition for each instruction and
2277 how many instructions remain. The high three bits describe the
2278 base condition. One of the low four bits will be set if an IT
2279 block is active. These bits read as zero on earlier
2280 processors. */
2281 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
2282 it = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
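  /* Bits [7:4] of the reconstructed value give the full condition code
     for the instruction at PC; that is why COND below is taken from the
     top four bits.  */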
2283
2284 /* On GNU/Linux, where this routine is used, we use an undefined
2285 instruction as a breakpoint. Unlike BKPT, IT can disable execution
2286 of the undefined instruction. So we might miss the breakpoint! */
2287 if ((inst1 & 0xff00) == 0xbf00 || (it & 0x0f))
2288 error (_("Stepping through Thumb-2 IT blocks is not yet supported"));
2289
2290 if (it & 0x0f)
2291 {
2292 /* We are in a conditional block. Check the condition. */
2293 int cond = it >> 4;
2294
2295 if (! condition_true (cond, status))
2296 {
2297 /* Advance to the next instruction. All the 32-bit
2298 instructions share a common prefix. */
2299 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
2300 return pc + 4;
2301 else
2302 return pc + 2;
2303 }
2304 }
2305
2306 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
2307 {
2308 CORE_ADDR sp;
2309
2310 /* Fetch the saved PC from the stack. It's stored above
2311 all of the other registers. */
2312 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
2313 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
2314 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
2315 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2316 if (nextpc == pc)
2317 error (_("Infinite loop detected"));
2318 }
2319 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
2320 {
2321 unsigned long cond = bits (inst1, 8, 11);
2322 if (cond != 0x0f && condition_true (cond, status)) /* 0x0f = SWI */
2323 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
2324 }
2325 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
2326 {
2327 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
2328 }
2329 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
2330 {
2331 unsigned short inst2;
2332 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
2333
2334 /* Default to the next instruction. */
2335 nextpc = pc + 4;
2336
2337 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
2338 {
2339 /* Branches and miscellaneous control instructions. */
2340
2341 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
2342 {
2343 /* B, BL, BLX. */
2344 int j1, j2, imm1, imm2;
2345
2346 imm1 = sbits (inst1, 0, 10);
2347 imm2 = bits (inst2, 0, 10);
2348 j1 = bit (inst2, 13);
2349 j2 = bit (inst2, 11);
2350
2351 offset = ((imm1 << 12) + (imm2 << 1));
2352 offset ^= ((!j2) << 22) | ((!j1) << 23);
2353
2354 nextpc = pc_val + offset;
2355 /* For BLX make sure to clear the low bits. */
2356 if (bit (inst2, 12) == 0)
2357 nextpc = nextpc & 0xfffffffc;
2358 }
2359 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
2360 {
2361 /* SUBS PC, LR, #imm8. */
2362 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
2363 nextpc -= inst2 & 0x00ff;
2364 }
2365 else if ((inst2 & 0xd000) == 0xc000 && (inst1 & 0x0380) != 0x0380)
2366 {
2367 /* Conditional branch. */
2368 if (condition_true (bits (inst1, 6, 9), status))
2369 {
2370 int sign, j1, j2, imm1, imm2;
2371
2372 sign = sbits (inst1, 10, 10);
2373 imm1 = bits (inst1, 0, 5);
2374 imm2 = bits (inst2, 0, 10);
2375 j1 = bit (inst2, 13);
2376 j2 = bit (inst2, 11);
2377
2378 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
2379 offset += (imm1 << 12) + (imm2 << 1);
2380
2381 nextpc = pc_val + offset;
2382 }
2383 }
2384 }
2385 else if ((inst1 & 0xfe50) == 0xe810)
2386 {
2387 /* Load multiple or RFE. */
2388 int rn, offset, load_pc = 1;
2389
2390 rn = bits (inst1, 0, 3);
2391 if (bit (inst1, 7) && !bit (inst1, 8))
2392 {
2393 /* LDMIA or POP */
2394 if (!bit (inst2, 15))
2395 load_pc = 0;
2396 offset = bitcount (inst2) * 4 - 4;
2397 }
2398 else if (!bit (inst1, 7) && bit (inst1, 8))
2399 {
2400 /* LDMDB */
2401 if (!bit (inst2, 15))
2402 load_pc = 0;
2403 offset = -4;
2404 }
2405 else if (bit (inst1, 7) && bit (inst1, 8))
2406 {
2407 /* RFEIA */
2408 offset = 0;
2409 }
2410 else if (!bit (inst1, 7) && !bit (inst1, 8))
2411 {
2412 /* RFEDB */
2413 offset = -8;
2414 }
2415 else
2416 load_pc = 0;
2417
2418 if (load_pc)
2419 {
2420 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
2421 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
2422 }
2423 }
2424 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
2425 {
2426 /* MOV PC or MOVS PC. */
2427 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2428 }
2429 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
2430 {
2431 /* LDR PC. */
2432 CORE_ADDR base;
2433 int rn, load_pc = 1;
2434
2435 rn = bits (inst1, 0, 3);
2436 base = get_frame_register_unsigned (frame, rn);
2437 if (rn == 15)
2438 {
2439 base = (base + 4) & ~(CORE_ADDR) 0x3;
2440 if (bit (inst1, 7))
2441 base += bits (inst2, 0, 11);
2442 else
2443 base -= bits (inst2, 0, 11);
2444 }
2445 else if (bit (inst1, 7))
2446 base += bits (inst2, 0, 11);
2447 else if (bit (inst2, 11))
2448 {
2449 if (bit (inst2, 10))
2450 {
2451 if (bit (inst2, 9))
2452 base += bits (inst2, 0, 7);
2453 else
2454 base -= bits (inst2, 0, 7);
2455 }
2456 }
2457 else if ((inst2 & 0x0fc0) == 0x0000)
2458 {
2459 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
2460 base += get_frame_register_unsigned (frame, rm) << shift;
2461 }
2462 else
2463 /* Reserved. */
2464 load_pc = 0;
2465
2466 if (load_pc)
2467 nextpc = get_frame_memory_unsigned (frame, base, 4);
2468 }
2469 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
2470 {
2471 /* TBB. */
2472 CORE_ADDR table, offset, length;
2473
2474 table = get_frame_register_unsigned (frame, bits (inst1, 0, 3));
2475 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2476 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
2477 nextpc = pc_val + length;
2478 }
2479       else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
2480 {
2481 /* TBH. */
2482 CORE_ADDR table, offset, length;
2483
2484 table = get_frame_register_unsigned (frame, bits (inst1, 0, 3));
2485 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2486 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
2487 nextpc = pc_val + length;
2488 }
2489 }
2490 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
2491 {
2492 if (bits (inst1, 3, 6) == 0x0f)
2493 nextpc = pc_val;
2494 else
2495 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
2496
2497 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2498 if (nextpc == pc)
2499 error (_("Infinite loop detected"));
2500 }
2501 else if ((inst1 & 0xf500) == 0xb100)
2502 {
2503 /* CBNZ or CBZ. */
2504 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
2505 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
2506
2507 if (bit (inst1, 11) && reg != 0)
2508 nextpc = pc_val + imm;
2509 else if (!bit (inst1, 11) && reg == 0)
2510 nextpc = pc_val + imm;
2511 }
2512
2513 return nextpc;
2514 }
2515
2516 CORE_ADDR
2517 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
2518 {
2519 struct gdbarch *gdbarch = get_frame_arch (frame);
2520 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2521 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2522 unsigned long pc_val;
2523 unsigned long this_instr;
2524 unsigned long status;
2525 CORE_ADDR nextpc;
2526
2527 if (arm_frame_is_thumb (frame))
2528 return thumb_get_next_pc (frame, pc);
2529
2530 pc_val = (unsigned long) pc;
2531 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
2532
2533 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
2534 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
2535
2536 if (bits (this_instr, 28, 31) == INST_NV)
2537 switch (bits (this_instr, 24, 27))
2538 {
2539 case 0xa:
2540 case 0xb:
2541 {
2542 /* Branch with Link and change to Thumb. */
2543 nextpc = BranchDest (pc, this_instr);
2544 nextpc |= bit (this_instr, 24) << 1;
2545
2546 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2547 if (nextpc == pc)
2548 error (_("Infinite loop detected"));
2549 break;
2550 }
2551 case 0xc:
2552 case 0xd:
2553 case 0xe:
2554 /* Coprocessor register transfer. */
2555 if (bits (this_instr, 12, 15) == 15)
2556 error (_("Invalid update to pc in instruction"));
2557 break;
2558 }
2559 else if (condition_true (bits (this_instr, 28, 31), status))
2560 {
2561 switch (bits (this_instr, 24, 27))
2562 {
2563 case 0x0:
2564 case 0x1: /* data processing */
2565 case 0x2:
2566 case 0x3:
2567 {
2568 unsigned long operand1, operand2, result = 0;
2569 unsigned long rn;
2570 int c;
2571
2572 if (bits (this_instr, 12, 15) != 15)
2573 break;
2574
2575 if (bits (this_instr, 22, 25) == 0
2576 && bits (this_instr, 4, 7) == 9) /* multiply */
2577 error (_("Invalid update to pc in instruction"));
2578
2579 /* BX <reg>, BLX <reg> */
2580 if (bits (this_instr, 4, 27) == 0x12fff1
2581 || bits (this_instr, 4, 27) == 0x12fff3)
2582 {
2583 rn = bits (this_instr, 0, 3);
2584 result = (rn == 15) ? pc_val + 8
2585 : get_frame_register_unsigned (frame, rn);
2586 nextpc = (CORE_ADDR) gdbarch_addr_bits_remove
2587 (gdbarch, result);
2588
2589 if (nextpc == pc)
2590 error (_("Infinite loop detected"));
2591
2592 return nextpc;
2593 }
2594
2595         /* Otherwise, a data processing instruction writing to the PC.  */
2596 c = (status & FLAG_C) ? 1 : 0;
2597 rn = bits (this_instr, 16, 19);
2598 operand1 = (rn == 15) ? pc_val + 8
2599 : get_frame_register_unsigned (frame, rn);
2600
2601 if (bit (this_instr, 25))
2602 {
2603 unsigned long immval = bits (this_instr, 0, 7);
2604 unsigned long rotate = 2 * bits (this_instr, 8, 11);
2605 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
2606 & 0xffffffff;
2607 }
2608 else /* operand 2 is a shifted register */
2609 operand2 = shifted_reg_val (frame, this_instr, c, pc_val, status);
2610
2611 switch (bits (this_instr, 21, 24))
2612 {
2613 case 0x0: /*and */
2614 result = operand1 & operand2;
2615 break;
2616
2617 case 0x1: /*eor */
2618 result = operand1 ^ operand2;
2619 break;
2620
2621 case 0x2: /*sub */
2622 result = operand1 - operand2;
2623 break;
2624
2625 case 0x3: /*rsb */
2626 result = operand2 - operand1;
2627 break;
2628
2629 case 0x4: /*add */
2630 result = operand1 + operand2;
2631 break;
2632
2633 case 0x5: /*adc */
2634 result = operand1 + operand2 + c;
2635 break;
2636
2637 case 0x6: /*sbc */
2638 result = operand1 - operand2 + c;
2639 break;
2640
2641 case 0x7: /*rsc */
2642 result = operand2 - operand1 + c;
2643 break;
2644
2645 case 0x8:
2646 case 0x9:
2647 case 0xa:
2648 case 0xb: /* tst, teq, cmp, cmn */
2649 result = (unsigned long) nextpc;
2650 break;
2651
2652 case 0xc: /*orr */
2653 result = operand1 | operand2;
2654 break;
2655
2656 case 0xd: /*mov */
2657 /* Always step into a function. */
2658 result = operand2;
2659 break;
2660
2661 case 0xe: /*bic */
2662 result = operand1 & ~operand2;
2663 break;
2664
2665 case 0xf: /*mvn */
2666 result = ~operand2;
2667 break;
2668 }
2669 nextpc = (CORE_ADDR) gdbarch_addr_bits_remove
2670 (gdbarch, result);
2671
2672 if (nextpc == pc)
2673 error (_("Infinite loop detected"));
2674 break;
2675 }
2676
2677 case 0x4:
2678 case 0x5: /* data transfer */
2679 case 0x6:
2680 case 0x7:
2681 if (bit (this_instr, 20))
2682 {
2683 /* load */
2684 if (bits (this_instr, 12, 15) == 15)
2685 {
2686 /* rd == pc */
2687 unsigned long rn;
2688 unsigned long base;
2689
2690 if (bit (this_instr, 22))
2691 error (_("Invalid update to pc in instruction"));
2692
2693 /* byte write to PC */
2694 rn = bits (this_instr, 16, 19);
2695 base = (rn == 15) ? pc_val + 8
2696 : get_frame_register_unsigned (frame, rn);
2697 if (bit (this_instr, 24))
2698 {
2699 /* pre-indexed */
2700 int c = (status & FLAG_C) ? 1 : 0;
2701 unsigned long offset =
2702 (bit (this_instr, 25)
2703 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
2704 : bits (this_instr, 0, 11));
2705
2706 if (bit (this_instr, 23))
2707 base += offset;
2708 else
2709 base -= offset;
2710 }
2711 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
2712 4, byte_order);
2713
2714 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2715
2716 if (nextpc == pc)
2717 error (_("Infinite loop detected"));
2718 }
2719 }
2720 break;
2721
2722 case 0x8:
2723 case 0x9: /* block transfer */
2724 if (bit (this_instr, 20))
2725 {
2726 /* LDM */
2727 if (bit (this_instr, 15))
2728 {
2729 /* loading pc */
2730 int offset = 0;
2731
2732 if (bit (this_instr, 23))
2733 {
2734 /* up */
2735 unsigned long reglist = bits (this_instr, 0, 14);
2736 offset = bitcount (reglist) * 4;
2737 if (bit (this_instr, 24)) /* pre */
2738 offset += 4;
2739 }
2740 else if (bit (this_instr, 24))
2741 offset = -4;
2742
2743 {
2744 unsigned long rn_val =
2745 get_frame_register_unsigned (frame,
2746 bits (this_instr, 16, 19));
2747 nextpc =
2748 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
2749 + offset),
2750 4, byte_order);
2751 }
2752 nextpc = gdbarch_addr_bits_remove
2753 (gdbarch, nextpc);
2754 if (nextpc == pc)
2755 error (_("Infinite loop detected"));
2756 }
2757 }
2758 break;
2759
2760 case 0xb: /* branch & link */
2761 case 0xa: /* branch */
2762 {
2763 nextpc = BranchDest (pc, this_instr);
2764
2765 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2766 if (nextpc == pc)
2767 error (_("Infinite loop detected"));
2768 break;
2769 }
2770
2771 case 0xc:
2772 case 0xd:
2773 case 0xe: /* coproc ops */
2774 case 0xf: /* SWI */
2775 break;
2776
2777 default:
2778 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
2779 return (pc);
2780 }
2781 }
2782
2783 return nextpc;
2784 }
2785
2786 /* single_step() is called just before we want to resume the inferior,
2787 if we want to single-step it but there is no hardware or kernel
2788 single-step support. We find the target of the coming instruction
2789 and breakpoint it. */
2790
2791 int
2792 arm_software_single_step (struct frame_info *frame)
2793 {
2794 struct gdbarch *gdbarch = get_frame_arch (frame);
2795
2796 /* NOTE: This may insert the wrong breakpoint instruction when
2797 single-stepping over a mode-changing instruction, if the
2798 CPSR heuristics are used. */
2799
2800 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
2801 insert_single_step_breakpoint (gdbarch, next_pc);
2802
2803 return 1;
2804 }
2805
2806 /* ARM displaced stepping support.
2807
2808 Generally ARM displaced stepping works as follows:
2809
2810 1. When an instruction is to be single-stepped, it is first decoded by
2811 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
2812 Depending on the type of instruction, it is then copied to a scratch
2813 location, possibly in a modified form. The copy_* set of functions
2814 performs such modification, as necessary. A breakpoint is placed after
2815 the modified instruction in the scratch space to return control to GDB.
2816 Note in particular that instructions which modify the PC will no longer
2817 do so after modification.
2818
2819 2. The instruction is single-stepped, by setting the PC to the scratch
2820 location address, and resuming. Control returns to GDB when the
2821 breakpoint is hit.
2822
2823 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
2824 function used for the current instruction. This function's job is to
2825 put the CPU/memory state back to what it would have been if the
2826 instruction had been executed unmodified in its original location. */
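/* As a concrete illustration, copy_b_bl_blx below copies a "bl <label>"
   to the scratch area as a simple NOP; cleanup_branch then sets r14 and
   writes the branch destination to the PC, mirroring what the original
   instruction would have done at its original address.  */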
2827
2828 /* NOP instruction (mov r0, r0). */
2829 #define ARM_NOP 0xe1a00000
2830
2831 /* Helper for register reads for displaced stepping. In particular, this
2832 returns the PC as it would be seen by the instruction at its original
2833 location. */
2834
2835 ULONGEST
2836 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
2837 {
2838 ULONGEST ret;
2839
2840 if (regno == 15)
2841 {
2842 if (debug_displaced)
2843 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
2844 (unsigned long) from + 8);
2845 return (ULONGEST) from + 8; /* Pipeline offset. */
2846 }
2847 else
2848 {
2849 regcache_cooked_read_unsigned (regs, regno, &ret);
2850 if (debug_displaced)
2851 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
2852 regno, (unsigned long) ret);
2853 return ret;
2854 }
2855 }
2856
2857 static int
2858 displaced_in_arm_mode (struct regcache *regs)
2859 {
2860 ULONGEST ps;
2861
2862 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
2863
2864 return (ps & CPSR_T) == 0;
2865 }
2866
2867 /* Write to the PC as from a branch instruction. */
2868
2869 static void
2870 branch_write_pc (struct regcache *regs, ULONGEST val)
2871 {
2872 if (displaced_in_arm_mode (regs))
2873 /* Note: If bits 0/1 are set, this branch would be unpredictable for
2874 architecture versions < 6. */
2875 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x3);
2876 else
2877 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x1);
2878 }
2879
2880 /* Write to the PC as from a branch-exchange instruction. */
2881
2882 static void
2883 bx_write_pc (struct regcache *regs, ULONGEST val)
2884 {
2885 ULONGEST ps;
2886
2887 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
2888
2889 if ((val & 1) == 1)
2890 {
2891 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | CPSR_T);
2892 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
2893 }
2894 else if ((val & 2) == 0)
2895 {
2896 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM,
2897 ps & ~(ULONGEST) CPSR_T);
2898 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
2899 }
2900 else
2901 {
2902 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
2903 mode, align dest to 4 bytes). */
2904 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
2905 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM,
2906 ps & ~(ULONGEST) CPSR_T);
2907 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
2908 }
2909 }
2910
2911 /* Write to the PC as if from a load instruction. */
2912
2913 static void
2914 load_write_pc (struct regcache *regs, ULONGEST val)
2915 {
2916 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
2917 bx_write_pc (regs, val);
2918 else
2919 branch_write_pc (regs, val);
2920 }
2921
2922 /* Write to the PC as if from an ALU instruction. */
2923
2924 static void
2925 alu_write_pc (struct regcache *regs, ULONGEST val)
2926 {
2927 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
2928 bx_write_pc (regs, val);
2929 else
2930 branch_write_pc (regs, val);
2931 }
2932
2933 /* Helper for writing to registers for displaced stepping. Writing to the PC
2934    has varying effects depending on the instruction which does the write:
2935 this is controlled by the WRITE_PC argument. */
2936
2937 void
2938 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
2939 int regno, ULONGEST val, enum pc_write_style write_pc)
2940 {
2941 if (regno == 15)
2942 {
2943 if (debug_displaced)
2944 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
2945 (unsigned long) val);
2946 switch (write_pc)
2947 {
2948 case BRANCH_WRITE_PC:
2949 branch_write_pc (regs, val);
2950 break;
2951
2952 case BX_WRITE_PC:
2953 bx_write_pc (regs, val);
2954 break;
2955
2956 case LOAD_WRITE_PC:
2957 load_write_pc (regs, val);
2958 break;
2959
2960 case ALU_WRITE_PC:
2961 alu_write_pc (regs, val);
2962 break;
2963
2964 case CANNOT_WRITE_PC:
2965 warning (_("Instruction wrote to PC in an unexpected way when "
2966 "single-stepping"));
2967 break;
2968
2969 default:
2970 internal_error (__FILE__, __LINE__,
2971 _("Invalid argument to displaced_write_reg"));
2972 }
2973
2974 dsc->wrote_to_pc = 1;
2975 }
2976 else
2977 {
2978 if (debug_displaced)
2979 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
2980 regno, (unsigned long) val);
2981 regcache_cooked_write_unsigned (regs, regno, val);
2982 }
2983 }
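
/* A minimal illustration, kept out of the build, of how the two helpers
   above combine.  Reading r15 with displaced_read_reg yields the value the
   original instruction would have seen (its own address plus 8), and
   writing it back with BRANCH_WRITE_PC behaves like a branch.  The
   function name is purely illustrative and not part of this file's API.  */
#if 0
static void
displaced_pc_example (struct regcache *regs,
		      struct displaced_step_closure *dsc, CORE_ADDR from)
{
  /* FROM + 8, i.e. the architectural PC at the original location.  */
  ULONGEST pc = displaced_read_reg (regs, from, ARM_PC_REGNUM);

  /* Writing it back as a branch leaves the inferior where it started.  */
  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, pc, BRANCH_WRITE_PC);
}
#endif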
2984
2985 /* This function is used to concisely determine if an instruction INSN
2986 references PC. Register fields of interest in INSN should have the
2987    corresponding fields of BITMASK set to 0b1111.  The function returns 1
2988 if any of these fields in INSN reference the PC (also 0b1111, r15), else it
2989 returns 0. */
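/* For example, insn_references_pc (0xe59ff004, 0x000ff000) returns 1 for
   the instruction "ldr pc, [pc, #4]", since both the Rn field (bits 16-19)
   and the Rt field (bits 12-15) of that encoding are 0xf.  */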
2990
2991 static int
2992 insn_references_pc (uint32_t insn, uint32_t bitmask)
2993 {
2994 uint32_t lowbit = 1;
2995
2996 while (bitmask != 0)
2997 {
2998 uint32_t mask;
2999
3000 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
3001 ;
3002
3003 if (!lowbit)
3004 break;
3005
3006 mask = lowbit * 0xf;
3007
3008 if ((insn & mask) == mask)
3009 return 1;
3010
3011 bitmask &= ~mask;
3012 }
3013
3014 return 0;
3015 }
3016
3017 /* The simplest copy function. Many instructions have the same effect no
3018 matter what address they are executed at: in those cases, use this. */
3019
3020 static int
3021 copy_unmodified (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3022 const char *iname, struct displaced_step_closure *dsc)
3023 {
3024 if (debug_displaced)
3025 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
3026 "opcode/class '%s' unmodified\n", (unsigned long) insn,
3027 iname);
3028
3029 dsc->modinsn[0] = insn;
3030
3031 return 0;
3032 }
3033
3034 /* Preload instructions with immediate offset. */
3035
3036 static void
3037 cleanup_preload (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3038 struct regcache *regs, struct displaced_step_closure *dsc)
3039 {
3040 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3041 if (!dsc->u.preload.immed)
3042 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3043 }
3044
3045 static int
3046 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3047 struct displaced_step_closure *dsc)
3048 {
3049 unsigned int rn = bits (insn, 16, 19);
3050 ULONGEST rn_val;
3051 CORE_ADDR from = dsc->insn_addr;
3052
3053 if (!insn_references_pc (insn, 0x000f0000ul))
3054 return copy_unmodified (gdbarch, insn, "preload", dsc);
3055
3056 if (debug_displaced)
3057 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
3058 (unsigned long) insn);
3059
3060 /* Preload instructions:
3061
3062 {pli/pld} [rn, #+/-imm]
3063 ->
3064 {pli/pld} [r0, #+/-imm]. */
3065
3066 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3067 rn_val = displaced_read_reg (regs, from, rn);
3068 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3069
3070 dsc->u.preload.immed = 1;
3071
3072 dsc->modinsn[0] = insn & 0xfff0ffff;
3073
3074 dsc->cleanup = &cleanup_preload;
3075
3076 return 0;
3077 }
3078
3079 /* Preload instructions with register offset. */
3080
3081 static int
3082 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3083 struct displaced_step_closure *dsc)
3084 {
3085 unsigned int rn = bits (insn, 16, 19);
3086 unsigned int rm = bits (insn, 0, 3);
3087 ULONGEST rn_val, rm_val;
3088 CORE_ADDR from = dsc->insn_addr;
3089
3090 if (!insn_references_pc (insn, 0x000f000ful))
3091 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
3092
3093 if (debug_displaced)
3094 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
3095 (unsigned long) insn);
3096
3097 /* Preload register-offset instructions:
3098
3099 {pli/pld} [rn, rm {, shift}]
3100 ->
3101 {pli/pld} [r0, r1 {, shift}]. */
3102
3103 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3104 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3105 rn_val = displaced_read_reg (regs, from, rn);
3106 rm_val = displaced_read_reg (regs, from, rm);
3107 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3108 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
3109
3110 dsc->u.preload.immed = 0;
3111
3112 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
3113
3114 dsc->cleanup = &cleanup_preload;
3115
3116 return 0;
3117 }
3118
3119 /* Copy/cleanup coprocessor load and store instructions. */
3120
3121 static void
3122 cleanup_copro_load_store (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3123 struct regcache *regs,
3124 struct displaced_step_closure *dsc)
3125 {
3126 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3127
3128 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3129
3130 if (dsc->u.ldst.writeback)
3131 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
3132 }
3133
3134 static int
3135 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
3136 struct regcache *regs,
3137 struct displaced_step_closure *dsc)
3138 {
3139 unsigned int rn = bits (insn, 16, 19);
3140 ULONGEST rn_val;
3141 CORE_ADDR from = dsc->insn_addr;
3142
3143 if (!insn_references_pc (insn, 0x000f0000ul))
3144 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
3145
3146 if (debug_displaced)
3147 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
3148 "load/store insn %.8lx\n", (unsigned long) insn);
3149
3150 /* Coprocessor load/store instructions:
3151
3152 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
3153 ->
3154 {stc/stc2} [r0, #+/-imm].
3155
3156 ldc/ldc2 are handled identically. */
3157
3158 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3159 rn_val = displaced_read_reg (regs, from, rn);
3160 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3161
3162 dsc->u.ldst.writeback = bit (insn, 25);
3163 dsc->u.ldst.rn = rn;
3164
3165 dsc->modinsn[0] = insn & 0xfff0ffff;
3166
3167 dsc->cleanup = &cleanup_copro_load_store;
3168
3169 return 0;
3170 }
3171
3172 /* Clean up branch instructions (actually perform the branch, by setting
3173 PC). */
3174
3175 static void
3176 cleanup_branch (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
3177 struct displaced_step_closure *dsc)
3178 {
3179 ULONGEST from = dsc->insn_addr;
3180 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
3181 int branch_taken = condition_true (dsc->u.branch.cond, status);
3182 enum pc_write_style write_pc = dsc->u.branch.exchange
3183 ? BX_WRITE_PC : BRANCH_WRITE_PC;
3184
3185 if (!branch_taken)
3186 return;
3187
3188 if (dsc->u.branch.link)
3189 {
3190 ULONGEST pc = displaced_read_reg (regs, from, 15);
3191 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
3192 }
3193
3194 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
3195 }
3196
3197 /* Copy B/BL/BLX instructions with immediate destinations. */
3198
3199 static int
3200 copy_b_bl_blx (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3201 struct regcache *regs, struct displaced_step_closure *dsc)
3202 {
3203 unsigned int cond = bits (insn, 28, 31);
3204 int exchange = (cond == 0xf);
3205 int link = exchange || bit (insn, 24);
3206 CORE_ADDR from = dsc->insn_addr;
3207 long offset;
3208
3209 if (debug_displaced)
3210 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
3211 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
3212 (unsigned long) insn);
3213
3214 /* Implement "BL<cond> <label>" as:
3215
3216 Preparation: cond <- instruction condition
3217 Insn: mov r0, r0 (nop)
3218 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
3219
3220 B<cond> similar, but don't set r14 in cleanup. */
3221
3222 if (exchange)
3223 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
3224 then arrange the switch into Thumb mode. */
3225 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
3226 else
3227 offset = bits (insn, 0, 23) << 2;
3228
3229 if (bit (offset, 25))
3230 offset = offset | ~0x3ffffff;
3231
3232 dsc->u.branch.cond = cond;
3233 dsc->u.branch.link = link;
3234 dsc->u.branch.exchange = exchange;
3235 dsc->u.branch.dest = from + 8 + offset;
3236
3237 dsc->modinsn[0] = ARM_NOP;
3238
3239 dsc->cleanup = &cleanup_branch;
3240
3241 return 0;
3242 }
3243
3244 /* Copy BX/BLX with register-specified destinations. */
3245
3246 static int
3247 copy_bx_blx_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3248 struct regcache *regs, struct displaced_step_closure *dsc)
3249 {
3250 unsigned int cond = bits (insn, 28, 31);
3251 /* BX: x12xxx1x
3252 BLX: x12xxx3x. */
3253 int link = bit (insn, 5);
3254 unsigned int rm = bits (insn, 0, 3);
3255 CORE_ADDR from = dsc->insn_addr;
3256
3257 if (debug_displaced)
3258 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
3259 "%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
3260
3261   /* Implement "{BX,BLX}<cond> <reg>" as:
3262
3263 Preparation: cond <- instruction condition
3264 Insn: mov r0, r0 (nop)
3265 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
3266
3267 Don't set r14 in cleanup for BX. */
3268
3269 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
3270
3271 dsc->u.branch.cond = cond;
3272 dsc->u.branch.link = link;
3273 dsc->u.branch.exchange = 1;
3274
3275 dsc->modinsn[0] = ARM_NOP;
3276
3277 dsc->cleanup = &cleanup_branch;
3278
3279 return 0;
3280 }
3281
3282 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
3283
3284 static void
3285 cleanup_alu_imm (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3286 struct regcache *regs, struct displaced_step_closure *dsc)
3287 {
3288 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3289 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3290 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3291 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
3292 }
3293
3294 static int
3295 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3296 struct displaced_step_closure *dsc)
3297 {
3298 unsigned int rn = bits (insn, 16, 19);
3299 unsigned int rd = bits (insn, 12, 15);
3300 unsigned int op = bits (insn, 21, 24);
3301 int is_mov = (op == 0xd);
3302 ULONGEST rd_val, rn_val;
3303 CORE_ADDR from = dsc->insn_addr;
3304
3305 if (!insn_references_pc (insn, 0x000ff000ul))
3306 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
3307
3308 if (debug_displaced)
3309 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
3310 "%.8lx\n", is_mov ? "move" : "ALU",
3311 (unsigned long) insn);
3312
3313 /* Instruction is of form:
3314
3315 <op><cond> rd, [rn,] #imm
3316
3317 Rewrite as:
3318
3319 Preparation: tmp1, tmp2 <- r0, r1;
3320 r0, r1 <- rd, rn
3321 Insn: <op><cond> r0, r1, #imm
3322 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
3323 */
3324
3325 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3326 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3327 rn_val = displaced_read_reg (regs, from, rn);
3328 rd_val = displaced_read_reg (regs, from, rd);
3329 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
3330 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
3331 dsc->rd = rd;
3332
3333 if (is_mov)
3334 dsc->modinsn[0] = insn & 0xfff00fff;
3335 else
3336 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
3337
3338 dsc->cleanup = &cleanup_alu_imm;
3339
3340 return 0;
3341 }
3342
3343 /* Copy/cleanup arithmetic/logic insns with register RHS. */
3344
3345 static void
3346 cleanup_alu_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3347 struct regcache *regs, struct displaced_step_closure *dsc)
3348 {
3349 ULONGEST rd_val;
3350 int i;
3351
3352 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3353
3354 for (i = 0; i < 3; i++)
3355 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
3356
3357 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
3358 }
3359
3360 static int
3361 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3362 struct displaced_step_closure *dsc)
3363 {
3364 unsigned int rn = bits (insn, 16, 19);
3365 unsigned int rm = bits (insn, 0, 3);
3366 unsigned int rd = bits (insn, 12, 15);
3367 unsigned int op = bits (insn, 21, 24);
3368 int is_mov = (op == 0xd);
3369 ULONGEST rd_val, rn_val, rm_val;
3370 CORE_ADDR from = dsc->insn_addr;
3371
3372 if (!insn_references_pc (insn, 0x000ff00ful))
3373 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
3374
3375 if (debug_displaced)
3376 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
3377 is_mov ? "move" : "ALU", (unsigned long) insn);
3378
3379 /* Instruction is of form:
3380
3381 <op><cond> rd, [rn,] rm [, <shift>]
3382
3383 Rewrite as:
3384
3385 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
3386 r0, r1, r2 <- rd, rn, rm
3387 Insn: <op><cond> r0, r1, r2 [, <shift>]
3388 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
3389 */
3390
3391 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3392 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3393 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
3394 rd_val = displaced_read_reg (regs, from, rd);
3395 rn_val = displaced_read_reg (regs, from, rn);
3396 rm_val = displaced_read_reg (regs, from, rm);
3397 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
3398 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
3399 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
3400 dsc->rd = rd;
3401
3402 if (is_mov)
3403 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
3404 else
3405 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
3406
3407 dsc->cleanup = &cleanup_alu_reg;
3408
3409 return 0;
3410 }
3411
3412 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
3413
3414 static void
3415 cleanup_alu_shifted_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3416 struct regcache *regs,
3417 struct displaced_step_closure *dsc)
3418 {
3419 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3420 int i;
3421
3422 for (i = 0; i < 4; i++)
3423 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
3424
3425 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
3426 }
3427
3428 static int
3429 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
3430 struct regcache *regs, struct displaced_step_closure *dsc)
3431 {
3432 unsigned int rn = bits (insn, 16, 19);
3433 unsigned int rm = bits (insn, 0, 3);
3434 unsigned int rd = bits (insn, 12, 15);
3435 unsigned int rs = bits (insn, 8, 11);
3436 unsigned int op = bits (insn, 21, 24);
3437 int is_mov = (op == 0xd), i;
3438 ULONGEST rd_val, rn_val, rm_val, rs_val;
3439 CORE_ADDR from = dsc->insn_addr;
3440
3441 if (!insn_references_pc (insn, 0x000fff0ful))
3442 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
3443
3444 if (debug_displaced)
3445 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
3446 "%.8lx\n", is_mov ? "move" : "ALU",
3447 (unsigned long) insn);
3448
3449 /* Instruction is of form:
3450
3451 <op><cond> rd, [rn,] rm, <shift> rs
3452
3453 Rewrite as:
3454
3455 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
3456 r0, r1, r2, r3 <- rd, rn, rm, rs
3457 Insn: <op><cond> r0, r1, r2, <shift> r3
3458 Cleanup: tmp5 <- r0
3459 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
3460 rd <- tmp5
3461 */
3462
3463 for (i = 0; i < 4; i++)
3464 dsc->tmp[i] = displaced_read_reg (regs, from, i);
3465
3466 rd_val = displaced_read_reg (regs, from, rd);
3467 rn_val = displaced_read_reg (regs, from, rn);
3468 rm_val = displaced_read_reg (regs, from, rm);
3469 rs_val = displaced_read_reg (regs, from, rs);
3470 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
3471 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
3472 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
3473 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
3474 dsc->rd = rd;
3475
3476 if (is_mov)
3477 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
3478 else
3479 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
3480
3481 dsc->cleanup = &cleanup_alu_shifted_reg;
3482
3483 return 0;
3484 }
3485
3486 /* Clean up load instructions. */
3487
3488 static void
3489 cleanup_load (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
3490 struct displaced_step_closure *dsc)
3491 {
3492 ULONGEST rt_val, rt_val2 = 0, rn_val;
3493 CORE_ADDR from = dsc->insn_addr;
3494
3495 rt_val = displaced_read_reg (regs, from, 0);
3496 if (dsc->u.ldst.xfersize == 8)
3497 rt_val2 = displaced_read_reg (regs, from, 1);
3498 rn_val = displaced_read_reg (regs, from, 2);
3499
3500 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3501 if (dsc->u.ldst.xfersize > 4)
3502 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3503 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
3504 if (!dsc->u.ldst.immed)
3505 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
3506
3507 /* Handle register writeback. */
3508 if (dsc->u.ldst.writeback)
3509 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
3510 /* Put result in right place. */
3511 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
3512 if (dsc->u.ldst.xfersize == 8)
3513 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
3514 }
3515
3516 /* Clean up store instructions. */
3517
3518 static void
3519 cleanup_store (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
3520 struct displaced_step_closure *dsc)
3521 {
3522 CORE_ADDR from = dsc->insn_addr;
3523 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
3524
3525 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3526 if (dsc->u.ldst.xfersize > 4)
3527 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3528 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
3529 if (!dsc->u.ldst.immed)
3530 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
3531 if (!dsc->u.ldst.restore_r4)
3532 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
3533
3534 /* Writeback. */
3535 if (dsc->u.ldst.writeback)
3536 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
3537 }
3538
3539 /* Copy "extra" load/store instructions. These are halfword/doubleword
3540 transfers, which have a different encoding to byte/word transfers. */
3541
3542 static int
3543 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
3544 struct regcache *regs, struct displaced_step_closure *dsc)
3545 {
3546 unsigned int op1 = bits (insn, 20, 24);
3547 unsigned int op2 = bits (insn, 5, 6);
3548 unsigned int rt = bits (insn, 12, 15);
3549 unsigned int rn = bits (insn, 16, 19);
3550 unsigned int rm = bits (insn, 0, 3);
3551 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
3552 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
3553 int immed = (op1 & 0x4) != 0;
3554 int opcode;
3555 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
3556 CORE_ADDR from = dsc->insn_addr;
3557
3558 if (!insn_references_pc (insn, 0x000ff00ful))
3559 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
3560
3561 if (debug_displaced)
3562 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
3563 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
3564 (unsigned long) insn);
3565
3566 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
3567
3568 if (opcode < 0)
3569 internal_error (__FILE__, __LINE__,
3570 _("copy_extra_ld_st: instruction decode error"));
3571
3572 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3573 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3574 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
3575 if (!immed)
3576 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
3577
3578 rt_val = displaced_read_reg (regs, from, rt);
3579 if (bytesize[opcode] == 8)
3580 rt_val2 = displaced_read_reg (regs, from, rt + 1);
3581 rn_val = displaced_read_reg (regs, from, rn);
3582 if (!immed)
3583 rm_val = displaced_read_reg (regs, from, rm);
3584
3585 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
3586 if (bytesize[opcode] == 8)
3587 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
3588 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
3589 if (!immed)
3590 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
3591
3592 dsc->rd = rt;
3593 dsc->u.ldst.xfersize = bytesize[opcode];
3594 dsc->u.ldst.rn = rn;
3595 dsc->u.ldst.immed = immed;
3596 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
3597 dsc->u.ldst.restore_r4 = 0;
3598
3599 if (immed)
3600 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
3601 ->
3602 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
3603 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
3604 else
3605 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
3606 ->
3607 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
3608 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
3609
3610 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
3611
3612 return 0;
3613 }
3614
3615 /* Copy byte/word loads and stores. */
3616
3617 static int
3618 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
3619 struct regcache *regs,
3620 struct displaced_step_closure *dsc, int load, int byte,
3621 int usermode)
3622 {
3623 int immed = !bit (insn, 25);
3624 unsigned int rt = bits (insn, 12, 15);
3625 unsigned int rn = bits (insn, 16, 19);
3626 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
3627 ULONGEST rt_val, rn_val, rm_val = 0;
3628 CORE_ADDR from = dsc->insn_addr;
3629
3630 if (!insn_references_pc (insn, 0x000ff00ful))
3631 return copy_unmodified (gdbarch, insn, "load/store", dsc);
3632
3633 if (debug_displaced)
3634 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
3635 load ? (byte ? "ldrb" : "ldr")
3636 : (byte ? "strb" : "str"), usermode ? "t" : "",
3637 (unsigned long) insn);
3638
3639 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3640 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
3641 if (!immed)
3642 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
3643 if (!load)
3644 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
3645
3646 rt_val = displaced_read_reg (regs, from, rt);
3647 rn_val = displaced_read_reg (regs, from, rn);
3648 if (!immed)
3649 rm_val = displaced_read_reg (regs, from, rm);
3650
3651 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
3652 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
3653 if (!immed)
3654 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
3655
3656 dsc->rd = rt;
3657 dsc->u.ldst.xfersize = byte ? 1 : 4;
3658 dsc->u.ldst.rn = rn;
3659 dsc->u.ldst.immed = immed;
3660 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
3661
3662 /* To write PC we can do:
3663
3664 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
3665 scratch+4: ldr r4, temp
3666 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
3667 scratch+12: add r4, r4, #8 (r4 = offset)
3668 scratch+16: add r0, r0, r4
3669 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
3670 scratch+24: <breakpoint>, scratch+28: <temp>
3671
3672 Otherwise we don't know what value to write for PC, since the offset is
3673 architecture-dependent (sometimes PC+8, sometimes PC+12). */
3674
3675 if (load || rt != 15)
3676 {
3677 dsc->u.ldst.restore_r4 = 0;
3678
3679 if (immed)
3680 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
3681 ->
3682 {ldr,str}[b]<cond> r0, [r2, #imm]. */
3683 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
3684 else
3685 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
3686 ->
3687 {ldr,str}[b]<cond> r0, [r2, r3]. */
3688 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
3689 }
3690 else
3691 {
3692 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
3693 dsc->u.ldst.restore_r4 = 1;
3694
3695 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
3696 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
3697 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
3698 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
3699 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
3700
3701 /* As above. */
3702 if (immed)
3703 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
3704 else
3705 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
3706
3707 dsc->modinsn[6] = 0x0; /* breakpoint location. */
3708 dsc->modinsn[7] = 0x0; /* scratch space. */
3709
3710 dsc->numinsns = 6;
3711 }
3712
3713 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
3714
3715 return 0;
3716 }
3717
3718 /* Cleanup LDM instructions with fully-populated register list. This is an
3719 unfortunate corner case: it's impossible to implement correctly by modifying
3720 the instruction. The issue is as follows: we have an instruction,
3721
3722 ldm rN, {r0-r15}
3723
3724 which we must rewrite to avoid loading PC. A possible solution would be to
3725 do the load in two halves, something like (with suitable cleanup
3726 afterwards):
3727
3728 mov r8, rN
3729 ldm[id][ab] r8!, {r0-r7}
3730 str r7, <temp>
3731 ldm[id][ab] r8, {r7-r14}
3732 <bkpt>
3733
3734 but at present there's no suitable place for <temp>, since the scratch space
3735 is overwritten before the cleanup routine is called. For now, we simply
3736 emulate the instruction. */
3737
3738 static void
3739 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
3740 struct displaced_step_closure *dsc)
3741 {
3742 ULONGEST from = dsc->insn_addr;
3743 int inc = dsc->u.block.increment;
3744 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
3745 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
3746 uint32_t regmask = dsc->u.block.regmask;
3747 int regno = inc ? 0 : 15;
3748 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
3749 int exception_return = dsc->u.block.load && dsc->u.block.user
3750 && (regmask & 0x8000) != 0;
3751 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
3752 int do_transfer = condition_true (dsc->u.block.cond, status);
3753 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3754
3755 if (!do_transfer)
3756 return;
3757
3758 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
3759 sensible we can do here. Complain loudly. */
3760 if (exception_return)
3761 error (_("Cannot single-step exception return"));
3762
3763 /* We don't handle any stores here for now. */
3764 gdb_assert (dsc->u.block.load != 0);
3765
3766 if (debug_displaced)
3767 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
3768 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
3769 dsc->u.block.increment ? "inc" : "dec",
3770 dsc->u.block.before ? "before" : "after");
3771
3772 while (regmask)
3773 {
3774 uint32_t memword;
3775
3776 if (inc)
3777 while (regno <= 15 && (regmask & (1 << regno)) == 0)
3778 regno++;
3779 else
3780 while (regno >= 0 && (regmask & (1 << regno)) == 0)
3781 regno--;
3782
3783 xfer_addr += bump_before;
3784
3785 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
3786 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
3787
3788 xfer_addr += bump_after;
3789
3790 regmask &= ~(1 << regno);
3791 }
3792
3793 if (dsc->u.block.writeback)
3794 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
3795 CANNOT_WRITE_PC);
3796 }
3797
3798 /* Clean up an STM which included the PC in the register list. */
3799
3800 static void
3801 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
3802 struct displaced_step_closure *dsc)
3803 {
3804 ULONGEST from = dsc->insn_addr;
3805 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
3806 int store_executed = condition_true (dsc->u.block.cond, status);
3807 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
3808 CORE_ADDR stm_insn_addr;
3809 uint32_t pc_val;
3810 long offset;
3811 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3812
3813 /* If condition code fails, there's nothing else to do. */
3814 if (!store_executed)
3815 return;
3816
3817 if (dsc->u.block.increment)
3818 {
3819 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
3820
3821 if (dsc->u.block.before)
3822 pc_stored_at += 4;
3823 }
3824 else
3825 {
3826 pc_stored_at = dsc->u.block.xfer_addr;
3827
3828 if (dsc->u.block.before)
3829 pc_stored_at -= 4;
3830 }
3831
3832 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
3833 stm_insn_addr = dsc->scratch_base;
3834 offset = pc_val - stm_insn_addr;
3835
3836 if (debug_displaced)
3837 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
3838 "STM instruction\n", offset);
3839
3840 /* Rewrite the stored PC to the proper value for the non-displaced original
3841 instruction. */
3842 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
3843 dsc->insn_addr + offset);
3844 }
3845
3846 /* Clean up an LDM which includes the PC in the register list. We clumped all
3847 the registers in the transferred list into a contiguous range r0...rX (to
3848 avoid loading PC directly and losing control of the debugged program), so we
3849 must undo that here. */
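/* For example, "ldm r11, {r4, r5, pc}" will have been executed out of line
   as "ldm r11, {r0-r2}"; this cleanup then moves r2 into the PC and r1 and
   r0 into r5 and r4, and finally restores the original values of any of
   r0-r2 that were not in the original register list.  */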
3850
3851 static void
3852 cleanup_block_load_pc (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3853 struct regcache *regs,
3854 struct displaced_step_closure *dsc)
3855 {
3856 ULONGEST from = dsc->insn_addr;
3857 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
3858 int load_executed = condition_true (dsc->u.block.cond, status), i;
3859 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
3860 unsigned int regs_loaded = bitcount (mask);
3861 unsigned int num_to_shuffle = regs_loaded, clobbered;
3862
3863 /* The method employed here will fail if the register list is fully populated
3864 (we need to avoid loading PC directly). */
3865 gdb_assert (num_to_shuffle < 16);
3866
3867 if (!load_executed)
3868 return;
3869
3870 clobbered = (1 << num_to_shuffle) - 1;
3871
3872 while (num_to_shuffle > 0)
3873 {
3874 if ((mask & (1 << write_reg)) != 0)
3875 {
3876 unsigned int read_reg = num_to_shuffle - 1;
3877
3878 if (read_reg != write_reg)
3879 {
3880 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
3881 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
3882 if (debug_displaced)
3883 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
3884 "loaded register r%d to r%d\n"), read_reg,
3885 write_reg);
3886 }
3887 else if (debug_displaced)
3888 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
3889 "r%d already in the right place\n"),
3890 write_reg);
3891
3892 clobbered &= ~(1 << write_reg);
3893
3894 num_to_shuffle--;
3895 }
3896
3897 write_reg--;
3898 }
3899
3900 /* Restore any registers we scribbled over. */
3901 for (write_reg = 0; clobbered != 0; write_reg++)
3902 {
3903 if ((clobbered & (1 << write_reg)) != 0)
3904 {
3905 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
3906 CANNOT_WRITE_PC);
3907 if (debug_displaced)
3908 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
3909 "clobbered register r%d\n"), write_reg);
3910 clobbered &= ~(1 << write_reg);
3911 }
3912 }
3913
3914 /* Perform register writeback manually. */
3915 if (dsc->u.block.writeback)
3916 {
3917 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
3918
3919 if (dsc->u.block.increment)
3920 new_rn_val += regs_loaded * 4;
3921 else
3922 new_rn_val -= regs_loaded * 4;
3923
3924 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
3925 CANNOT_WRITE_PC);
3926 }
3927 }
3928
3929 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
3930 in user-level code (in particular exception return, ldm rn, {...pc}^). */
3931
3932 static int
3933 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3934 struct displaced_step_closure *dsc)
3935 {
3936 int load = bit (insn, 20);
3937 int user = bit (insn, 22);
3938 int increment = bit (insn, 23);
3939 int before = bit (insn, 24);
3940 int writeback = bit (insn, 21);
3941 int rn = bits (insn, 16, 19);
3942 CORE_ADDR from = dsc->insn_addr;
3943
3944 /* Block transfers which don't mention PC can be run directly out-of-line. */
3945 if (rn != 15 && (insn & 0x8000) == 0)
3946 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
3947
3948 if (rn == 15)
3949 {
3950 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
3951 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
3952 }
3953
3954 if (debug_displaced)
3955 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
3956 "%.8lx\n", (unsigned long) insn);
3957
3958 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
3959 dsc->u.block.rn = rn;
3960
3961 dsc->u.block.load = load;
3962 dsc->u.block.user = user;
3963 dsc->u.block.increment = increment;
3964 dsc->u.block.before = before;
3965 dsc->u.block.writeback = writeback;
3966 dsc->u.block.cond = bits (insn, 28, 31);
3967
3968 dsc->u.block.regmask = insn & 0xffff;
3969
3970 if (load)
3971 {
3972 if ((insn & 0xffff) == 0xffff)
3973 {
3974 /* LDM with a fully-populated register list. This case is
3975 particularly tricky. Implement for now by fully emulating the
3976 instruction (which might not behave perfectly in all cases, but
3977 these instructions should be rare enough for that not to matter
3978 too much). */
3979 dsc->modinsn[0] = ARM_NOP;
3980
3981 dsc->cleanup = &cleanup_block_load_all;
3982 }
3983 else
3984 {
3985 /* LDM of a list of registers which includes PC. Implement by
3986 rewriting the list of registers to be transferred into a
3987 contiguous chunk r0...rX before doing the transfer, then shuffling
3988 registers into the correct places in the cleanup routine. */
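For example, "ldm r7, {r1, r3, pc}" (register list 0x800a) is
rewritten with the modified list 0x0007, i.e. "ldm r7, {r0-r2}".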
3989 unsigned int regmask = insn & 0xffff;
3990 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
3991 unsigned int to = 0, from = 0, i, new_rn;
3992
3993 for (i = 0; i < num_in_list; i++)
3994 dsc->tmp[i] = displaced_read_reg (regs, from, i);
3995
3996 /* Writeback makes things complicated. We need to avoid clobbering
3997 the base register with one of the registers in our modified
3998 register list, but just using a different register can't work in
3999 all cases, e.g.:
4000
4001 ldm r14!, {r0-r13,pc}
4002
4003 which would need to be rewritten as:
4004
4005 ldm rN!, {r0-r14}
4006
4007 but that can't work, because there's no free register for N.
4008
4009 Solve this by turning off the writeback bit, and emulating
4010 writeback manually in the cleanup routine. */
4011
4012 if (writeback)
4013 insn &= ~(1 << 21);
4014
4015 new_regmask = (1 << num_in_list) - 1;
4016
4017 if (debug_displaced)
4018 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
4019 "{..., pc}: original reg list %.4x, modified "
4020 "list %.4x\n"), rn, writeback ? "!" : "",
4021 (int) insn & 0xffff, new_regmask);
4022
4023 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
4024
4025 dsc->cleanup = &cleanup_block_load_pc;
4026 }
4027 }
4028 else
4029 {
4030 /* STM of a list of registers which includes PC. Run the instruction
4031 as-is, but out of line: this will store the wrong value for the PC,
4032 so we must manually fix up the memory in the cleanup routine.
4033 Doing things this way has the advantage that we can auto-detect
4034 the offset of the PC write (which is architecture-dependent) in
4035 the cleanup routine. */
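/* For example, a core that stores PC+8 will have written
   scratch_base + 8 into the PC slot; cleanup_block_store_pc reads it
   back, computes the offset 8, and rewrites the slot as insn_addr + 8,
   which is what the original STM would have stored.  */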
4036 dsc->modinsn[0] = insn;
4037
4038 dsc->cleanup = &cleanup_block_store_pc;
4039 }
4040
4041 return 0;
4042 }
4043
4044 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
4045 for Linux, where some SVC instructions must be treated specially. */
4046
4047 static void
4048 cleanup_svc (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
4049 struct displaced_step_closure *dsc)
4050 {
4051 CORE_ADDR from = dsc->insn_addr;
4052 CORE_ADDR resume_addr = from + 4;
4053
4054 if (debug_displaced)
4055 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
4056 "%.8lx\n", (unsigned long) resume_addr);
4057
4058 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
4059 }
4060
4061 static int
4062 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
4063 struct regcache *regs, struct displaced_step_closure *dsc)
4064 {
4065 CORE_ADDR from = dsc->insn_addr;
4066
4067 /* Allow OS-specific code to override SVC handling. */
4068 if (dsc->u.svc.copy_svc_os)
4069 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
4070
4071 if (debug_displaced)
4072 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
4073 (unsigned long) insn);
4074
4075 /* Preparation: none.
4076 Insn: unmodified svc.
4077 Cleanup: pc <- insn_addr + 4. */
4078
4079 dsc->modinsn[0] = insn;
4080
4081 dsc->cleanup = &cleanup_svc;
4082 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
4083 instruction. */
4084 dsc->wrote_to_pc = 1;
4085
4086 return 0;
4087 }
4088
4089 /* Copy undefined instructions. */
4090
4091 static int
4092 copy_undef (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
4093 struct displaced_step_closure *dsc)
4094 {
4095 if (debug_displaced)
4096 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
4097 (unsigned long) insn);
4098
4099 dsc->modinsn[0] = insn;
4100
4101 return 0;
4102 }
4103
4104 /* Copy unpredictable instructions. */
4105
4106 static int
4107 copy_unpred (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
4108 struct displaced_step_closure *dsc)
4109 {
4110 if (debug_displaced)
4111 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
4112 "%.8lx\n", (unsigned long) insn);
4113
4114 dsc->modinsn[0] = insn;
4115
4116 return 0;
4117 }
4118
4119 /* The decode_* functions are instruction decoding helpers. They mostly follow
4120 the presentation in the ARM ARM. */
4121
4122 static int
4123 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
4124 struct regcache *regs,
4125 struct displaced_step_closure *dsc)
4126 {
4127 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
4128 unsigned int rn = bits (insn, 16, 19);
4129
4130 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
4131 return copy_unmodified (gdbarch, insn, "cps", dsc);
4132 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
4133 return copy_unmodified (gdbarch, insn, "setend", dsc);
4134 else if ((op1 & 0x60) == 0x20)
4135 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
4136 else if ((op1 & 0x71) == 0x40)
4137 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
4138 else if ((op1 & 0x77) == 0x41)
4139 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
4140 else if ((op1 & 0x77) == 0x45)
4141 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
4142 else if ((op1 & 0x77) == 0x51)
4143 {
4144 if (rn != 0xf)
4145 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
4146 else
4147 return copy_unpred (gdbarch, insn, dsc);
4148 }
4149 else if ((op1 & 0x77) == 0x55)
4150 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
4151 else if (op1 == 0x57)
4152 switch (op2)
4153 {
4154 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
4155 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
4156 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
4157 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
4158 default: return copy_unpred (gdbarch, insn, dsc);
4159 }
4160 else if ((op1 & 0x63) == 0x43)
4161 return copy_unpred (gdbarch, insn, dsc);
4162 else if ((op2 & 0x1) == 0x0)
4163 switch (op1 & ~0x80)
4164 {
4165 case 0x61:
4166 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
4167 case 0x65:
4168 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
4169 case 0x71: case 0x75:
4170 /* pld/pldw reg. */
4171 return copy_preload_reg (gdbarch, insn, regs, dsc);
4172 case 0x63: case 0x67: case 0x73: case 0x77:
4173 return copy_unpred (gdbarch, insn, dsc);
4174 default:
4175 return copy_undef (gdbarch, insn, dsc);
4176 }
4177 else
4178 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
4179 }
4180
4181 static int
4182 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
4183 struct regcache *regs, struct displaced_step_closure *dsc)
4184 {
4185 if (bit (insn, 27) == 0)
4186 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
4187 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
4188 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
4189 {
4190 case 0x0: case 0x2:
4191 return copy_unmodified (gdbarch, insn, "srs", dsc);
4192
4193 case 0x1: case 0x3:
4194 return copy_unmodified (gdbarch, insn, "rfe", dsc);
4195
4196 case 0x4: case 0x5: case 0x6: case 0x7:
4197 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
4198
4199 case 0x8:
4200 switch ((insn & 0xe00000) >> 21)
4201 {
4202 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
4203 /* stc/stc2. */
4204 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4205
4206 case 0x2:
4207 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
4208
4209 default:
4210 return copy_undef (gdbarch, insn, dsc);
4211 }
4212
4213 case 0x9:
4214 {
4215 int rn_f = (bits (insn, 16, 19) == 0xf);
4216 switch ((insn & 0xe00000) >> 21)
4217 {
4218 case 0x1: case 0x3:
4219 /* ldc/ldc2 imm (undefined for rn == pc). */
4220 return rn_f ? copy_undef (gdbarch, insn, dsc)
4221 : copy_copro_load_store (gdbarch, insn, regs, dsc);
4222
4223 case 0x2:
4224 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
4225
4226 case 0x4: case 0x5: case 0x6: case 0x7:
4227 /* ldc/ldc2 lit (undefined for rn != pc). */
4228 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
4229 : copy_undef (gdbarch, insn, dsc);
4230
4231 default:
4232 return copy_undef (gdbarch, insn, dsc);
4233 }
4234 }
4235
4236 case 0xa:
4237 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
4238
4239 case 0xb:
4240 if (bits (insn, 16, 19) == 0xf)
4241 /* ldc/ldc2 lit. */
4242 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4243 else
4244 return copy_undef (gdbarch, insn, dsc);
4245
4246 case 0xc:
4247 if (bit (insn, 4))
4248 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
4249 else
4250 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4251
4252 case 0xd:
4253 if (bit (insn, 4))
4254 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
4255 else
4256 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4257
4258 default:
4259 return copy_undef (gdbarch, insn, dsc);
4260 }
4261 }
4262
4263 /* Decode miscellaneous instructions in dp/misc encoding space. */
4264
4265 static int
4266 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
4267 struct regcache *regs, struct displaced_step_closure *dsc)
4268 {
4269 unsigned int op2 = bits (insn, 4, 6);
4270 unsigned int op = bits (insn, 21, 22);
4271 unsigned int op1 = bits (insn, 16, 19);
4272
4273 switch (op2)
4274 {
4275 case 0x0:
4276 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
4277
4278 case 0x1:
4279 if (op == 0x1) /* bx. */
4280 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
4281 else if (op == 0x3)
4282 return copy_unmodified (gdbarch, insn, "clz", dsc);
4283 else
4284 return copy_undef (gdbarch, insn, dsc);
4285
4286 case 0x2:
4287 if (op == 0x1)
4288 /* Not really supported. */
4289 return copy_unmodified (gdbarch, insn, "bxj", dsc);
4290 else
4291 return copy_undef (gdbarch, insn, dsc);
4292
4293 case 0x3:
4294 if (op == 0x1)
4295 return copy_bx_blx_reg (gdbarch, insn, regs, dsc); /* blx register. */
4296 else
4297 return copy_undef (gdbarch, insn, dsc);
4298
4299 case 0x5:
4300 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
4301
4302 case 0x7:
4303 if (op == 0x1)
4304 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
4305 else if (op == 0x3)
4306 /* Not really supported. */
4307 return copy_unmodified (gdbarch, insn, "smc", dsc);
4308
4309 default:
4310 return copy_undef (gdbarch, insn, dsc);
4311 }
4312 }
4313
4314 static int
4315 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4316 struct displaced_step_closure *dsc)
4317 {
4318 if (bit (insn, 25))
4319 switch (bits (insn, 20, 24))
4320 {
4321 case 0x10:
4322 return copy_unmodified (gdbarch, insn, "movw", dsc);
4323
4324 case 0x14:
4325 return copy_unmodified (gdbarch, insn, "movt", dsc);
4326
4327 case 0x12: case 0x16:
4328 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
4329
4330 default:
4331 return copy_alu_imm (gdbarch, insn, regs, dsc);
4332 }
4333 else
4334 {
4335 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
4336
4337 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
4338 return copy_alu_reg (gdbarch, insn, regs, dsc);
4339 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
4340 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
4341 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
4342 return decode_miscellaneous (gdbarch, insn, regs, dsc);
4343 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
4344 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
4345 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
4346 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
4347 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
4348 return copy_unmodified (gdbarch, insn, "synch", dsc);
4349 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
4350 /* The third argument means "unprivileged". */
4351 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
4352 dsc);
4353 }
4354
4355 /* Should be unreachable. */
4356 return 1;
4357 }
4358
4359 static int
4360 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
4361 struct regcache *regs,
4362 struct displaced_step_closure *dsc)
4363 {
4364 int a = bit (insn, 25), b = bit (insn, 4);
4365 uint32_t op1 = bits (insn, 20, 24);
4366 int rn_f = bits (insn, 16, 19) == 0xf;
4367
4368 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
4369 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
4370 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
4371 else if ((!a && (op1 & 0x17) == 0x02)
4372 || (a && (op1 & 0x17) == 0x02 && !b))
4373 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
4374 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
4375 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
4376 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
4377 else if ((!a && (op1 & 0x17) == 0x03)
4378 || (a && (op1 & 0x17) == 0x03 && !b))
4379 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
4380 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
4381 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
4382 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
4383 else if ((!a && (op1 & 0x17) == 0x06)
4384 || (a && (op1 & 0x17) == 0x06 && !b))
4385 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
4386 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
4387 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
4388 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
4389 else if ((!a && (op1 & 0x17) == 0x07)
4390 || (a && (op1 & 0x17) == 0x07 && !b))
4391 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
4392
4393 /* Should be unreachable. */
4394 return 1;
4395 }
4396
4397 static int
4398 decode_media (struct gdbarch *gdbarch, uint32_t insn,
4399 struct displaced_step_closure *dsc)
4400 {
4401 switch (bits (insn, 20, 24))
4402 {
4403 case 0x00: case 0x01: case 0x02: case 0x03:
4404 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
4405
4406 case 0x04: case 0x05: case 0x06: case 0x07:
4407 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
4408
4409 case 0x08: case 0x09: case 0x0a: case 0x0b:
4410 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
4411 return copy_unmodified (gdbarch, insn,
4412 "decode/pack/unpack/saturate/reverse", dsc);
4413
4414 case 0x18:
4415 if (bits (insn, 5, 7) == 0) /* op2. */
4416 {
4417 if (bits (insn, 12, 15) == 0xf)
4418 return copy_unmodified (gdbarch, insn, "usad8", dsc);
4419 else
4420 return copy_unmodified (gdbarch, insn, "usada8", dsc);
4421 }
4422 else
4423 return copy_undef (gdbarch, insn, dsc);
4424
4425 case 0x1a: case 0x1b:
4426 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
4427 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
4428 else
4429 return copy_undef (gdbarch, insn, dsc);
4430
4431 case 0x1c: case 0x1d:
4432 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
4433 {
4434 if (bits (insn, 0, 3) == 0xf)
4435 return copy_unmodified (gdbarch, insn, "bfc", dsc);
4436 else
4437 return copy_unmodified (gdbarch, insn, "bfi", dsc);
4438 }
4439 else
4440 return copy_undef (gdbarch, insn, dsc);
4441
4442 case 0x1e: case 0x1f:
4443 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
4444 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
4445 else
4446 return copy_undef (gdbarch, insn, dsc);
4447 }
4448
4449 /* Should be unreachable. */
4450 return 1;
4451 }
4452
4453 static int
4454 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
4455 struct regcache *regs, struct displaced_step_closure *dsc)
4456 {
4457 if (bit (insn, 25))
4458 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
4459 else
4460 return copy_block_xfer (gdbarch, insn, regs, dsc);
4461 }
4462
4463 static int
4464 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
4465 struct regcache *regs, struct displaced_step_closure *dsc)
4466 {
4467 unsigned int opcode = bits (insn, 20, 24);
4468
4469 switch (opcode)
4470 {
4471 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
4472 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
4473
4474 case 0x08: case 0x0a: case 0x0c: case 0x0e:
4475 case 0x12: case 0x16:
4476 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
4477
4478 case 0x09: case 0x0b: case 0x0d: case 0x0f:
4479 case 0x13: case 0x17:
4480 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
4481
4482 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
4483 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
4484 /* Note: no writeback for these instructions. Bit 25 will always be
4485 zero though (via caller), so the following works OK. */
4486 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4487 }
4488
4489 /* Should be unreachable. */
4490 return 1;
4491 }
4492
4493 static int
4494 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
4495 struct regcache *regs, struct displaced_step_closure *dsc)
4496 {
4497 unsigned int op1 = bits (insn, 20, 25);
4498 int op = bit (insn, 4);
4499 unsigned int coproc = bits (insn, 8, 11);
4500 unsigned int rn = bits (insn, 16, 19);
4501
4502 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
4503 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
4504 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
4505 && (coproc & 0xe) != 0xa)
4506 /* stc/stc2. */
4507 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4508 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
4509 && (coproc & 0xe) != 0xa)
4510 /* ldc/ldc2 imm/lit. */
4511 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4512 else if ((op1 & 0x3e) == 0x00)
4513 return copy_undef (gdbarch, insn, dsc);
4514 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
4515 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
4516 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
4517 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
4518 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
4519 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
4520 else if ((op1 & 0x30) == 0x20 && !op)
4521 {
4522 if ((coproc & 0xe) == 0xa)
4523 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
4524 else
4525 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4526 }
4527 else if ((op1 & 0x30) == 0x20 && op)
4528 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
4529 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
4530 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
4531 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
4532 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
4533 else if ((op1 & 0x30) == 0x30)
4534 return copy_svc (gdbarch, insn, to, regs, dsc);
4535 else
4536 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
4537 }
4538
4539 void
4540 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
4541 CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
4542 struct displaced_step_closure *dsc)
4543 {
4544 int err = 0;
4545
4546 if (!displaced_in_arm_mode (regs))
4547 error (_("Displaced stepping is only supported in ARM mode"));
4548
4549 /* Most displaced instructions use a 1-instruction scratch space, so set this
4550 here and override below if/when necessary. */
4551 dsc->numinsns = 1;
4552 dsc->insn_addr = from;
4553 dsc->scratch_base = to;
4554 dsc->cleanup = NULL;
4555 dsc->wrote_to_pc = 0;
4556
4557 if ((insn & 0xf0000000) == 0xf0000000)
4558 err = decode_unconditional (gdbarch, insn, regs, dsc);
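/* Switch on bits: 0bxxxx321xxxxxxxxxxxxxxxxxxxx0xxxx. */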
4559 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
4560 {
4561 case 0x0: case 0x1: case 0x2: case 0x3:
4562 err = decode_dp_misc (gdbarch, insn, regs, dsc);
4563 break;
4564
4565 case 0x4: case 0x5: case 0x6:
4566 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
4567 break;
4568
4569 case 0x7:
4570 err = decode_media (gdbarch, insn, dsc);
4571 break;
4572
4573 case 0x8: case 0x9: case 0xa: case 0xb:
4574 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
4575 break;
4576
4577 case 0xc: case 0xd: case 0xe: case 0xf:
4578 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
4579 break;
4580 }
4581
4582 if (err)
4583 internal_error (__FILE__, __LINE__,
4584 _("arm_process_displaced_insn: Instruction decode error"));
4585 }
4586
4587 /* Actually set up the scratch space for a displaced instruction. */
4588
4589 void
4590 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
4591 CORE_ADDR to, struct displaced_step_closure *dsc)
4592 {
4593 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4594 unsigned int i;
4595 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4596
4597 /* Poke modified instruction(s). */
4598 for (i = 0; i < dsc->numinsns; i++)
4599 {
4600 if (debug_displaced)
4601 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
4602 "%.8lx\n", (unsigned long) dsc->modinsn[i],
4603 (unsigned long) to + i * 4);
4604 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
4605 dsc->modinsn[i]);
4606 }
4607
4608 /* Put breakpoint afterwards. */
4609 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
4610 tdep->arm_breakpoint_size);
4611
4612 if (debug_displaced)
4613 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
4614 paddress (gdbarch, from), paddress (gdbarch, to));
4615 }
4616
4617 /* Entry point for copying an instruction into scratch space for displaced
4618 stepping. */
4619
4620 struct displaced_step_closure *
4621 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
4622 CORE_ADDR from, CORE_ADDR to,
4623 struct regcache *regs)
4624 {
4625 struct displaced_step_closure *dsc
4626 = xmalloc (sizeof (struct displaced_step_closure));
4627 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4628 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
4629
4630 if (debug_displaced)
4631 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
4632 "at %.8lx\n", (unsigned long) insn,
4633 (unsigned long) from);
4634
4635 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
4636 arm_displaced_init_closure (gdbarch, from, to, dsc);
4637
4638 return dsc;
4639 }
4640
4641 /* Entry point for cleaning things up after a displaced instruction has been
4642 single-stepped. */
4643
4644 void
4645 arm_displaced_step_fixup (struct gdbarch *gdbarch,
4646 struct displaced_step_closure *dsc,
4647 CORE_ADDR from, CORE_ADDR to,
4648 struct regcache *regs)
4649 {
4650 if (dsc->cleanup)
4651 dsc->cleanup (gdbarch, regs, dsc);
4652
4653 if (!dsc->wrote_to_pc)
4654 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
4655 }
4656
4657 #include "bfd-in2.h"
4658 #include "libcoff.h"
4659
4660 static int
4661 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
4662 {
4663 if (arm_pc_is_thumb (memaddr))
4664 {
4665 static asymbol *asym;
4666 static combined_entry_type ce;
4667 static struct coff_symbol_struct csym;
4668 static struct bfd fake_bfd;
4669 static bfd_target fake_target;
4670
4671 if (csym.native == NULL)
4672 {
4673 /* Create a fake symbol vector containing a Thumb symbol.
4674 This is solely so that the code in print_insn_little_arm()
4675 and print_insn_big_arm() in opcodes/arm-dis.c will detect
4676 the presence of a Thumb symbol and switch to decoding
4677 Thumb instructions. */
4678
4679 fake_target.flavour = bfd_target_coff_flavour;
4680 fake_bfd.xvec = &fake_target;
4681 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
4682 csym.native = &ce;
4683 csym.symbol.the_bfd = &fake_bfd;
4684 csym.symbol.name = "fake";
4685 asym = (asymbol *) & csym;
4686 }
4687
4688 memaddr = UNMAKE_THUMB_ADDR (memaddr);
4689 info->symbols = &asym;
4690 }
4691 else
4692 info->symbols = NULL;
4693
4694 if (info->endian == BFD_ENDIAN_BIG)
4695 return print_insn_big_arm (memaddr, info);
4696 else
4697 return print_insn_little_arm (memaddr, info);
4698 }
4699
4700 /* The following define instruction sequences that will cause ARM
4701 CPUs to take an undefined instruction trap. These are used to
4702 signal a breakpoint to GDB.
4703
4704 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
4705 modes. A different instruction is required for each mode. The ARM
4706 CPUs can also be big or little endian. Thus four different
4707 instructions are needed to support all cases.
4708
4709 Note: ARMv4 defines several new instructions that will take the
4710 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
4711 not in fact add the new instructions. The new undefined
4712 instructions in ARMv4 are all instructions that had no defined
4713 behaviour in earlier chips. There is no guarantee that they will
4714 raise an exception, but may be treated as NOP's. In practice, it
4715 may only safe to rely on instructions matching:
4716
4717 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
4718 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
4719 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
4720
4721 Even this may only be true if the condition predicate is true. The
4722 following use a condition predicate of ALWAYS so it is always TRUE.
4723
4724 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
4725 and NetBSD all use a software interrupt rather than an undefined
4726 instruction to force a trap. This can be handled by the
4727 abi-specific code during establishment of the gdbarch vector. */
4728
4729 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
4730 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
4731 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
4732 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
4733
4734 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
4735 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
4736 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
4737 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
4738
4739 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
4740 the program counter value to determine whether a 16-bit or 32-bit
4741 breakpoint should be used. It returns a pointer to a string of
4742 bytes that encode a breakpoint instruction, stores the length of
4743 the string to *lenptr, and adjusts the program counter (if
4744 necessary) to point to the actual memory location where the
4745 breakpoint should be inserted. */
4746
4747 static const unsigned char *
4748 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
4749 {
4750 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4751
4752 if (arm_pc_is_thumb (*pcptr))
4753 {
4754 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
4755 *lenptr = tdep->thumb_breakpoint_size;
4756 return tdep->thumb_breakpoint;
4757 }
4758 else
4759 {
4760 *lenptr = tdep->arm_breakpoint_size;
4761 return tdep->arm_breakpoint;
4762 }
4763 }
4764
4765 /* Extract from REGS, which contains the (raw) register state, a
4766 function return value of type TYPE, and copy that, in virtual
4767 format, into VALBUF. */
4768
4769 static void
4770 arm_extract_return_value (struct type *type, struct regcache *regs,
4771 gdb_byte *valbuf)
4772 {
4773 struct gdbarch *gdbarch = get_regcache_arch (regs);
4774 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4775
4776 if (TYPE_CODE_FLT == TYPE_CODE (type))
4777 {
4778 switch (gdbarch_tdep (gdbarch)->fp_model)
4779 {
4780 case ARM_FLOAT_FPA:
4781 {
4782 /* The value is in register F0 in internal format. We need to
4783 extract the raw value and then convert it to the desired
4784 internal type. */
4785 bfd_byte tmpbuf[FP_REGISTER_SIZE];
4786
4787 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
4788 convert_from_extended (floatformat_from_type (type), tmpbuf,
4789 valbuf, gdbarch_byte_order (gdbarch));
4790 }
4791 break;
4792
4793 case ARM_FLOAT_SOFT_FPA:
4794 case ARM_FLOAT_SOFT_VFP:
4795 /* ARM_FLOAT_VFP can arise if this is a variadic function so
4796 not using the VFP ABI code. */
4797 case ARM_FLOAT_VFP:
4798 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
4799 if (TYPE_LENGTH (type) > 4)
4800 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
4801 valbuf + INT_REGISTER_SIZE);
4802 break;
4803
4804 default:
4805 internal_error
4806 (__FILE__, __LINE__,
4807 _("arm_extract_return_value: Floating point model not supported"));
4808 break;
4809 }
4810 }
4811 else if (TYPE_CODE (type) == TYPE_CODE_INT
4812 || TYPE_CODE (type) == TYPE_CODE_CHAR
4813 || TYPE_CODE (type) == TYPE_CODE_BOOL
4814 || TYPE_CODE (type) == TYPE_CODE_PTR
4815 || TYPE_CODE (type) == TYPE_CODE_REF
4816 || TYPE_CODE (type) == TYPE_CODE_ENUM)
4817 {
4818 /* If the type is a plain integer, then the access is
4819 straightforward. Otherwise we have to play around a bit more. */
4820 int len = TYPE_LENGTH (type);
4821 int regno = ARM_A1_REGNUM;
4822 ULONGEST tmp;
4823
4824 while (len > 0)
4825 {
4826 /* By using store_unsigned_integer we avoid having to do
4827 anything special for small big-endian values. */
4828 regcache_cooked_read_unsigned (regs, regno++, &tmp);
4829 store_unsigned_integer (valbuf,
4830 (len > INT_REGISTER_SIZE
4831 ? INT_REGISTER_SIZE : len),
4832 byte_order, tmp);
4833 len -= INT_REGISTER_SIZE;
4834 valbuf += INT_REGISTER_SIZE;
4835 }
4836 }
4837 else
4838 {
4839 /* For a structure or union the behaviour is as if the value had
4840 been stored to word-aligned memory and then loaded into
4841 registers with 32-bit load instruction(s). */
4842 int len = TYPE_LENGTH (type);
4843 int regno = ARM_A1_REGNUM;
4844 bfd_byte tmpbuf[INT_REGISTER_SIZE];
4845
4846 while (len > 0)
4847 {
4848 regcache_cooked_read (regs, regno++, tmpbuf);
4849 memcpy (valbuf, tmpbuf,
4850 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
4851 len -= INT_REGISTER_SIZE;
4852 valbuf += INT_REGISTER_SIZE;
4853 }
4854 }
4855 }
4856
4857
4858 /* Will a function return an aggregate type in memory or in a
4859 register? Return 0 if an aggregate type can be returned in a
4860 register, 1 if it must be returned in memory. */
4861
4862 static int
4863 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
4864 {
4865 int nRc;
4866 enum type_code code;
4867
4868 CHECK_TYPEDEF (type);
4869
4870 /* In the ARM ABI, "integer" like aggregate types are returned in
4871 registers. For an aggregate type to be integer like, its size
4872 must be less than or equal to INT_REGISTER_SIZE and the
4873 offset of each addressable subfield must be zero. Note that bit
4874 fields are not addressable, and all addressable subfields of
4875 unions always start at offset zero.
4876
4877 This function is based on the behaviour of GCC 2.95.1.
4878 See: gcc/arm.c: arm_return_in_memory() for details.
4879
4880 Note: All versions of GCC before GCC 2.95.2 do not set up the
4881 parameters correctly for a function returning the following
4882 structure: struct { float f;}; This should be returned in memory,
4883 not a register. Richard Earnshaw sent me a patch, but I do not
4884 know of any way to detect if a function like the above has been
4885 compiled with the correct calling convention. */
4886
4887 /* All aggregate types that won't fit in a register must be returned
4888 in memory. */
4889 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
4890 {
4891 return 1;
4892 }
4893
4894 /* The AAPCS says all aggregates not larger than a word are returned
4895 in a register. */
4896 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
4897 return 0;
4898
4899 /* The only aggregate types that can be returned in a register are
4900 structs and unions. Arrays must be returned in memory. */
4901 code = TYPE_CODE (type);
4902 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
4903 {
4904 return 1;
4905 }
4906
4907 /* Assume all other aggregate types can be returned in a register.
4908 Run a check for structures, unions and arrays. */
4909 nRc = 0;
4910
4911 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
4912 {
4913 int i;
4914 /* Need to check if this struct/union is "integer" like. For
4915 this to be true, its size must be less than or equal to
4916 INT_REGISTER_SIZE and the offset of each addressable
4917 subfield must be zero. Note that bit fields are not
4918 addressable, and unions always start at offset zero. If any
4919 of the subfields is a floating point type, the struct/union
4920 cannot be an integer type. */
4921
4922 /* For each field in the object, check:
4923 1) Is it FP? --> yes, nRc = 1;
4924 2) Is it addressable (bitpos != 0) and
4925 not packed (bitsize == 0)?
4926 --> yes, nRc = 1
4927 */
4928
4929 for (i = 0; i < TYPE_NFIELDS (type); i++)
4930 {
4931 enum type_code field_type_code;
4932 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, i)));
4933
4934 /* Is it a floating point type field? */
4935 if (field_type_code == TYPE_CODE_FLT)
4936 {
4937 nRc = 1;
4938 break;
4939 }
4940
4941 /* If bitpos != 0, then we have to care about it. */
4942 if (TYPE_FIELD_BITPOS (type, i) != 0)
4943 {
4944 /* Bitfields are not addressable. If the field bitsize is
4945 zero, then the field is not packed. Hence it cannot be
4946 a bitfield or any other packed type. */
4947 if (TYPE_FIELD_BITSIZE (type, i) == 0)
4948 {
4949 nRc = 1;
4950 break;
4951 }
4952 }
4953 }
4954 }
4955
4956 return nRc;
4957 }
4958
4959 /* Write into appropriate registers a function return value of type
4960 TYPE, given in virtual format. */
4961
4962 static void
4963 arm_store_return_value (struct type *type, struct regcache *regs,
4964 const gdb_byte *valbuf)
4965 {
4966 struct gdbarch *gdbarch = get_regcache_arch (regs);
4967 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4968
4969 if (TYPE_CODE (type) == TYPE_CODE_FLT)
4970 {
4971 char buf[MAX_REGISTER_SIZE];
4972
4973 switch (gdbarch_tdep (gdbarch)->fp_model)
4974 {
4975 case ARM_FLOAT_FPA:
4976
4977 convert_to_extended (floatformat_from_type (type), buf, valbuf,
4978 gdbarch_byte_order (gdbarch));
4979 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
4980 break;
4981
4982 case ARM_FLOAT_SOFT_FPA:
4983 case ARM_FLOAT_SOFT_VFP:
4984 /* ARM_FLOAT_VFP can arise if this is a variadic function so
4985 not using the VFP ABI code. */
4986 case ARM_FLOAT_VFP:
4987 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
4988 if (TYPE_LENGTH (type) > 4)
4989 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
4990 valbuf + INT_REGISTER_SIZE);
4991 break;
4992
4993 default:
4994 internal_error
4995 (__FILE__, __LINE__,
4996 _("arm_store_return_value: Floating point model not supported"));
4997 break;
4998 }
4999 }
5000 else if (TYPE_CODE (type) == TYPE_CODE_INT
5001 || TYPE_CODE (type) == TYPE_CODE_CHAR
5002 || TYPE_CODE (type) == TYPE_CODE_BOOL
5003 || TYPE_CODE (type) == TYPE_CODE_PTR
5004 || TYPE_CODE (type) == TYPE_CODE_REF
5005 || TYPE_CODE (type) == TYPE_CODE_ENUM)
5006 {
5007 if (TYPE_LENGTH (type) <= 4)
5008 {
5009 /* Values of one word or less are zero/sign-extended and
5010 returned in r0. */
5011 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5012 LONGEST val = unpack_long (type, valbuf);
5013
5014 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
5015 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
5016 }
5017 else
5018 {
5019 /* Integral values greater than one word are stored in consecutive
5020 registers starting with r0. This will always be a multiple of
5021 the register size. */
5022 int len = TYPE_LENGTH (type);
5023 int regno = ARM_A1_REGNUM;
5024
5025 while (len > 0)
5026 {
5027 regcache_cooked_write (regs, regno++, valbuf);
5028 len -= INT_REGISTER_SIZE;
5029 valbuf += INT_REGISTER_SIZE;
5030 }
5031 }
5032 }
5033 else
5034 {
5035 /* For a structure or union the behaviour is as if the value had
5036 been stored to word-aligned memory and then loaded into
5037 registers with 32-bit load instruction(s). */
5038 int len = TYPE_LENGTH (type);
5039 int regno = ARM_A1_REGNUM;
5040 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5041
5042 while (len > 0)
5043 {
5044 memcpy (tmpbuf, valbuf,
5045 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
5046 regcache_cooked_write (regs, regno++, tmpbuf);
5047 len -= INT_REGISTER_SIZE;
5048 valbuf += INT_REGISTER_SIZE;
5049 }
5050 }
5051 }
5052
5053
5054 /* Handle function return values. */
5055
5056 static enum return_value_convention
5057 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
5058 struct type *valtype, struct regcache *regcache,
5059 gdb_byte *readbuf, const gdb_byte *writebuf)
5060 {
5061 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5062 enum arm_vfp_cprc_base_type vfp_base_type;
5063 int vfp_base_count;
5064
5065 if (arm_vfp_abi_for_function (gdbarch, func_type)
5066 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
5067 {
5068 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
5069 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
5070 int i;
5071 for (i = 0; i < vfp_base_count; i++)
5072 {
5073 if (reg_char == 'q')
5074 {
5075 if (writebuf)
5076 arm_neon_quad_write (gdbarch, regcache, i,
5077 writebuf + i * unit_length);
5078
5079 if (readbuf)
5080 arm_neon_quad_read (gdbarch, regcache, i,
5081 readbuf + i * unit_length);
5082 }
5083 else
5084 {
5085 char name_buf[4];
5086 int regnum;
5087
5088 sprintf (name_buf, "%c%d", reg_char, i);
5089 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5090 strlen (name_buf));
5091 if (writebuf)
5092 regcache_cooked_write (regcache, regnum,
5093 writebuf + i * unit_length);
5094 if (readbuf)
5095 regcache_cooked_read (regcache, regnum,
5096 readbuf + i * unit_length);
5097 }
5098 }
5099 return RETURN_VALUE_REGISTER_CONVENTION;
5100 }
5101
5102 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
5103 || TYPE_CODE (valtype) == TYPE_CODE_UNION
5104 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
5105 {
5106 if (tdep->struct_return == pcc_struct_return
5107 || arm_return_in_memory (gdbarch, valtype))
5108 return RETURN_VALUE_STRUCT_CONVENTION;
5109 }
5110
5111 if (writebuf)
5112 arm_store_return_value (valtype, regcache, writebuf);
5113
5114 if (readbuf)
5115 arm_extract_return_value (valtype, regcache, readbuf);
5116
5117 return RETURN_VALUE_REGISTER_CONVENTION;
5118 }
5119
5120
5121 static int
5122 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
5123 {
5124 struct gdbarch *gdbarch = get_frame_arch (frame);
5125 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5126 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5127 CORE_ADDR jb_addr;
5128 char buf[INT_REGISTER_SIZE];
5129
5130 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
5131
5132 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
5133 INT_REGISTER_SIZE))
5134 return 0;
5135
5136 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
5137 return 1;
5138 }
5139
5140 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
5141 return the target PC. Otherwise return 0. */
5142
5143 CORE_ADDR
5144 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
5145 {
5146 char *name;
5147 int namelen;
5148 CORE_ADDR start_addr;
5149
5150 /* Find the starting address and name of the function containing the PC. */
5151 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
5152 return 0;
5153
5154 /* If PC is in a Thumb call or return stub, return the address of the
5155 target PC, which is in a register. The thunk functions are called
5156 _call_via_xx, where xx is the register name. The possible names
5157 are r0-r9, sl, fp, ip, sp, and lr. */
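/* For example, an indirect call through r3 goes via the "_call_via_r3"
   stub, and the target PC is simply the current value of r3.  */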
5158 if (strncmp (name, "_call_via_", 10) == 0)
5159 {
5160 /* Use the name suffix to determine which register contains the
5161 target PC. */
5162 static char *table[15] =
5163 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
5164 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
5165 };
5166 int regno;
5167 int offset = strlen (name) - 2;
5168
5169 for (regno = 0; regno <= 14; regno++)
5170 if (strcmp (&name[offset], table[regno]) == 0)
5171 return get_frame_register_unsigned (frame, regno);
5172 }
5173
5174 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
5175 non-interworking calls to foo. We could decode the stubs
5176 to find the target but it's easier to use the symbol table. */
5177 namelen = strlen (name);
5178 if (name[0] == '_' && name[1] == '_'
5179 && ((namelen > 2 + strlen ("_from_thumb")
5180 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
5181 strlen ("_from_thumb")) == 0)
5182 || (namelen > 2 + strlen ("_from_arm")
5183 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
5184 strlen ("_from_arm")) == 0)))
5185 {
5186 char *target_name;
5187 int target_len = namelen - 2;
5188 struct minimal_symbol *minsym;
5189 struct objfile *objfile;
5190 struct obj_section *sec;
5191
5192 if (name[namelen - 1] == 'b')
5193 target_len -= strlen ("_from_thumb");
5194 else
5195 target_len -= strlen ("_from_arm");
5196
5197 target_name = alloca (target_len + 1);
5198 memcpy (target_name, name + 2, target_len);
5199 target_name[target_len] = '\0';
5200
5201 sec = find_pc_section (pc);
5202 objfile = (sec == NULL) ? NULL : sec->objfile;
5203 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
5204 if (minsym != NULL)
5205 return SYMBOL_VALUE_ADDRESS (minsym);
5206 else
5207 return 0;
5208 }
5209
5210 return 0; /* not a stub */
5211 }
5212
5213 static void
5214 set_arm_command (char *args, int from_tty)
5215 {
5216 printf_unfiltered (_("\
5217 \"set arm\" must be followed by an apporpriate subcommand.\n"));
5218 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
5219 }
5220
5221 static void
5222 show_arm_command (char *args, int from_tty)
5223 {
5224 cmd_show_list (showarmcmdlist, from_tty, "");
5225 }
5226
5227 static void
5228 arm_update_current_architecture (void)
5229 {
5230 struct gdbarch_info info;
5231
5232 /* If the current architecture is not ARM, we have nothing to do. */
5233 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
5234 return;
5235
5236 /* Update the architecture. */
5237 gdbarch_info_init (&info);
5238
5239 if (!gdbarch_update_p (info))
5240 internal_error (__FILE__, __LINE__, _("could not update architecture"));
5241 }
5242
5243 static void
5244 set_fp_model_sfunc (char *args, int from_tty,
5245 struct cmd_list_element *c)
5246 {
5247 enum arm_float_model fp_model;
5248
5249 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
5250 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
5251 {
5252 arm_fp_model = fp_model;
5253 break;
5254 }
5255
5256 if (fp_model == ARM_FLOAT_LAST)
5257 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
5258 current_fp_model);
5259
5260 arm_update_current_architecture ();
5261 }
5262
5263 static void
5264 show_fp_model (struct ui_file *file, int from_tty,
5265 struct cmd_list_element *c, const char *value)
5266 {
5267 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5268
5269 if (arm_fp_model == ARM_FLOAT_AUTO
5270 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
5271 fprintf_filtered (file, _("\
5272 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
5273 fp_model_strings[tdep->fp_model]);
5274 else
5275 fprintf_filtered (file, _("\
5276 The current ARM floating point model is \"%s\".\n"),
5277 fp_model_strings[arm_fp_model]);
5278 }
5279
5280 static void
5281 arm_set_abi (char *args, int from_tty,
5282 struct cmd_list_element *c)
5283 {
5284 enum arm_abi_kind arm_abi;
5285
5286 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
5287 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
5288 {
5289 arm_abi_global = arm_abi;
5290 break;
5291 }
5292
5293 if (arm_abi == ARM_ABI_LAST)
5294 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
5295 arm_abi_string);
5296
5297 arm_update_current_architecture ();
5298 }
5299
5300 static void
5301 arm_show_abi (struct ui_file *file, int from_tty,
5302 struct cmd_list_element *c, const char *value)
5303 {
5304 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5305
5306 if (arm_abi_global == ARM_ABI_AUTO
5307 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
5308 fprintf_filtered (file, _("\
5309 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
5310 arm_abi_strings[tdep->arm_abi]);
5311 else
5312 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
5313 arm_abi_string);
5314 }
5315
5316 static void
5317 arm_show_fallback_mode (struct ui_file *file, int from_tty,
5318 struct cmd_list_element *c, const char *value)
5319 {
5320 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5321
5322 fprintf_filtered (file, _("\
5323 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
5324 arm_fallback_mode_string);
5325 }
5326
5327 static void
5328 arm_show_force_mode (struct ui_file *file, int from_tty,
5329 struct cmd_list_element *c, const char *value)
5330 {
5331 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5332
5333 fprintf_filtered (file, _("\
5334 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
5335 arm_force_mode_string);
5336 }
5337
5338 /* If the user changes the register disassembly style used for the
5339 "info registers" and other commands, we have to also switch the style
5340 used in opcodes for disassembly output. This function is run by the
5341 "set arm disassembler" command, and does that. */
5342
5343 static void
5344 set_disassembly_style_sfunc (char *args, int from_tty,
5345 struct cmd_list_element *c)
5346 {
5347 set_disassembly_style ();
5348 }
5349 \f
5350 /* Return the ARM register name corresponding to register I. */
5351 static const char *
5352 arm_register_name (struct gdbarch *gdbarch, int i)
5353 {
5354 const int num_regs = gdbarch_num_regs (gdbarch);
5355
5356 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
5357 && i >= num_regs && i < num_regs + 32)
5358 {
5359 static const char *const vfp_pseudo_names[] = {
5360 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
5361 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
5362 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
5363 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
5364 };
5365
5366 return vfp_pseudo_names[i - num_regs];
5367 }
5368
5369 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
5370 && i >= num_regs + 32 && i < num_regs + 32 + 16)
5371 {
5372 static const char *const neon_pseudo_names[] = {
5373 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
5374 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
5375 };
5376
5377 return neon_pseudo_names[i - num_regs - 32];
5378 }
5379
5380 if (i >= ARRAY_SIZE (arm_register_names))
5381 /* These registers are only supported on targets which supply
5382 an XML description. */
5383 return "";
5384
5385 return arm_register_names[i];
5386 }
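/* For example (illustrative only, assuming the usual layout where the
   raw registers come first): with gdbarch_num_regs (gdbarch) == N, the
   pseudo register N + 5 is named "s5" and pseudo register N + 32 + 2 is
   named "q2"; indices beyond the static name table that are not covered
   by a target description yield the empty string.  */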
5387
5388 static void
5389 set_disassembly_style (void)
5390 {
5391 int current;
5392
5393 /* Find the style that the user wants. */
5394 for (current = 0; current < num_disassembly_options; current++)
5395 if (disassembly_style == valid_disassembly_styles[current])
5396 break;
5397 gdb_assert (current < num_disassembly_options);
5398
5399 /* Synchronize the disassembler. */
5400 set_arm_regname_option (current);
5401 }
5402
5403 /* Test whether the COFF symbol-specific value VAL corresponds to a
5404 Thumb function. */
5405
5406 static int
5407 coff_sym_is_thumb (int val)
5408 {
5409 return (val == C_THUMBEXT
5410 || val == C_THUMBSTAT
5411 || val == C_THUMBEXTFUNC
5412 || val == C_THUMBSTATFUNC
5413 || val == C_THUMBLABEL);
5414 }
5415
5416 /* arm_coff_make_msymbol_special()
5417 arm_elf_make_msymbol_special()
5418
5419 These functions test whether the COFF or ELF symbol corresponds to
5420 an address in thumb code, and set a "special" bit in a minimal
5421 symbol to indicate that it does. */
5422
5423 static void
5424 arm_elf_make_msymbol_special (asymbol *sym, struct minimal_symbol *msym)
5425 {
5426 /* Thumb symbols are of type STT_LOPROC, (synonymous with
5427 STT_ARM_TFUNC). */
5428 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
5429 == STT_LOPROC)
5430 MSYMBOL_SET_SPECIAL (msym);
5431 }
5432
5433 static void
5434 arm_coff_make_msymbol_special (int val, struct minimal_symbol *msym)
5435 {
5436 if (coff_sym_is_thumb (val))
5437 MSYMBOL_SET_SPECIAL (msym);
5438 }
5439
5440 static void
5441 arm_objfile_data_free (struct objfile *objfile, void *arg)
5442 {
5443 struct arm_per_objfile *data = arg;
5444 unsigned int i;
5445
5446 for (i = 0; i < objfile->obfd->section_count; i++)
5447 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
5448 }
5449
5450 static void
5451 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
5452 asymbol *sym)
5453 {
5454 const char *name = bfd_asymbol_name (sym);
5455 struct arm_per_objfile *data;
5456 VEC(arm_mapping_symbol_s) **map_p;
5457 struct arm_mapping_symbol new_map_sym;
5458
5459 gdb_assert (name[0] == '$');
5460 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
5461 return;
5462
5463 data = objfile_data (objfile, arm_objfile_data_key);
5464 if (data == NULL)
5465 {
5466 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
5467 struct arm_per_objfile);
5468 set_objfile_data (objfile, arm_objfile_data_key, data);
5469 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
5470 objfile->obfd->section_count,
5471 VEC(arm_mapping_symbol_s) *);
5472 }
5473 map_p = &data->section_maps[bfd_get_section (sym)->index];
5474
5475 new_map_sym.value = sym->value;
5476 new_map_sym.type = name[1];
5477
5478 /* Assume that most mapping symbols appear in order of increasing
5479 value. If they were randomly distributed, it would be faster to
5480 always push here and then sort at first use. */
5481 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
5482 {
5483 struct arm_mapping_symbol *prev_map_sym;
5484
5485 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
5486 if (prev_map_sym->value >= sym->value)
5487 {
5488 unsigned int idx;
5489 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
5490 arm_compare_mapping_symbols);
5491 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
5492 return;
5493 }
5494 }
5495
5496 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
5497 }
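/* As an illustration of the ARM mapping-symbol convention handled here
   (example addresses only): a section containing "$a" at offset 0x0,
   "$d" at 0x100 and "$t" at 0x108 marks ARM code in [0x0, 0x100),
   literal-pool data in [0x100, 0x108) and Thumb code from 0x108 on.
   Keeping the per-section vector sorted by value, as above, lets later
   lookups binary-search for the entry covering a given address.  */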
5498
5499 static void
5500 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
5501 {
5502 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
5503
5504 /* If necessary, set the T bit. */
5505 if (arm_apcs_32)
5506 {
5507 ULONGEST val;
5508 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
5509 if (arm_pc_is_thumb (pc))
5510 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM, val | CPSR_T);
5511 else
5512 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
5513 val & ~(ULONGEST) CPSR_T);
5514 }
5515 }
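/* Worked example (assuming CPSR_T is the Thumb state bit, bit 5, i.e.
   0x20): with CPSR currently reading 0x600001d3, writing a PC that lies
   in Thumb code leaves CPSR at 0x600001f3, while writing a PC in ARM
   code clears the bit again, giving 0x600001d3.  */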
5516
5517 /* Read the contents of a NEON quad register, by reading from two
5518 double registers. This is used to implement the quad pseudo
5519 registers, and for argument passing in case the quad registers are
5520 missing; vectors are passed in quad registers when using the VFP
5521 ABI, even if a NEON unit is not present. REGNUM is the index of
5522 the quad register, in [0, 15]. */
5523
5524 static void
5525 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
5526 int regnum, gdb_byte *buf)
5527 {
5528 char name_buf[4];
5529 gdb_byte reg_buf[8];
5530 int offset, double_regnum;
5531
5532 sprintf (name_buf, "d%d", regnum << 1);
5533 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5534 strlen (name_buf));
5535
5536 /* d0 is always the least significant half of q0. */
5537 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
5538 offset = 8;
5539 else
5540 offset = 0;
5541
5542 regcache_raw_read (regcache, double_regnum, reg_buf);
5543 memcpy (buf + offset, reg_buf, 8);
5544
5545 offset = 8 - offset;
5546 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
5547 memcpy (buf + offset, reg_buf, 8);
5548 }
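/* For example, reading the quad pseudo register q1 (REGNUM == 1) reads
   the raw registers d2 and d3: on a little-endian target d2 fills bytes
   0-7 of BUF and d3 bytes 8-15; on a big-endian target the two halves
   are swapped, so that d2 still provides the least significant half of
   q1.  */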
5549
5550 static void
5551 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
5552 int regnum, gdb_byte *buf)
5553 {
5554 const int num_regs = gdbarch_num_regs (gdbarch);
5555 char name_buf[4];
5556 gdb_byte reg_buf[8];
5557 int offset, double_regnum;
5558
5559 gdb_assert (regnum >= num_regs);
5560 regnum -= num_regs;
5561
5562 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
5563 /* Quad-precision register. */
5564 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
5565 else
5566 {
5567 /* Single-precision register. */
5568 gdb_assert (regnum < 32);
5569
5570 /* s0 is always the least significant half of d0. */
5571 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
5572 offset = (regnum & 1) ? 0 : 4;
5573 else
5574 offset = (regnum & 1) ? 4 : 0;
5575
5576 sprintf (name_buf, "d%d", regnum >> 1);
5577 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5578 strlen (name_buf));
5579
5580 regcache_raw_read (regcache, double_regnum, reg_buf);
5581 memcpy (buf, reg_buf + offset, 4);
5582 }
5583 }
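/* For example, the pseudo register s5 is fetched from the raw register
   d2 ("d%d" with regnum >> 1 == 2).  On either byte order the even
   single (s4) is the less significant half of d2 and the odd single
   (s5) the more significant half; only the byte offset within the
   8-byte buffer differs, as computed above.  */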
5584
5585 /* Store the contents of BUF to a NEON quad register, by writing to
5586 two double registers. This is used to implement the quad pseudo
5587 registers, and for argument passing in case the quad registers are
5588 missing; vectors are passed in quad registers when using the VFP
5589 ABI, even if a NEON unit is not present. REGNUM is the index
5590 of the quad register, in [0, 15]. */
5591
5592 static void
5593 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
5594 int regnum, const gdb_byte *buf)
5595 {
5596 char name_buf[4];
5597 gdb_byte reg_buf[8];
5598 int offset, double_regnum;
5599
5600 sprintf (name_buf, "d%d", regnum << 1);
5601 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5602 strlen (name_buf));
5603
5604 /* d0 is always the least significant half of q0. */
5605 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
5606 offset = 8;
5607 else
5608 offset = 0;
5609
5610 regcache_raw_write (regcache, double_regnum, buf + offset);
5611 offset = 8 - offset;
5612 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
5613 }
5614
5615 static void
5616 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
5617 int regnum, const gdb_byte *buf)
5618 {
5619 const int num_regs = gdbarch_num_regs (gdbarch);
5620 char name_buf[4];
5621 gdb_byte reg_buf[8];
5622 int offset, double_regnum;
5623
5624 gdb_assert (regnum >= num_regs);
5625 regnum -= num_regs;
5626
5627 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
5628 /* Quad-precision register. */
5629 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
5630 else
5631 {
5632 /* Single-precision register. */
5633 gdb_assert (regnum < 32);
5634
5635 /* s0 is always the least significant half of d0. */
5636 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
5637 offset = (regnum & 1) ? 0 : 4;
5638 else
5639 offset = (regnum & 1) ? 4 : 0;
5640
5641 sprintf (name_buf, "d%d", regnum >> 1);
5642 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5643 strlen (name_buf));
5644
5645 regcache_raw_read (regcache, double_regnum, reg_buf);
5646 memcpy (reg_buf + offset, buf, 4);
5647 regcache_raw_write (regcache, double_regnum, reg_buf);
5648 }
5649 }
5650
5651 static struct value *
5652 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
5653 {
5654 const int *reg_p = baton;
5655 return value_of_register (*reg_p, frame);
5656 }
5657 \f
5658 static enum gdb_osabi
5659 arm_elf_osabi_sniffer (bfd *abfd)
5660 {
5661 unsigned int elfosabi;
5662 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
5663
5664 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
5665
5666 if (elfosabi == ELFOSABI_ARM)
5667 /* GNU tools use this value. Check note sections in this case,
5668 as well. */
5669 bfd_map_over_sections (abfd,
5670 generic_elf_osabi_sniff_abi_tag_sections,
5671 &osabi);
5672
5673 /* Anything else will be handled by the generic ELF sniffer. */
5674 return osabi;
5675 }
5676
5677 \f
5678 /* Initialize the current architecture based on INFO. If possible,
5679 re-use an architecture from ARCHES, which is a list of
5680 architectures already created during this debugging session.
5681
5682 Called e.g. at program startup, when reading a core file, and when
5683 reading a binary file. */
5684
5685 static struct gdbarch *
5686 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
5687 {
5688 struct gdbarch_tdep *tdep;
5689 struct gdbarch *gdbarch;
5690 struct gdbarch_list *best_arch;
5691 enum arm_abi_kind arm_abi = arm_abi_global;
5692 enum arm_float_model fp_model = arm_fp_model;
5693 struct tdesc_arch_data *tdesc_data = NULL;
5694 int i;
5695 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
5696 int have_neon = 0;
5697 int have_fpa_registers = 1;
5698
5699 /* Check any target description for validity. */
5700 if (tdesc_has_registers (info.target_desc))
5701 {
5702 /* For most registers we require GDB's default names; but also allow
5703 the numeric names for sp / lr / pc, as a convenience. */
5704 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
5705 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
5706 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
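/* Purely for illustration (the exact XML a target ships may differ), a
   minimal feature matching the checks below might look like:

     <feature name="org.gnu.gdb.arm.core">
       <reg name="r0" bitsize="32"/>
       <!-- ... r1 through r12 ... -->
       <reg name="sp" bitsize="32" type="data_ptr"/>
       <reg name="lr" bitsize="32"/>
       <reg name="pc" bitsize="32" type="code_ptr"/>
       <reg name="cpsr" bitsize="32"/>
     </feature>

   with "r13", "r14" and "r15" accepted in place of "sp", "lr" and "pc"
   per the name lists above.  */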
5707
5708 const struct tdesc_feature *feature;
5709 int valid_p;
5710
5711 feature = tdesc_find_feature (info.target_desc,
5712 "org.gnu.gdb.arm.core");
5713 if (feature == NULL)
5714 return NULL;
5715
5716 tdesc_data = tdesc_data_alloc ();
5717
5718 valid_p = 1;
5719 for (i = 0; i < ARM_SP_REGNUM; i++)
5720 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
5721 arm_register_names[i]);
5722 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
5723 ARM_SP_REGNUM,
5724 arm_sp_names);
5725 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
5726 ARM_LR_REGNUM,
5727 arm_lr_names);
5728 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
5729 ARM_PC_REGNUM,
5730 arm_pc_names);
5731 valid_p &= tdesc_numbered_register (feature, tdesc_data,
5732 ARM_PS_REGNUM, "cpsr");
5733
5734 if (!valid_p)
5735 {
5736 tdesc_data_cleanup (tdesc_data);
5737 return NULL;
5738 }
5739
5740 feature = tdesc_find_feature (info.target_desc,
5741 "org.gnu.gdb.arm.fpa");
5742 if (feature != NULL)
5743 {
5744 valid_p = 1;
5745 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
5746 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
5747 arm_register_names[i]);
5748 if (!valid_p)
5749 {
5750 tdesc_data_cleanup (tdesc_data);
5751 return NULL;
5752 }
5753 }
5754 else
5755 have_fpa_registers = 0;
5756
5757 feature = tdesc_find_feature (info.target_desc,
5758 "org.gnu.gdb.xscale.iwmmxt");
5759 if (feature != NULL)
5760 {
5761 static const char *const iwmmxt_names[] = {
5762 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
5763 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
5764 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
5765 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
5766 };
5767
5768 valid_p = 1;
5769 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
5770 valid_p
5771 &= tdesc_numbered_register (feature, tdesc_data, i,
5772 iwmmxt_names[i - ARM_WR0_REGNUM]);
5773
5774 /* Check for the control registers, but do not fail if they
5775 are missing. */
5776 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
5777 tdesc_numbered_register (feature, tdesc_data, i,
5778 iwmmxt_names[i - ARM_WR0_REGNUM]);
5779
5780 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
5781 valid_p
5782 &= tdesc_numbered_register (feature, tdesc_data, i,
5783 iwmmxt_names[i - ARM_WR0_REGNUM]);
5784
5785 if (!valid_p)
5786 {
5787 tdesc_data_cleanup (tdesc_data);
5788 return NULL;
5789 }
5790 }
5791
5792 /* If we have a VFP unit, check whether the single precision registers
5793 are present. If not, then we will synthesize them as pseudo
5794 registers. */
5795 feature = tdesc_find_feature (info.target_desc,
5796 "org.gnu.gdb.arm.vfp");
5797 if (feature != NULL)
5798 {
5799 static const char *const vfp_double_names[] = {
5800 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
5801 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
5802 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
5803 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
5804 };
5805
5806 /* Require the double precision registers. There must be either
5807 16 or 32. */
5808 valid_p = 1;
5809 for (i = 0; i < 32; i++)
5810 {
5811 valid_p &= tdesc_numbered_register (feature, tdesc_data,
5812 ARM_D0_REGNUM + i,
5813 vfp_double_names[i]);
5814 if (!valid_p)
5815 break;
5816 }
5817
5818 if (!valid_p && i != 16)
5819 {
5820 tdesc_data_cleanup (tdesc_data);
5821 return NULL;
5822 }
5823
5824 if (tdesc_unnumbered_register (feature, "s0") == 0)
5825 have_vfp_pseudos = 1;
5826
5827 have_vfp_registers = 1;
5828
5829 /* If we have VFP, also check for NEON. The architecture allows
5830 NEON without VFP (integer vector operations only), but GDB
5831 does not support that. */
5832 feature = tdesc_find_feature (info.target_desc,
5833 "org.gnu.gdb.arm.neon");
5834 if (feature != NULL)
5835 {
5836 /* NEON requires 32 double-precision registers. */
5837 if (i != 32)
5838 {
5839 tdesc_data_cleanup (tdesc_data);
5840 return NULL;
5841 }
5842
5843 /* If there are quad registers defined by the stub, use
5844 their type; otherwise (normally) provide them with
5845 the default type. */
5846 if (tdesc_unnumbered_register (feature, "q0") == 0)
5847 have_neon_pseudos = 1;
5848
5849 have_neon = 1;
5850 }
5851 }
5852 }
5853
5854 /* If we have an object to base this architecture on, try to determine
5855 its ABI. */
5856
5857 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
5858 {
5859 int ei_osabi, e_flags;
5860
5861 switch (bfd_get_flavour (info.abfd))
5862 {
5863 case bfd_target_aout_flavour:
5864 /* Assume it's an old APCS-style ABI. */
5865 arm_abi = ARM_ABI_APCS;
5866 break;
5867
5868 case bfd_target_coff_flavour:
5869 /* Assume it's an old APCS-style ABI. */
5870 /* XXX WinCE? */
5871 arm_abi = ARM_ABI_APCS;
5872 break;
5873
5874 case bfd_target_elf_flavour:
5875 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
5876 e_flags = elf_elfheader (info.abfd)->e_flags;
5877
5878 if (ei_osabi == ELFOSABI_ARM)
5879 {
5880 /* GNU tools used to use this value, but do not for EABI
5881 objects. There's nowhere to tag an EABI version
5882 anyway, so assume APCS. */
5883 arm_abi = ARM_ABI_APCS;
5884 }
5885 else if (ei_osabi == ELFOSABI_NONE)
5886 {
5887 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
5888
5889 switch (eabi_ver)
5890 {
5891 case EF_ARM_EABI_UNKNOWN:
5892 /* Assume GNU tools. */
5893 arm_abi = ARM_ABI_APCS;
5894 break;
5895
5896 case EF_ARM_EABI_VER4:
5897 case EF_ARM_EABI_VER5:
5898 arm_abi = ARM_ABI_AAPCS;
5899 /* EABI binaries default to VFP float ordering.
5900 They may also contain build attributes that can
5901 be used to identify if the VFP argument-passing
5902 ABI is in use. */
5903 if (fp_model == ARM_FLOAT_AUTO)
5904 {
5905 #ifdef HAVE_ELF
5906 switch (bfd_elf_get_obj_attr_int (info.abfd,
5907 OBJ_ATTR_PROC,
5908 Tag_ABI_VFP_args))
5909 {
5910 case 0:
5911 /* "The user intended FP parameter/result
5912 passing to conform to AAPCS, base
5913 variant". */
5914 fp_model = ARM_FLOAT_SOFT_VFP;
5915 break;
5916 case 1:
5917 /* "The user intended FP parameter/result
5918 passing to conform to AAPCS, VFP
5919 variant". */
5920 fp_model = ARM_FLOAT_VFP;
5921 break;
5922 case 2:
5923 /* "The user intended FP parameter/result
5924 passing to conform to tool chain-specific
5925 conventions" - we don't know any such
5926 conventions, so leave it as "auto". */
5927 break;
5928 default:
5929 /* Attribute value not mentioned in the
5930 October 2008 ABI, so leave it as
5931 "auto". */
5932 break;
5933 }
5934 #else
5935 fp_model = ARM_FLOAT_SOFT_VFP;
5936 #endif
5937 }
5938 break;
5939
5940 default:
5941 /* Leave it as "auto". */
5942 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
5943 break;
5944 }
5945 }
5946
5947 if (fp_model == ARM_FLOAT_AUTO)
5948 {
5949 int e_flags = elf_elfheader (info.abfd)->e_flags;
5950
5951 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
5952 {
5953 case 0:
5954 /* Leave it as "auto". Strictly speaking this case
5955 means FPA, but almost nobody uses that now, and
5956 many toolchains fail to set the appropriate bits
5957 for the floating-point model they use. */
5958 break;
5959 case EF_ARM_SOFT_FLOAT:
5960 fp_model = ARM_FLOAT_SOFT_FPA;
5961 break;
5962 case EF_ARM_VFP_FLOAT:
5963 fp_model = ARM_FLOAT_VFP;
5964 break;
5965 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
5966 fp_model = ARM_FLOAT_SOFT_VFP;
5967 break;
5968 }
5969 }
5970
5971 if (e_flags & EF_ARM_BE8)
5972 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
5973
5974 break;
5975
5976 default:
5977 /* Leave it as "auto". */
5978 break;
5979 }
5980 }
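/* To make the sniffing above concrete: an EABI version 5 ELF object
   (EF_ARM_EABI_VER5) whose Tag_ABI_VFP_args attribute is 1 ends up with
   arm_abi == ARM_ABI_AAPCS and fp_model == ARM_FLOAT_VFP (assuming GDB
   was built with ELF attribute support), whereas a legacy ELF object
   with EF_ARM_SOFT_FLOAT set is treated as APCS with the soft-FPA
   floating point model.  */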
5981
5982 /* If there is already a candidate, use it. */
5983 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
5984 best_arch != NULL;
5985 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
5986 {
5987 if (arm_abi != ARM_ABI_AUTO
5988 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
5989 continue;
5990
5991 if (fp_model != ARM_FLOAT_AUTO
5992 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
5993 continue;
5994
5995 /* There are various other properties in tdep that we do not
5996 need to check here: those derived from a target description,
5997 since gdbarches with a different target description are
5998 automatically disqualified. */
5999
6000 /* Found a match. */
6001 break;
6002 }
6003
6004 if (best_arch != NULL)
6005 {
6006 if (tdesc_data != NULL)
6007 tdesc_data_cleanup (tdesc_data);
6008 return best_arch->gdbarch;
6009 }
6010
6011 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
6012 gdbarch = gdbarch_alloc (&info, tdep);
6013
6014 /* Record additional information about the architecture we are defining.
6015 These are gdbarch discriminators, like the OSABI. */
6016 tdep->arm_abi = arm_abi;
6017 tdep->fp_model = fp_model;
6018 tdep->have_fpa_registers = have_fpa_registers;
6019 tdep->have_vfp_registers = have_vfp_registers;
6020 tdep->have_vfp_pseudos = have_vfp_pseudos;
6021 tdep->have_neon_pseudos = have_neon_pseudos;
6022 tdep->have_neon = have_neon;
6023
6024 /* Breakpoints. */
6025 switch (info.byte_order_for_code)
6026 {
6027 case BFD_ENDIAN_BIG:
6028 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
6029 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
6030 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
6031 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
6032
6033 break;
6034
6035 case BFD_ENDIAN_LITTLE:
6036 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
6037 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
6038 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
6039 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
6040
6041 break;
6042
6043 default:
6044 internal_error (__FILE__, __LINE__,
6045 _("arm_gdbarch_init: bad byte order for float format"));
6046 }
6047
6048 /* On ARM targets char defaults to unsigned. */
6049 set_gdbarch_char_signed (gdbarch, 0);
6050
6051 /* Note: for displaced stepping, this includes the breakpoint, and one word
6052 of additional scratch space. This setting isn't used for anything besides
6053 displaced stepping at present. */
6054 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
6055
6056 /* This should be low enough for everything. */
6057 tdep->lowest_pc = 0x20;
6058 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
6059
6060 /* The default, for both APCS and AAPCS, is to return small
6061 structures in registers. */
6062 tdep->struct_return = reg_struct_return;
6063
6064 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
6065 set_gdbarch_frame_align (gdbarch, arm_frame_align);
6066
6067 set_gdbarch_write_pc (gdbarch, arm_write_pc);
6068
6069 /* Frame handling. */
6070 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
6071 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
6072 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
6073
6074 frame_base_set_default (gdbarch, &arm_normal_base);
6075
6076 /* Address manipulation. */
6077 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
6078 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
6079
6080 /* Advance PC across function entry code. */
6081 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
6082
6083 /* Skip trampolines. */
6084 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
6085
6086 /* The stack grows downward. */
6087 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
6088
6089 /* Breakpoint manipulation. */
6090 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
6091
6092 /* Information about registers, etc. */
6093 set_gdbarch_deprecated_fp_regnum (gdbarch, ARM_FP_REGNUM); /* ??? */
6094 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
6095 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
6096 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
6097 set_gdbarch_register_type (gdbarch, arm_register_type);
6098
6099 /* This "info float" is FPA-specific. Use the generic version if we
6100 do not have FPA. */
6101 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
6102 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
6103
6104 /* Internal <-> external register number maps. */
6105 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
6106 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
6107
6108 set_gdbarch_register_name (gdbarch, arm_register_name);
6109
6110 /* Returning results. */
6111 set_gdbarch_return_value (gdbarch, arm_return_value);
6112
6113 /* Disassembly. */
6114 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
6115
6116 /* Minsymbol frobbing. */
6117 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
6118 set_gdbarch_coff_make_msymbol_special (gdbarch,
6119 arm_coff_make_msymbol_special);
6120 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
6121
6122 /* Virtual tables. */
6123 set_gdbarch_vbit_in_delta (gdbarch, 1);
6124
6125 /* Hook in the ABI-specific overrides, if they have been registered. */
6126 gdbarch_init_osabi (info, gdbarch);
6127
6128 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
6129
6130 /* Add some default predicates. */
6131 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
6132 dwarf2_append_unwinders (gdbarch);
6133 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
6134
6135 /* Now we have tuned the configuration, set a few final things,
6136 based on what the OS ABI has told us. */
6137
6138 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
6139 binaries are always marked. */
6140 if (tdep->arm_abi == ARM_ABI_AUTO)
6141 tdep->arm_abi = ARM_ABI_APCS;
6142
6143 /* We used to default to FPA for generic ARM, but almost nobody
6144 uses that now, and we now provide a way for the user to force
6145 the model. So default to the most useful variant. */
6146 if (tdep->fp_model == ARM_FLOAT_AUTO)
6147 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
6148
6149 if (tdep->jb_pc >= 0)
6150 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
6151
6152 /* Floating point sizes and format. */
6153 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
6154 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
6155 {
6156 set_gdbarch_double_format
6157 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
6158 set_gdbarch_long_double_format
6159 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
6160 }
6161 else
6162 {
6163 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
6164 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
6165 }
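/* As a concrete illustration of the difference: in the FPA-style
   "littlebyte_bigword" layout selected above, the double 1.0
   (0x3ff0000000000000) is stored as the byte sequence
   00 00 f0 3f 00 00 00 00 (more significant 32-bit word first, each
   word little-endian), whereas the plain IEEE little-endian layout
   stores it as 00 00 00 00 00 00 f0 3f.  */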
6166
6167 if (have_vfp_pseudos)
6168 {
6169 /* NOTE: These are the only pseudo registers used by
6170 the ARM target at the moment. If more are added, a
6171 little more care in numbering will be needed. */
6172
6173 int num_pseudos = 32;
6174 if (have_neon_pseudos)
6175 num_pseudos += 16;
6176 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
6177 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
6178 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
6179 }
6180
6181 if (tdesc_data)
6182 {
6183 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
6184
6185 tdesc_use_registers (gdbarch, info.target_desc, tdesc_data);
6186
6187 /* Override tdesc_register_type to adjust the types of VFP
6188 registers for NEON. */
6189 set_gdbarch_register_type (gdbarch, arm_register_type);
6190 }
6191
6192 /* Add standard register aliases. We add aliases even for those
6193 names which are used by the current architecture - it's simpler,
6194 and does no harm, since nothing ever lists user registers. */
6195 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
6196 user_reg_add (gdbarch, arm_register_aliases[i].name,
6197 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
6198
6199 return gdbarch;
6200 }
6201
6202 static void
6203 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
6204 {
6205 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6206
6207 if (tdep == NULL)
6208 return;
6209
6210 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
6211 (unsigned long) tdep->lowest_pc);
6212 }
6213
6214 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
6215
6216 void
6217 _initialize_arm_tdep (void)
6218 {
6219 struct ui_file *stb;
6220 long length;
6221 struct cmd_list_element *new_set, *new_show;
6222 const char *setname;
6223 const char *setdesc;
6224 const char *const *regnames;
6225 int numregs, i, j;
6226 static char *helptext;
6227 char regdesc[1024], *rdptr = regdesc;
6228 size_t rest = sizeof (regdesc);
6229
6230 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
6231
6232 arm_objfile_data_key
6233 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
6234
6235 /* Register an ELF OS ABI sniffer for ARM binaries. */
6236 gdbarch_register_osabi_sniffer (bfd_arch_arm,
6237 bfd_target_elf_flavour,
6238 arm_elf_osabi_sniffer);
6239
6240 /* Get the number of possible sets of register names defined in opcodes. */
6241 num_disassembly_options = get_arm_regname_num_options ();
6242
6243 /* Add root prefix command for all "set arm"/"show arm" commands. */
6244 add_prefix_cmd ("arm", no_class, set_arm_command,
6245 _("Various ARM-specific commands."),
6246 &setarmcmdlist, "set arm ", 0, &setlist);
6247
6248 add_prefix_cmd ("arm", no_class, show_arm_command,
6249 _("Various ARM-specific commands."),
6250 &showarmcmdlist, "show arm ", 0, &showlist);
6251
6252 /* Sync the opcode insn printer with our register viewer. */
6253 parse_arm_disassembler_option ("reg-names-std");
6254
6255 /* Initialize the array that will be passed to
6256 add_setshow_enum_cmd(). */
6257 valid_disassembly_styles
6258 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
6259 for (i = 0; i < num_disassembly_options; i++)
6260 {
6261 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
6262 valid_disassembly_styles[i] = setname;
6263 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
6264 rdptr += length;
6265 rest -= length;
6266 /* When we find the default names, tell the disassembler to use
6267 them. */
6268 if (!strcmp (setname, "std"))
6269 {
6270 disassembly_style = setname;
6271 set_arm_regname_option (i);
6272 }
6273 }
6274 /* Mark the end of valid options. */
6275 valid_disassembly_styles[num_disassembly_options] = NULL;
6276
6277 /* Create the help text. */
6278 stb = mem_fileopen ();
6279 fprintf_unfiltered (stb, "%s%s%s",
6280 _("The valid values are:\n"),
6281 regdesc,
6282 _("The default is \"std\"."));
6283 helptext = ui_file_xstrdup (stb, NULL);
6284 ui_file_delete (stb);
6285
6286 add_setshow_enum_cmd("disassembler", no_class,
6287 valid_disassembly_styles, &disassembly_style,
6288 _("Set the disassembly style."),
6289 _("Show the disassembly style."),
6290 helptext,
6291 set_disassembly_style_sfunc,
6292 NULL, /* FIXME: i18n: The disassembly style is \"%s\". */
6293 &setarmcmdlist, &showarmcmdlist);
6294
6295 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
6296 _("Set usage of ARM 32-bit mode."),
6297 _("Show usage of ARM 32-bit mode."),
6298 _("When off, a 26-bit PC will be used."),
6299 NULL,
6300 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
6301 &setarmcmdlist, &showarmcmdlist);
6302
6303 /* Add a command to allow the user to force the FPU model. */
6304 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
6305 _("Set the floating point type."),
6306 _("Show the floating point type."),
6307 _("auto - Determine the FP typefrom the OS-ABI.\n\
6308 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
6309 fpa - FPA co-processor (GCC compiled).\n\
6310 softvfp - Software FP with pure-endian doubles.\n\
6311 vfp - VFP co-processor."),
6312 set_fp_model_sfunc, show_fp_model,
6313 &setarmcmdlist, &showarmcmdlist);
6314
6315 /* Add a command to allow the user to force the ABI. */
6316 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
6317 _("Set the ABI."),
6318 _("Show the ABI."),
6319 NULL, arm_set_abi, arm_show_abi,
6320 &setarmcmdlist, &showarmcmdlist);
6321
6322 /* Add two commands to allow the user to force the assumed
6323 execution mode. */
6324 add_setshow_enum_cmd ("fallback-mode", class_support,
6325 arm_mode_strings, &arm_fallback_mode_string,
6326 _("Set the mode assumed when symbols are unavailable."),
6327 _("Show the mode assumed when symbols are unavailable."),
6328 NULL, NULL, arm_show_fallback_mode,
6329 &setarmcmdlist, &showarmcmdlist);
6330 add_setshow_enum_cmd ("force-mode", class_support,
6331 arm_mode_strings, &arm_force_mode_string,
6332 _("Set the mode assumed even when symbols are available."),
6333 _("Show the mode assumed even when symbols are available."),
6334 NULL, NULL, arm_show_force_mode,
6335 &setarmcmdlist, &showarmcmdlist);
6336
6337 /* Debugging flag. */
6338 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
6339 _("Set ARM debugging."),
6340 _("Show ARM debugging."),
6341 _("When on, arm-specific debugging is enabled."),
6342 NULL,
6343 NULL, /* FIXME: i18n: "ARM debugging is %s." */
6344 &setdebuglist, &showdebuglist);
6345 }
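/* Example use of the commands registered above from a GDB session
   (illustrative; the values available depend on the build and target):

     (gdb) show arm abi
     (gdb) set arm abi AAPCS
     (gdb) set arm fpu vfp
     (gdb) set arm disassembler std
     (gdb) set debug arm 1
*/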