gdb/arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper () */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
45
46 #include "arm-tdep.h"
47 #include "gdb/sim-arm.h"
48
49 #include "elf-bfd.h"
50 #include "coff/internal.h"
51 #include "elf/arm.h"
52
53 #include "gdb_assert.h"
54 #include "vec.h"
55
56 static int arm_debug;
57
58 /* Macros for setting and testing a bit in a minimal symbol that marks
59 it as Thumb function. The MSB of the minimal symbol's "info" field
60 is used for this purpose.
61
62 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
63 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
64
65 #define MSYMBOL_SET_SPECIAL(msym) \
66 MSYMBOL_TARGET_FLAG_1 (msym) = 1
67
68 #define MSYMBOL_IS_SPECIAL(msym) \
69 MSYMBOL_TARGET_FLAG_1 (msym)
70
71 /* Per-objfile data used for mapping symbols. */
72 static const struct objfile_data *arm_objfile_data_key;
73
74 struct arm_mapping_symbol
75 {
76 bfd_vma value;
77 char type;
78 };
79 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
80 DEF_VEC_O(arm_mapping_symbol_s);
81
82 struct arm_per_objfile
83 {
84 VEC(arm_mapping_symbol_s) **section_maps;
85 };
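/* The TYPE field above holds the letter from the ELF mapping symbol
   that opens a region: per the ARM ELF specification, "$a" marks ARM
   code, "$t" Thumb code and "$d" literal data.  arm_find_mapping_symbol
   below returns that letter ('a', 't' or 'd') for the region covering
   a given address.  */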
86
87 /* The list of available "set arm ..." and "show arm ..." commands. */
88 static struct cmd_list_element *setarmcmdlist = NULL;
89 static struct cmd_list_element *showarmcmdlist = NULL;
90
91 /* The type of floating-point to use. Keep this in sync with enum
92 arm_float_model, and the help string in _initialize_arm_tdep. */
93 static const char *fp_model_strings[] =
94 {
95 "auto",
96 "softfpa",
97 "fpa",
98 "softvfp",
99 "vfp",
100 NULL
101 };
102
103 /* A variable that can be configured by the user. */
104 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
105 static const char *current_fp_model = "auto";
106
107 /* The ABI to use. Keep this in sync with arm_abi_kind. */
108 static const char *arm_abi_strings[] =
109 {
110 "auto",
111 "APCS",
112 "AAPCS",
113 NULL
114 };
115
116 /* A variable that can be configured by the user. */
117 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
118 static const char *arm_abi_string = "auto";
119
120 /* The execution mode to assume. */
121 static const char *arm_mode_strings[] =
122 {
123 "auto",
124 "arm",
125 "thumb",
    NULL
126 };
127
128 static const char *arm_fallback_mode_string = "auto";
129 static const char *arm_force_mode_string = "auto";
130
131 /* Number of different reg name sets (options). */
132 static int num_disassembly_options;
133
134 /* The standard register names, and all the valid aliases for them. */
135 static const struct
136 {
137 const char *name;
138 int regnum;
139 } arm_register_aliases[] = {
140 /* Basic register numbers. */
141 { "r0", 0 },
142 { "r1", 1 },
143 { "r2", 2 },
144 { "r3", 3 },
145 { "r4", 4 },
146 { "r5", 5 },
147 { "r6", 6 },
148 { "r7", 7 },
149 { "r8", 8 },
150 { "r9", 9 },
151 { "r10", 10 },
152 { "r11", 11 },
153 { "r12", 12 },
154 { "r13", 13 },
155 { "r14", 14 },
156 { "r15", 15 },
157 /* Synonyms (argument and variable registers). */
158 { "a1", 0 },
159 { "a2", 1 },
160 { "a3", 2 },
161 { "a4", 3 },
162 { "v1", 4 },
163 { "v2", 5 },
164 { "v3", 6 },
165 { "v4", 7 },
166 { "v5", 8 },
167 { "v6", 9 },
168 { "v7", 10 },
169 { "v8", 11 },
170 /* Other platform-specific names for r9. */
171 { "sb", 9 },
172 { "tr", 9 },
173 /* Special names. */
174 { "ip", 12 },
175 { "sp", 13 },
176 { "lr", 14 },
177 { "pc", 15 },
178 /* Names used by GCC (not listed in the ARM EABI). */
179 { "sl", 10 },
180 { "fp", 11 },
181 /* A special name from the older ATPCS. */
182 { "wr", 7 },
183 };
184
185 static const char *const arm_register_names[] =
186 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
187 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
188 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
189 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
190 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
191 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
192 "fps", "cpsr" }; /* 24 25 */
193
194 /* Valid register name styles. */
195 static const char **valid_disassembly_styles;
196
197 /* Disassembly style to use. Default to "std" register names. */
198 static const char *disassembly_style;
199
200 /* This is used to keep the bfd arch_info in sync with the disassembly
201 style. */
202 static void set_disassembly_style_sfunc(char *, int,
203 struct cmd_list_element *);
204 static void set_disassembly_style (void);
205
206 static void convert_from_extended (const struct floatformat *, const void *,
207 void *, int);
208 static void convert_to_extended (const struct floatformat *, void *,
209 const void *, int);
210
211 static void arm_neon_quad_read (struct gdbarch *gdbarch,
212 struct regcache *regcache,
213 int regnum, gdb_byte *buf);
214 static void arm_neon_quad_write (struct gdbarch *gdbarch,
215 struct regcache *regcache,
216 int regnum, const gdb_byte *buf);
217
218 struct arm_prologue_cache
219 {
220 /* The stack pointer at the time this frame was created; i.e. the
221 caller's stack pointer when this function was called. It is used
222 to identify this frame. */
223 CORE_ADDR prev_sp;
224
225 /* The frame base for this frame is just prev_sp - frame size.
226 FRAMESIZE is the distance from the frame pointer to the
227 initial stack pointer. */
228
229 int framesize;
230
231 /* The register used to hold the frame pointer for this frame. */
232 int framereg;
233
234 /* Saved register offsets. */
235 struct trad_frame_saved_reg *saved_regs;
236 };
237
238 /* Architecture version for displaced stepping. This affects the behaviour of
239 certain instructions, and really should not be hard-wired. */
240
241 #define DISPLACED_STEPPING_ARCH_VERSION 5
242
243 /* Addresses for calling Thumb functions have the bit 0 set.
244 Here are some macros to test, set, or clear bit 0 of addresses. */
245 #define IS_THUMB_ADDR(addr) ((addr) & 1)
246 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
247 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
248
249 /* Set to true if the 32-bit mode is in use. */
250
251 int arm_apcs_32 = 1;
252
253 /* Determine if FRAME is executing in Thumb mode. */
254
255 static int
256 arm_frame_is_thumb (struct frame_info *frame)
257 {
258 CORE_ADDR cpsr;
259
260 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
261 directly (from a signal frame or dummy frame) or by interpreting
262 the saved LR (from a prologue or DWARF frame). So consult it and
263 trust the unwinders. */
264 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
265
266 return (cpsr & CPSR_T) != 0;
267 }
268
269 /* Callback for VEC_lower_bound. */
270
271 static inline int
272 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
273 const struct arm_mapping_symbol *rhs)
274 {
275 return lhs->value < rhs->value;
276 }
277
278 /* Search for the mapping symbol covering MEMADDR. If one is found,
279 return its type. Otherwise, return 0. If START is non-NULL,
280 set *START to the location of the mapping symbol. */
281
282 static char
283 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
284 {
285 struct obj_section *sec;
286
287 /* If there are mapping symbols, consult them. */
288 sec = find_pc_section (memaddr);
289 if (sec != NULL)
290 {
291 struct arm_per_objfile *data;
292 VEC(arm_mapping_symbol_s) *map;
293 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
294 0 };
295 unsigned int idx;
296
297 data = objfile_data (sec->objfile, arm_objfile_data_key);
298 if (data != NULL)
299 {
300 map = data->section_maps[sec->the_bfd_section->index];
301 if (!VEC_empty (arm_mapping_symbol_s, map))
302 {
303 struct arm_mapping_symbol *map_sym;
304
305 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
306 arm_compare_mapping_symbols);
307
308 /* VEC_lower_bound finds the earliest ordered insertion
309 point. If the following symbol starts at this exact
310 address, we use that; otherwise, the preceding
311 mapping symbol covers this address. */
312 if (idx < VEC_length (arm_mapping_symbol_s, map))
313 {
314 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
315 if (map_sym->value == map_key.value)
316 {
317 if (start)
318 *start = map_sym->value + obj_section_addr (sec);
319 return map_sym->type;
320 }
321 }
322
323 if (idx > 0)
324 {
325 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
326 if (start)
327 *start = map_sym->value + obj_section_addr (sec);
328 return map_sym->type;
329 }
330 }
331 }
332 }
333
334 return 0;
335 }
336
337 /* Determine if the program counter specified in MEMADDR is in a Thumb
338 function. This function should be called for addresses unrelated to
339 any executing frame; otherwise, prefer arm_frame_is_thumb. */
340
341 static int
342 arm_pc_is_thumb (CORE_ADDR memaddr)
343 {
344 struct obj_section *sec;
345 struct minimal_symbol *sym;
346 char type;
347
348 /* If bit 0 of the address is set, assume this is a Thumb address. */
349 if (IS_THUMB_ADDR (memaddr))
350 return 1;
351
352 /* If the user wants to override the symbol table, let him. */
353 if (strcmp (arm_force_mode_string, "arm") == 0)
354 return 0;
355 if (strcmp (arm_force_mode_string, "thumb") == 0)
356 return 1;
357
358 /* If there are mapping symbols, consult them. */
359 type = arm_find_mapping_symbol (memaddr, NULL);
360 if (type)
361 return type == 't';
362
363 /* Thumb functions have a "special" bit set in minimal symbols. */
364 sym = lookup_minimal_symbol_by_pc (memaddr);
365 if (sym)
366 return (MSYMBOL_IS_SPECIAL (sym));
367
368 /* If the user wants to override the fallback mode, let them. */
369 if (strcmp (arm_fallback_mode_string, "arm") == 0)
370 return 0;
371 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
372 return 1;
373
374 /* If we couldn't find any symbol, but we're talking to a running
375 target, then trust the current value of $cpsr. This lets
376 "display/i $pc" always show the correct mode (though if there is
377 a symbol table we will not reach here, so it still may not be
378 displayed in the mode it will be executed). */
379 if (target_has_registers)
380 return arm_frame_is_thumb (get_current_frame ());
381
382 /* Otherwise we're out of luck; we assume ARM. */
383 return 0;
384 }
385
386 /* Remove useless bits from addresses in a running program. */
387 static CORE_ADDR
388 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
389 {
390 if (arm_apcs_32)
391 return UNMAKE_THUMB_ADDR (val);
392 else
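    /* In 26-bit mode, R15 also carries the PSR flags and mode bits in
       its top and bottom bits, so keep only the word-aligned 26-bit
       address (bits 2-25).  */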
393 return (val & 0x03fffffc);
394 }
395
396 /* When reading symbols, we need to zap the low bit of the address,
397 which may be set to 1 for Thumb functions. */
398 static CORE_ADDR
399 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
400 {
401 return val & ~1;
402 }
403
404 /* Analyze a Thumb prologue, looking for a recognizable stack frame
405 and frame pointer. Scan until we encounter a store that could
406 clobber the stack frame unexpectedly, or an unknown instruction. */
407
408 static CORE_ADDR
409 thumb_analyze_prologue (struct gdbarch *gdbarch,
410 CORE_ADDR start, CORE_ADDR limit,
411 struct arm_prologue_cache *cache)
412 {
413 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
414 int i;
415 pv_t regs[16];
416 struct pv_area *stack;
417 struct cleanup *back_to;
418 CORE_ADDR offset;
419
420 for (i = 0; i < 16; i++)
421 regs[i] = pv_register (i, 0);
422 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
423 back_to = make_cleanup_free_pv_area (stack);
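  /* From here on the prologue is executed symbolically: each entry of
     REGS tracks a register's value as "initial value of some register
     plus a constant", and STACK records which of those values have been
     stored at which symbolic stack addresses.  This uses GDB's generic
     prologue-value machinery from prologue-value.h.  */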
424
425 while (start < limit)
426 {
427 unsigned short insn;
428
429 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
430
431 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
432 {
433 int regno;
434 int mask;
435
436 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
437 break;
438
439 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
440 whether to save LR (R14). */
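   /* Bit 8 is shifted up to bit position 14 so that the mask can be
      indexed directly by register number, with LR in its usual place.  */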
441 mask = (insn & 0xff) | ((insn & 0x100) << 6);
442
443 /* Calculate offsets of saved R0-R7 and LR. */
444 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
445 if (mask & (1 << regno))
446 {
447 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
448 -4);
449 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
450 }
451 }
452 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
453 sub sp, #simm */
454 {
455 offset = (insn & 0x7f) << 2; /* get scaled offset */
456 if (insn & 0x80) /* Check for SUB. */
457 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
458 -offset);
459 else
460 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
461 offset);
462 }
463 else if ((insn & 0xff00) == 0xaf00) /* add r7, sp, #imm */
464 regs[THUMB_FP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
465 (insn & 0xff) << 2);
466 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
467 {
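   /* Bit 7 of the instruction supplies the high bit (bit 3) of the
      destination register number; bits 3-6 encode the source register.  */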
468 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
469 int src_reg = (insn & 0x78) >> 3;
470 regs[dst_reg] = regs[src_reg];
471 }
472 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
473 {
474 /* Handle stores to the stack. Normally pushes are used,
475 but with GCC -mtpcs-frame, there may be other stores
476 in the prologue to create the frame. */
477 int regno = (insn >> 8) & 0x7;
478 pv_t addr;
479
480 offset = (insn & 0xff) << 2;
481 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
482
483 if (pv_area_store_would_trash (stack, addr))
484 break;
485
486 pv_area_store (stack, addr, 4, regs[regno]);
487 }
488 else
489 {
490 /* We don't know what this instruction is. We're finished
491 scanning. NOTE: Recognizing more safe-to-ignore
492 instructions here will improve support for optimized
493 code. */
494 break;
495 }
496
497 start += 2;
498 }
499
500 if (cache == NULL)
501 {
502 do_cleanups (back_to);
503 return start;
504 }
505
506 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
507 {
508 /* Frame pointer is fp. Frame size is constant. */
509 cache->framereg = ARM_FP_REGNUM;
510 cache->framesize = -regs[ARM_FP_REGNUM].k;
511 }
512 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
513 {
514 /* Frame pointer is r7. Frame size is constant. */
515 cache->framereg = THUMB_FP_REGNUM;
516 cache->framesize = -regs[THUMB_FP_REGNUM].k;
517 }
518 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
519 {
520 /* Try the stack pointer... this is a bit desperate. */
521 cache->framereg = ARM_SP_REGNUM;
522 cache->framesize = -regs[ARM_SP_REGNUM].k;
523 }
524 else
525 {
526 /* We're just out of luck. We don't know where the frame is. */
527 cache->framereg = -1;
528 cache->framesize = 0;
529 }
530
531 for (i = 0; i < 16; i++)
532 if (pv_area_find_reg (stack, gdbarch, i, &offset))
533 cache->saved_regs[i].addr = offset;
534
535 do_cleanups (back_to);
536 return start;
537 }
538
539 /* Advance the PC across any function entry prologue instructions to
540 reach some "real" code.
541
542 The APCS (ARM Procedure Call Standard) defines the following
543 prologue:
544
545 mov ip, sp
546 [stmfd sp!, {a1,a2,a3,a4}]
547 stmfd sp!, {...,fp,ip,lr,pc}
548 [stfe f7, [sp, #-12]!]
549 [stfe f6, [sp, #-12]!]
550 [stfe f5, [sp, #-12]!]
551 [stfe f4, [sp, #-12]!]
552 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
553
554 static CORE_ADDR
555 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
556 {
557 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
558 unsigned long inst;
559 CORE_ADDR skip_pc;
560 CORE_ADDR func_addr, limit_pc;
561 struct symtab_and_line sal;
562
563 /* If we're in a dummy frame, don't even try to skip the prologue. */
564 if (deprecated_pc_in_call_dummy (gdbarch, pc))
565 return pc;
566
567 /* See if we can determine the end of the prologue via the symbol table.
568 If so, then return either PC, or the PC after the prologue, whichever
569 is greater. */
570 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
571 {
572 CORE_ADDR post_prologue_pc
573 = skip_prologue_using_sal (gdbarch, func_addr);
574 if (post_prologue_pc != 0)
575 return max (pc, post_prologue_pc);
576 }
577
578 /* Can't determine prologue from the symbol table, need to examine
579 instructions. */
580
581 /* Find an upper limit on the function prologue using the debug
582 information. If the debug information could not be used to provide
583 that bound, then use an arbitrary large number as the upper bound. */
584 /* Like arm_scan_prologue, stop no later than pc + 64. */
585 limit_pc = skip_prologue_using_sal (gdbarch, pc);
586 if (limit_pc == 0)
587 limit_pc = pc + 64; /* Magic. */
588
589
590 /* Check if this is Thumb code. */
591 if (arm_pc_is_thumb (pc))
592 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
593
594 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
595 {
596 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
597
598 /* "mov ip, sp" is no longer a required part of the prologue. */
599 if (inst == 0xe1a0c00d) /* mov ip, sp */
600 continue;
601
602 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
603 continue;
604
605 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
606 continue;
607
608 /* Some prologues begin with "str lr, [sp, #-4]!". */
609 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
610 continue;
611
612 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
613 continue;
614
615 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
616 continue;
617
618 /* Any insns after this point may float into the code, if it makes
619 for better instruction scheduling, so we skip them only if we
620 find them, but still consider the function to be frame-ful. */
621
622 /* We may have either one sfmfd instruction here, or several stfe
623 insns, depending on the version of floating point code we
624 support. */
625 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
626 continue;
627
628 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
629 continue;
630
631 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
632 continue;
633
634 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
635 continue;
636
637 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
638 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
639 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
640 continue;
641
642 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
643 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
644 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
645 continue;
646
647 /* Un-recognized instruction; stop scanning. */
648 break;
649 }
650
651 return skip_pc; /* End of prologue */
652 }
653
654 /* *INDENT-OFF* */
655 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
656 This function decodes a Thumb function prologue to determine:
657 1) the size of the stack frame
658 2) which registers are saved on it
659 3) the offsets of saved regs
660 4) the offset from the stack pointer to the frame pointer
661
662 A typical Thumb function prologue would create this stack frame
663 (offsets relative to FP)
664 old SP -> 24 stack parameters
665 20 LR
666 16 R7
667 R7 -> 0 local variables (16 bytes)
668 SP -> -12 additional stack space (12 bytes)
669 The frame size would thus be 36 bytes, and the frame offset would be
670 12 bytes. The frame register is R7.
671
672 The comments for thumb_analyze_prologue() describe the algorithm we use
673 to detect the end of the prologue. */
674 /* *INDENT-ON* */
675
676 static void
677 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
678 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
679 {
680 CORE_ADDR prologue_start;
681 CORE_ADDR prologue_end;
682 CORE_ADDR current_pc;
683
684 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
685 &prologue_end))
686 {
687 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
688
689 if (sal.line == 0) /* no line info, use current PC */
690 prologue_end = prev_pc;
691 else if (sal.end < prologue_end) /* next line begins after fn end */
692 prologue_end = sal.end; /* (probably means no prologue) */
693 }
694 else
695 /* We're in the boondocks: we have no idea where the start of the
696 function is. */
697 return;
698
699 prologue_end = min (prologue_end, prev_pc);
700
701 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
702 }
703
704 /* This function decodes an ARM function prologue to determine:
705 1) the size of the stack frame
706 2) which registers are saved on it
707 3) the offsets of saved regs
708 4) the offset from the stack pointer to the frame pointer
709 This information is stored in the "extra" fields of the frame_info.
710
711 There are two basic forms for the ARM prologue. The fixed argument
712 function call will look like:
713
714 mov ip, sp
715 stmfd sp!, {fp, ip, lr, pc}
716 sub fp, ip, #4
717 [sub sp, sp, #4]
718
719 Which would create this stack frame (offsets relative to FP):
720 IP -> 4 (caller's stack)
721 FP -> 0 PC (points to address of stmfd instruction + 8 in callee)
722 -4 LR (return address in caller)
723 -8 IP (copy of caller's SP)
724 -12 FP (caller's FP)
725 SP -> -28 Local variables
726
727 The frame size would thus be 32 bytes, and the frame offset would be
728 28 bytes. The stmfd call can also save any of the vN registers it
729 plans to use, which increases the frame size accordingly.
730
731 Note: The stored PC is 8 off of the STMFD instruction that stored it
732 because the ARM Store instructions always store PC + 8 when you read
733 the PC register.
734
735 A variable argument function call will look like:
736
737 mov ip, sp
738 stmfd sp!, {a1, a2, a3, a4}
739 stmfd sp!, {fp, ip, lr, pc}
740 sub fp, ip, #20
741
742 Which would create this stack frame (offsets relative to FP):
743 IP -> 20 (caller's stack)
744 16 A4
745 12 A3
746 8 A2
747 4 A1
748 FP -> 0 PC (points to address of stmfd instruction + 8 in callee)
749 -4 LR (return address in caller)
750 -8 IP (copy of caller's SP)
751 -12 FP (caller's FP)
752 SP -> -28 Local variables
753
754 The frame size would thus be 48 bytes, and the frame offset would be
755 28 bytes.
756
757 There is another potential complication, which is that the optimizer
758 will try to separate the store of fp in the "stmfd" instruction from
759 the "sub fp, ip, #NN" instruction. Almost anything can be there, so
760 we just key on the stmfd, and then scan for the "sub fp, ip, #NN"...
761
762 Also, note, the original version of the ARM toolchain claimed that there
763 should be an
764
765 instruction at the end of the prologue. I have never seen GCC produce
766 this, and the ARM docs don't mention it. We still test for it below in
767 case it happens...
768
769 */
770
771 static void
772 arm_scan_prologue (struct frame_info *this_frame,
773 struct arm_prologue_cache *cache)
774 {
775 struct gdbarch *gdbarch = get_frame_arch (this_frame);
776 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
777 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
778 int regno;
779 CORE_ADDR prologue_start, prologue_end, current_pc;
780 CORE_ADDR prev_pc = get_frame_pc (this_frame);
781 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
782 pv_t regs[ARM_FPS_REGNUM];
783 struct pv_area *stack;
784 struct cleanup *back_to;
785 CORE_ADDR offset;
786
787 /* Assume there is no frame until proven otherwise. */
788 cache->framereg = ARM_SP_REGNUM;
789 cache->framesize = 0;
790
791 /* Check for Thumb prologue. */
792 if (arm_frame_is_thumb (this_frame))
793 {
794 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
795 return;
796 }
797
798 /* Find the function prologue. If we can't find the function in
799 the symbol table, peek in the stack frame to find the PC. */
800 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
801 &prologue_end))
802 {
803 /* One way to find the end of the prologue (which works well
804 for unoptimized code) is to do the following:
805
806 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
807
808 if (sal.line == 0)
809 prologue_end = prev_pc;
810 else if (sal.end < prologue_end)
811 prologue_end = sal.end;
812
813 This mechanism is very accurate so long as the optimizer
814 doesn't move any instructions from the function body into the
815 prologue. If this happens, sal.end will be the last
816 instruction in the first hunk of prologue code just before
817 the first instruction that the scheduler has moved from
818 the body to the prologue.
819
820 In order to make sure that we scan all of the prologue
821 instructions, we use a slightly less accurate mechanism which
822 may scan more than necessary. To help compensate for this
823 lack of accuracy, the prologue scanning loop below contains
824 several clauses which'll cause the loop to terminate early if
825 an implausible prologue instruction is encountered.
826
827 The expression
828
829 prologue_start + 64
830
831 is a suitable endpoint since it accounts for the largest
832 possible prologue plus up to five instructions inserted by
833 the scheduler. */
834
835 if (prologue_end > prologue_start + 64)
836 {
837 prologue_end = prologue_start + 64; /* See above. */
838 }
839 }
840 else
841 {
842 /* We have no symbol information. Our only option is to assume this
843 function has a standard stack frame and the normal frame register.
844 Then, we can find the value of our frame pointer on entrance to
845 the callee (or at the present moment if this is the innermost frame).
846 The value stored there should be the address of the stmfd + 8. */
847 CORE_ADDR frame_loc;
848 LONGEST return_value;
849
850 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
851 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
852 return;
853 else
854 {
855 prologue_start = gdbarch_addr_bits_remove
856 (gdbarch, return_value) - 8;
857 prologue_end = prologue_start + 64; /* See above. */
858 }
859 }
860
861 if (prev_pc < prologue_end)
862 prologue_end = prev_pc;
863
864 /* Now search the prologue looking for instructions that set up the
865 frame pointer, adjust the stack pointer, and save registers.
866
867 Be careful, however, and if it doesn't look like a prologue,
868 don't try to scan it. If, for instance, a frameless function
869 begins with stmfd sp!, then we will tell ourselves there is
870 a frame, which will confuse stack traceback, as well as "finish"
871 and other operations that rely on a knowledge of the stack
872 traceback.
873
874 In the APCS, the prologue should start with "mov ip, sp" so
875 if we don't see this as the first insn, we will stop.
876
877 [Note: This doesn't seem to be true any longer, so it's now an
878 optional part of the prologue. - Kevin Buettner, 2001-11-20]
879
880 [Note further: The "mov ip,sp" only seems to be missing in
881 frameless functions at optimization level "-O2" or above,
882 in which case it is often (but not always) replaced by
883 "str lr, [sp, #-4]!". - Michael Snyder, 2002-04-23] */
884
885 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
886 regs[regno] = pv_register (regno, 0);
887 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
888 back_to = make_cleanup_free_pv_area (stack);
889
890 for (current_pc = prologue_start;
891 current_pc < prologue_end;
892 current_pc += 4)
893 {
894 unsigned int insn
895 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
896
897 if (insn == 0xe1a0c00d) /* mov ip, sp */
898 {
899 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
900 continue;
901 }
902 else if ((insn & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
903 {
904 unsigned imm = insn & 0xff; /* immediate value */
905 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
906 imm = (imm >> rot) | (imm << (32 - rot));
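   /* This is the standard ARM data-processing immediate: an 8-bit value
      rotated right by twice the 4-bit rotate field (hence the shift by 7
      rather than 8 above).  */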
907 regs[ARM_IP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], imm);
908 continue;
909 }
910 else if ((insn & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
911 {
912 unsigned imm = insn & 0xff; /* immediate value */
913 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
914 imm = (imm >> rot) | (imm << (32 - rot));
915 regs[ARM_IP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
916 continue;
917 }
918 else if (insn == 0xe52de004) /* str lr, [sp, #-4]! */
919 {
920 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
921 break;
922 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
923 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[ARM_LR_REGNUM]);
924 continue;
925 }
926 else if ((insn & 0xffff0000) == 0xe92d0000)
927 /* stmfd sp!, {..., fp, ip, lr, pc}
928 or
929 stmfd sp!, {a1, a2, a3, a4} */
930 {
931 int mask = insn & 0xffff;
932
933 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
934 break;
935
936 /* Calculate offsets of saved registers. */
937 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
938 if (mask & (1 << regno))
939 {
940 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
941 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
942 }
943 }
944 else if ((insn & 0xffffc000) == 0xe54b0000 /* strb rx,[r11,#-n] */
945 || (insn & 0xffffc0f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
946 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
947 {
948 /* No need to add this to saved_regs -- it's just an arg reg. */
949 continue;
950 }
951 else if ((insn & 0xffffc000) == 0xe5cd0000 /* strb rx,[sp,#n] */
952 || (insn & 0xffffc0f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
953 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
954 {
955 /* No need to add this to saved_regs -- it's just an arg reg. */
956 continue;
957 }
958 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
959 {
960 unsigned imm = insn & 0xff; /* immediate value */
961 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
962 imm = (imm >> rot) | (imm << (32 - rot));
963 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
964 }
965 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
966 {
967 unsigned imm = insn & 0xff; /* immediate value */
968 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
969 imm = (imm >> rot) | (imm << (32 - rot));
970 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
971 }
972 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
973 && gdbarch_tdep (gdbarch)->have_fpa_registers)
974 {
975 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
976 break;
977
978 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
979 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
980 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
981 }
982 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
983 && gdbarch_tdep (gdbarch)->have_fpa_registers)
984 {
985 int n_saved_fp_regs;
986 unsigned int fp_start_reg, fp_bound_reg;
987
988 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
989 break;
990
991 if ((insn & 0x800) == 0x800) /* N0 is set */
992 {
993 if ((insn & 0x40000) == 0x40000) /* N1 is set */
994 n_saved_fp_regs = 3;
995 else
996 n_saved_fp_regs = 1;
997 }
998 else
999 {
1000 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1001 n_saved_fp_regs = 2;
1002 else
1003 n_saved_fp_regs = 4;
1004 }
1005
1006 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1007 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1008 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1009 {
1010 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1011 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1012 regs[fp_start_reg]);
1013 }
1014 }
1015 else if ((insn & 0xf0000000) != 0xe0000000)
1016 break; /* Condition not true, exit early */
1017 else if ((insn & 0xfe200000) == 0xe8200000) /* ldm? */
1018 break; /* Don't scan past a block load */
1019 else
1020 /* The optimizer might shove anything into the prologue,
1021 so we just skip what we don't recognize. */
1022 continue;
1023 }
1024
1025 /* The frame size is just the distance from the frame register
1026 to the original stack pointer. */
1027 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1028 {
1029 /* Frame pointer is fp. */
1030 cache->framereg = ARM_FP_REGNUM;
1031 cache->framesize = -regs[ARM_FP_REGNUM].k;
1032 }
1033 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1034 {
1035 /* Try the stack pointer... this is a bit desperate. */
1036 cache->framereg = ARM_SP_REGNUM;
1037 cache->framesize = -regs[ARM_SP_REGNUM].k;
1038 }
1039 else
1040 {
1041 /* We're just out of luck. We don't know where the frame is. */
1042 cache->framereg = -1;
1043 cache->framesize = 0;
1044 }
1045
1046 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1047 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1048 cache->saved_regs[regno].addr = offset;
1049
1050 do_cleanups (back_to);
1051 }
1052
1053 static struct arm_prologue_cache *
1054 arm_make_prologue_cache (struct frame_info *this_frame)
1055 {
1056 int reg;
1057 struct arm_prologue_cache *cache;
1058 CORE_ADDR unwound_fp;
1059
1060 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1061 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1062
1063 arm_scan_prologue (this_frame, cache);
1064
1065 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1066 if (unwound_fp == 0)
1067 return cache;
1068
1069 cache->prev_sp = unwound_fp + cache->framesize;
1070
1071 /* Calculate actual addresses of saved registers using offsets
1072 determined by arm_scan_prologue. */
1073 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1074 if (trad_frame_addr_p (cache->saved_regs, reg))
1075 cache->saved_regs[reg].addr += cache->prev_sp;
1076
1077 return cache;
1078 }
1079
1080 /* Our frame ID for a normal frame is the current function's starting PC
1081 and the caller's SP when we were called. */
1082
1083 static void
1084 arm_prologue_this_id (struct frame_info *this_frame,
1085 void **this_cache,
1086 struct frame_id *this_id)
1087 {
1088 struct arm_prologue_cache *cache;
1089 struct frame_id id;
1090 CORE_ADDR pc, func;
1091
1092 if (*this_cache == NULL)
1093 *this_cache = arm_make_prologue_cache (this_frame);
1094 cache = *this_cache;
1095
1096 /* This is meant to halt the backtrace at "_start". */
1097 pc = get_frame_pc (this_frame);
1098 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1099 return;
1100
1101 /* If we've hit a wall, stop. */
1102 if (cache->prev_sp == 0)
1103 return;
1104
1105 func = get_frame_func (this_frame);
1106 id = frame_id_build (cache->prev_sp, func);
1107 *this_id = id;
1108 }
1109
1110 static struct value *
1111 arm_prologue_prev_register (struct frame_info *this_frame,
1112 void **this_cache,
1113 int prev_regnum)
1114 {
1115 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1116 struct arm_prologue_cache *cache;
1117
1118 if (*this_cache == NULL)
1119 *this_cache = arm_make_prologue_cache (this_frame);
1120 cache = *this_cache;
1121
1122 /* If we are asked to unwind the PC, then we need to return the LR
1123 instead. The prologue may save PC, but it will point into this
1124 frame's prologue, not the next frame's resume location. Also
1125 strip the saved T bit. A valid LR may have the low bit set, but
1126 a valid PC never does. */
1127 if (prev_regnum == ARM_PC_REGNUM)
1128 {
1129 CORE_ADDR lr;
1130
1131 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1132 return frame_unwind_got_constant (this_frame, prev_regnum,
1133 arm_addr_bits_remove (gdbarch, lr));
1134 }
1135
1136 /* SP is generally not saved to the stack, but this frame is
1137 identified by the next frame's stack pointer at the time of the call.
1138 The value was already reconstructed into PREV_SP. */
1139 if (prev_regnum == ARM_SP_REGNUM)
1140 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1141
1142 /* The CPSR may have been changed by the call instruction and by the
1143 called function. The only bit we can reconstruct is the T bit,
1144 by checking the low bit of LR as of the call. This is a reliable
1145 indicator of Thumb-ness except for some ARM v4T pre-interworking
1146 Thumb code, which could get away with a clear low bit as long as
1147 the called function did not use bx. Guess that all other
1148 bits are unchanged; the condition flags are presumably lost,
1149 but the processor status is likely valid. */
1150 if (prev_regnum == ARM_PS_REGNUM)
1151 {
1152 CORE_ADDR lr, cpsr;
1153
1154 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1155 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1156 if (IS_THUMB_ADDR (lr))
1157 cpsr |= CPSR_T;
1158 else
1159 cpsr &= ~CPSR_T;
1160 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1161 }
1162
1163 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1164 prev_regnum);
1165 }
1166
1167 struct frame_unwind arm_prologue_unwind = {
1168 NORMAL_FRAME,
1169 arm_prologue_this_id,
1170 arm_prologue_prev_register,
1171 NULL,
1172 default_frame_sniffer
1173 };
1174
1175 static struct arm_prologue_cache *
1176 arm_make_stub_cache (struct frame_info *this_frame)
1177 {
1178 int reg;
1179 struct arm_prologue_cache *cache;
1180 CORE_ADDR unwound_fp;
1181
1182 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1183 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1184
1185 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
1186
1187 return cache;
1188 }
1189
1190 /* Our frame ID for a stub frame is the current SP and PC in the stub. */
1191
1192 static void
1193 arm_stub_this_id (struct frame_info *this_frame,
1194 void **this_cache,
1195 struct frame_id *this_id)
1196 {
1197 struct arm_prologue_cache *cache;
1198
1199 if (*this_cache == NULL)
1200 *this_cache = arm_make_stub_cache (this_frame);
1201 cache = *this_cache;
1202
1203 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1204 }
1205
1206 static int
1207 arm_stub_unwind_sniffer (const struct frame_unwind *self,
1208 struct frame_info *this_frame,
1209 void **this_prologue_cache)
1210 {
1211 CORE_ADDR addr_in_block;
1212 char dummy[4];
1213
1214 addr_in_block = get_frame_address_in_block (this_frame);
1215 if (in_plt_section (addr_in_block, NULL)
1216 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1217 return 1;
1218
1219 return 0;
1220 }
1221
1222 struct frame_unwind arm_stub_unwind = {
1223 NORMAL_FRAME,
1224 arm_stub_this_id,
1225 arm_prologue_prev_register,
1226 NULL,
1227 arm_stub_unwind_sniffer
1228 };
1229
1230 static CORE_ADDR
1231 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1232 {
1233 struct arm_prologue_cache *cache;
1234
1235 if (*this_cache == NULL)
1236 *this_cache = arm_make_prologue_cache (this_frame);
1237 cache = *this_cache;
1238
1239 return cache->prev_sp - cache->framesize;
1240 }
1241
1242 struct frame_base arm_normal_base = {
1243 &arm_prologue_unwind,
1244 arm_normal_frame_base,
1245 arm_normal_frame_base,
1246 arm_normal_frame_base
1247 };
1248
1249 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1250 dummy frame. The frame ID's base needs to match the TOS value
1251 saved by save_dummy_frame_tos() and returned from
1252 arm_push_dummy_call, and the PC needs to match the dummy frame's
1253 breakpoint. */
1254
1255 static struct frame_id
1256 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1257 {
1258 return frame_id_build (get_frame_register_unsigned (this_frame, ARM_SP_REGNUM),
1259 get_frame_pc (this_frame));
1260 }
1261
1262 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1263 be used to construct the previous frame's ID, after looking up the
1264 containing function). */
1265
1266 static CORE_ADDR
1267 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1268 {
1269 CORE_ADDR pc;
1270 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
1271 return arm_addr_bits_remove (gdbarch, pc);
1272 }
1273
1274 static CORE_ADDR
1275 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1276 {
1277 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
1278 }
1279
1280 static struct value *
1281 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
1282 int regnum)
1283 {
1284 struct gdbarch * gdbarch = get_frame_arch (this_frame);
1285 CORE_ADDR lr, cpsr;
1286
1287 switch (regnum)
1288 {
1289 case ARM_PC_REGNUM:
1290 /* The PC is normally copied from the return column, which
1291 describes saves of LR. However, that version may have an
1292 extra bit set to indicate Thumb state. The bit is not
1293 part of the PC. */
1294 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1295 return frame_unwind_got_constant (this_frame, regnum,
1296 arm_addr_bits_remove (gdbarch, lr));
1297
1298 case ARM_PS_REGNUM:
1299 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
1300 cpsr = get_frame_register_unsigned (this_frame, regnum);
1301 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1302 if (IS_THUMB_ADDR (lr))
1303 cpsr |= CPSR_T;
1304 else
1305 cpsr &= ~CPSR_T;
1306 return frame_unwind_got_constant (this_frame, regnum, cpsr);
1307
1308 default:
1309 internal_error (__FILE__, __LINE__,
1310 _("Unexpected register %d"), regnum);
1311 }
1312 }
1313
1314 static void
1315 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1316 struct dwarf2_frame_state_reg *reg,
1317 struct frame_info *this_frame)
1318 {
1319 switch (regnum)
1320 {
1321 case ARM_PC_REGNUM:
1322 case ARM_PS_REGNUM:
1323 reg->how = DWARF2_FRAME_REG_FN;
1324 reg->loc.fn = arm_dwarf2_prev_register;
1325 break;
1326 case ARM_SP_REGNUM:
1327 reg->how = DWARF2_FRAME_REG_CFA;
1328 break;
1329 }
1330 }
1331
1332 /* When arguments must be pushed onto the stack, they go on in reverse
1333 order. The code below implements a FILO (stack) to do this. */
1334
1335 struct stack_item
1336 {
1337 int len;
1338 struct stack_item *prev;
1339 void *data;
1340 };
1341
1342 static struct stack_item *
1343 push_stack_item (struct stack_item *prev, void *contents, int len)
1344 {
1345 struct stack_item *si;
1346 si = xmalloc (sizeof (struct stack_item));
1347 si->data = xmalloc (len);
1348 si->len = len;
1349 si->prev = prev;
1350 memcpy (si->data, contents, len);
1351 return si;
1352 }
1353
1354 static struct stack_item *
1355 pop_stack_item (struct stack_item *si)
1356 {
1357 struct stack_item *dead = si;
1358 si = si->prev;
1359 xfree (dead->data);
1360 xfree (dead);
1361 return si;
1362 }
1363
1364
1365 /* Return the alignment (in bytes) of the given type. */
1366
1367 static int
1368 arm_type_align (struct type *t)
1369 {
1370 int n;
1371 int align;
1372 int falign;
1373
1374 t = check_typedef (t);
1375 switch (TYPE_CODE (t))
1376 {
1377 default:
1378 /* Should never happen. */
1379 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1380 return 4;
1381
1382 case TYPE_CODE_PTR:
1383 case TYPE_CODE_ENUM:
1384 case TYPE_CODE_INT:
1385 case TYPE_CODE_FLT:
1386 case TYPE_CODE_SET:
1387 case TYPE_CODE_RANGE:
1388 case TYPE_CODE_BITSTRING:
1389 case TYPE_CODE_REF:
1390 case TYPE_CODE_CHAR:
1391 case TYPE_CODE_BOOL:
1392 return TYPE_LENGTH (t);
1393
1394 case TYPE_CODE_ARRAY:
1395 case TYPE_CODE_COMPLEX:
1396 /* TODO: What about vector types? */
1397 return arm_type_align (TYPE_TARGET_TYPE (t));
1398
1399 case TYPE_CODE_STRUCT:
1400 case TYPE_CODE_UNION:
1401 align = 1;
1402 for (n = 0; n < TYPE_NFIELDS (t); n++)
1403 {
1404 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
1405 if (falign > align)
1406 align = falign;
1407 }
1408 return align;
1409 }
1410 }
1411
1412 /* Possible base types for a candidate for passing and returning in
1413 VFP registers. */
1414
1415 enum arm_vfp_cprc_base_type
1416 {
1417 VFP_CPRC_UNKNOWN,
1418 VFP_CPRC_SINGLE,
1419 VFP_CPRC_DOUBLE,
1420 VFP_CPRC_VEC64,
1421 VFP_CPRC_VEC128
1422 };
1423
1424 /* The length of one element of base type B. */
1425
1426 static unsigned
1427 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
1428 {
1429 switch (b)
1430 {
1431 case VFP_CPRC_SINGLE:
1432 return 4;
1433 case VFP_CPRC_DOUBLE:
1434 return 8;
1435 case VFP_CPRC_VEC64:
1436 return 8;
1437 case VFP_CPRC_VEC128:
1438 return 16;
1439 default:
1440 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
1441 (int) b);
1442 }
1443 }
1444
1445 /* The character ('s', 'd' or 'q') for the type of VFP register used
1446 for passing base type B. */
1447
1448 static int
1449 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
1450 {
1451 switch (b)
1452 {
1453 case VFP_CPRC_SINGLE:
1454 return 's';
1455 case VFP_CPRC_DOUBLE:
1456 return 'd';
1457 case VFP_CPRC_VEC64:
1458 return 'd';
1459 case VFP_CPRC_VEC128:
1460 return 'q';
1461 default:
1462 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
1463 (int) b);
1464 }
1465 }
1466
1467 /* Determine whether T may be part of a candidate for passing and
1468 returning in VFP registers, ignoring the limit on the total number
1469 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
1470 classification of the first valid component found; if it is not
1471 VFP_CPRC_UNKNOWN, all components must have the same classification
1472 as *BASE_TYPE. If it is found that T contains a type not permitted
1473 for passing and returning in VFP registers, a type differently
1474 classified from *BASE_TYPE, or two types differently classified
1475 from each other, return -1, otherwise return the total number of
1476 base-type elements found (possibly 0 in an empty structure or
1477 array). Vectors and complex types are not currently supported,
1478 matching the generic AAPCS support. */
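/* For example, under the rules above "struct { float x, y, z; }" yields
   base type VFP_CPRC_SINGLE with a count of 3, while
   "struct { float f; double d; }" is rejected (-1) because its fields
   classify differently.  */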
1479
1480 static int
1481 arm_vfp_cprc_sub_candidate (struct type *t,
1482 enum arm_vfp_cprc_base_type *base_type)
1483 {
1484 t = check_typedef (t);
1485 switch (TYPE_CODE (t))
1486 {
1487 case TYPE_CODE_FLT:
1488 switch (TYPE_LENGTH (t))
1489 {
1490 case 4:
1491 if (*base_type == VFP_CPRC_UNKNOWN)
1492 *base_type = VFP_CPRC_SINGLE;
1493 else if (*base_type != VFP_CPRC_SINGLE)
1494 return -1;
1495 return 1;
1496
1497 case 8:
1498 if (*base_type == VFP_CPRC_UNKNOWN)
1499 *base_type = VFP_CPRC_DOUBLE;
1500 else if (*base_type != VFP_CPRC_DOUBLE)
1501 return -1;
1502 return 1;
1503
1504 default:
1505 return -1;
1506 }
1507 break;
1508
1509 case TYPE_CODE_ARRAY:
1510 {
1511 int count;
1512 unsigned unitlen;
1513 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
1514 if (count == -1)
1515 return -1;
1516 if (TYPE_LENGTH (t) == 0)
1517 {
1518 gdb_assert (count == 0);
1519 return 0;
1520 }
1521 else if (count == 0)
1522 return -1;
1523 unitlen = arm_vfp_cprc_unit_length (*base_type);
1524 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
1525 return TYPE_LENGTH (t) / unitlen;
1526 }
1527 break;
1528
1529 case TYPE_CODE_STRUCT:
1530 {
1531 int count = 0;
1532 unsigned unitlen;
1533 int i;
1534 for (i = 0; i < TYPE_NFIELDS (t); i++)
1535 {
1536 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
1537 base_type);
1538 if (sub_count == -1)
1539 return -1;
1540 count += sub_count;
1541 }
1542 if (TYPE_LENGTH (t) == 0)
1543 {
1544 gdb_assert (count == 0);
1545 return 0;
1546 }
1547 else if (count == 0)
1548 return -1;
1549 unitlen = arm_vfp_cprc_unit_length (*base_type);
1550 if (TYPE_LENGTH (t) != unitlen * count)
1551 return -1;
1552 return count;
1553 }
1554
1555 case TYPE_CODE_UNION:
1556 {
1557 int count = 0;
1558 unsigned unitlen;
1559 int i;
1560 for (i = 0; i < TYPE_NFIELDS (t); i++)
1561 {
1562 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
1563 base_type);
1564 if (sub_count == -1)
1565 return -1;
1566 count = (count > sub_count ? count : sub_count);
1567 }
1568 if (TYPE_LENGTH (t) == 0)
1569 {
1570 gdb_assert (count == 0);
1571 return 0;
1572 }
1573 else if (count == 0)
1574 return -1;
1575 unitlen = arm_vfp_cprc_unit_length (*base_type);
1576 if (TYPE_LENGTH (t) != unitlen * count)
1577 return -1;
1578 return count;
1579 }
1580
1581 default:
1582 break;
1583 }
1584
1585 return -1;
1586 }
1587
1588 /* Determine whether T is a VFP co-processor register candidate (CPRC)
1589 if passed to or returned from a non-variadic function with the VFP
1590 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
1591 *BASE_TYPE to the base type for T and *COUNT to the number of
1592 elements of that base type before returning. */
1593
1594 static int
1595 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
1596 int *count)
1597 {
1598 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
1599 int c = arm_vfp_cprc_sub_candidate (t, &b);
1600 if (c <= 0 || c > 4)
1601 return 0;
1602 *base_type = b;
1603 *count = c;
1604 return 1;
1605 }
1606
1607 /* Return 1 if the VFP ABI should be used for passing arguments to and
1608 returning values from a function of type FUNC_TYPE, 0
1609 otherwise. */
1610
1611 static int
1612 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
1613 {
1614 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1615 /* Variadic functions always use the base ABI. Assume that functions
1616 without debug info are not variadic. */
1617 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
1618 return 0;
1619 /* The VFP ABI is only supported as a variant of AAPCS. */
1620 if (tdep->arm_abi != ARM_ABI_AAPCS)
1621 return 0;
1622 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
1623 }
1624
1625 /* We currently only support passing parameters in integer registers, which
1626 conforms with GCC's default model, and VFP argument passing following
1627 the VFP variant of AAPCS. Several other variants exist and
1628 we should probably support some of them based on the selected ABI. */
1629
1630 static CORE_ADDR
1631 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1632 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
1633 struct value **args, CORE_ADDR sp, int struct_return,
1634 CORE_ADDR struct_addr)
1635 {
1636 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1637 int argnum;
1638 int argreg;
1639 int nstack;
1640 struct stack_item *si = NULL;
1641 int use_vfp_abi;
1642 struct type *ftype;
1643 unsigned vfp_regs_free = (1 << 16) - 1;
1644
1645 /* Determine the type of this function and whether the VFP ABI
1646 applies. */
1647 ftype = check_typedef (value_type (function));
1648 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
1649 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
1650 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
1651
1652 /* Set the return address. For the ARM, the return breakpoint is
1653 always at BP_ADDR. */
1654 if (arm_pc_is_thumb (bp_addr))
1655 bp_addr |= 1;
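   /* With the low bit set, a return via "bx lr" will resume at the
      breakpoint in Thumb state.  */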
1656 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
1657
1658 /* Walk through the list of args and determine how large a temporary
1659 stack is required. Need to take care here as structs may be
1660 passed on the stack, and we have to push them.
1661 nstack = 0;
1662
1663 argreg = ARM_A1_REGNUM;
1664 nstack = 0;
1665
1666 /* The struct_return pointer occupies the first parameter
1667 passing register. */
1668 if (struct_return)
1669 {
1670 if (arm_debug)
1671 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
1672 gdbarch_register_name (gdbarch, argreg),
1673 paddress (gdbarch, struct_addr));
1674 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
1675 argreg++;
1676 }
1677
1678 for (argnum = 0; argnum < nargs; argnum++)
1679 {
1680 int len;
1681 struct type *arg_type;
1682 struct type *target_type;
1683 enum type_code typecode;
1684 bfd_byte *val;
1685 int align;
1686 enum arm_vfp_cprc_base_type vfp_base_type;
1687 int vfp_base_count;
1688 int may_use_core_reg = 1;
1689
1690 arg_type = check_typedef (value_type (args[argnum]));
1691 len = TYPE_LENGTH (arg_type);
1692 target_type = TYPE_TARGET_TYPE (arg_type);
1693 typecode = TYPE_CODE (arg_type);
1694 val = value_contents_writeable (args[argnum]);
1695
1696 align = arm_type_align (arg_type);
1697 /* Round alignment up to a whole number of words. */
1698 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
1699 /* Different ABIs have different maximum alignments. */
1700 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
1701 {
1702 /* The APCS ABI only requires word alignment. */
1703 align = INT_REGISTER_SIZE;
1704 }
1705 else
1706 {
1707 /* The AAPCS requires at most doubleword alignment. */
1708 if (align > INT_REGISTER_SIZE * 2)
1709 align = INT_REGISTER_SIZE * 2;
1710 }
1711
1712 if (use_vfp_abi
1713 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
1714 &vfp_base_count))
1715 {
1716 int regno;
1717 int unit_length;
1718 int shift;
1719 unsigned mask;
1720
1721 /* Because this is a CPRC it cannot go in a core register or
1722 cause a core register to be skipped for alignment.
1723 Either it goes in VFP registers and the rest of this loop
1724 iteration is skipped for this argument, or it goes on the
1725 stack (and the stack alignment code is correct for this
1726 case). */
1727 may_use_core_reg = 0;
1728
1729 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
1730 shift = unit_length / 4;
1731 mask = (1 << (shift * vfp_base_count)) - 1;
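   /* VFP_REGS_FREE tracks the sixteen single-precision argument slots
      s0-s15 (aliased by d0-d7 and q0-q3).  SHIFT is the number of
      S-sized slots one element of the candidate occupies, so MASK covers
      the whole candidate; the loop below looks for the first suitably
      aligned free block.  */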
1732 for (regno = 0; regno < 16; regno += shift)
1733 if (((vfp_regs_free >> regno) & mask) == mask)
1734 break;
1735
1736 if (regno < 16)
1737 {
1738 int reg_char;
1739 int reg_scaled;
1740 int i;
1741
1742 vfp_regs_free &= ~(mask << regno);
1743 reg_scaled = regno / shift;
1744 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
1745 for (i = 0; i < vfp_base_count; i++)
1746 {
1747 char name_buf[4];
1748 int regnum;
1749 if (reg_char == 'q')
1750 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
1751 val + i * unit_length);
1752 else
1753 {
1754 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
1755 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
1756 strlen (name_buf));
1757 regcache_cooked_write (regcache, regnum,
1758 val + i * unit_length);
1759 }
1760 }
1761 continue;
1762 }
1763 else
1764 {
1765 /* This CPRC could not go in VFP registers, so all VFP
1766 registers are now marked as used. */
1767 vfp_regs_free = 0;
1768 }
1769 }
1770
1771 /* Push stack padding for doubleword alignment. */
1772 if (nstack & (align - 1))
1773 {
1774 si = push_stack_item (si, val, INT_REGISTER_SIZE);
1775 nstack += INT_REGISTER_SIZE;
1776 }
1777
1778 /* Doubleword aligned quantities must go in even register pairs. */
1779 if (may_use_core_reg
1780 && argreg <= ARM_LAST_ARG_REGNUM
1781 && align > INT_REGISTER_SIZE
1782 && argreg & 1)
1783 argreg++;
1784
1785 /* If the argument is a pointer to a function, and it is a
1786 Thumb function, create a LOCAL copy of the value and set
1787 the THUMB bit in it. */
1788 if (TYPE_CODE_PTR == typecode
1789 && target_type != NULL
1790 && TYPE_CODE_FUNC == TYPE_CODE (target_type))
1791 {
1792 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
1793 if (arm_pc_is_thumb (regval))
1794 {
1795 val = alloca (len);
1796 store_unsigned_integer (val, len, byte_order,
1797 MAKE_THUMB_ADDR (regval));
1798 }
1799 }
1800
1801 /* Copy the argument to general registers or the stack in
1802 register-sized pieces. Large arguments are split between
1803 registers and stack. */
1804 while (len > 0)
1805 {
1806 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
1807
1808 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
1809 {
1810 /* The argument is being passed in a general purpose
1811 register. */
1812 CORE_ADDR regval
1813 = extract_unsigned_integer (val, partial_len, byte_order);
1814 if (byte_order == BFD_ENDIAN_BIG)
1815 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
1816 if (arm_debug)
1817 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
1818 argnum,
1819 gdbarch_register_name
1820 (gdbarch, argreg),
1821 phex (regval, INT_REGISTER_SIZE));
1822 regcache_cooked_write_unsigned (regcache, argreg, regval);
1823 argreg++;
1824 }
1825 else
1826 {
1827 /* Push the arguments onto the stack. */
1828 if (arm_debug)
1829 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
1830 argnum, nstack);
1831 si = push_stack_item (si, val, INT_REGISTER_SIZE);
1832 nstack += INT_REGISTER_SIZE;
1833 }
1834
1835 len -= partial_len;
1836 val += partial_len;
1837 }
1838 }
1839 /* If we have an odd number of words to push, then decrement the stack
1840 by one word now, so the first stack argument will be dword aligned. */
1841 if (nstack & 4)
1842 sp -= 4;
1843
1844 while (si)
1845 {
1846 sp -= si->len;
1847 write_memory (sp, si->data, si->len);
1848 si = pop_stack_item (si);
1849 }
1850
1851 /* Finally, update the SP register. */
1852 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
1853
1854 return sp;
1855 }
1856
1857
1858 /* Always align the frame to an 8-byte boundary. This is required on
1859 some platforms and harmless on the rest. */
1860
1861 static CORE_ADDR
1862 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1863 {
1864 /* Align the stack to eight bytes. */
1865 return sp & ~ (CORE_ADDR) 7;
1866 }
1867
1868 static void
1869 print_fpu_flags (int flags)
1870 {
1871 if (flags & (1 << 0))
1872 fputs ("IVO ", stdout);
1873 if (flags & (1 << 1))
1874 fputs ("DVZ ", stdout);
1875 if (flags & (1 << 2))
1876 fputs ("OFL ", stdout);
1877 if (flags & (1 << 3))
1878 fputs ("UFL ", stdout);
1879 if (flags & (1 << 4))
1880 fputs ("INX ", stdout);
1881 putchar ('\n');
1882 }
1883
1884 /* Print interesting information about the floating point processor
1885 (if present) or emulator. */
1886 static void
1887 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
1888 struct frame_info *frame, const char *args)
1889 {
1890 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
1891 int type;
1892
1893 type = (status >> 24) & 127;
1894 if (status & (1 << 31))
1895 printf (_("Hardware FPU type %d\n"), type);
1896 else
1897 printf (_("Software FPU type %d\n"), type);
1898 /* i18n: [floating point unit] mask */
1899 fputs (_("mask: "), stdout);
1900 print_fpu_flags (status >> 16);
1901 /* i18n: [floating point unit] flags */
1902 fputs (_("flags: "), stdout);
1903 print_fpu_flags (status);
1904 }
1905
1906 /* Construct the ARM extended floating point type. */
1907 static struct type *
1908 arm_ext_type (struct gdbarch *gdbarch)
1909 {
1910 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1911
1912 if (!tdep->arm_ext_type)
1913 tdep->arm_ext_type
1914 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
1915 floatformats_arm_ext);
1916
1917 return tdep->arm_ext_type;
1918 }
1919
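/* Construct the type used to present the contents of a NEON
   double-precision (D) register: a union with one vector view for
   each supported element size.  */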
1920 static struct type *
1921 arm_neon_double_type (struct gdbarch *gdbarch)
1922 {
1923 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1924
1925 if (tdep->neon_double_type == NULL)
1926 {
1927 struct type *t, *elem;
1928
1929 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
1930 TYPE_CODE_UNION);
1931 elem = builtin_type (gdbarch)->builtin_uint8;
1932 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
1933 elem = builtin_type (gdbarch)->builtin_uint16;
1934 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
1935 elem = builtin_type (gdbarch)->builtin_uint32;
1936 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
1937 elem = builtin_type (gdbarch)->builtin_uint64;
1938 append_composite_type_field (t, "u64", elem);
1939 elem = builtin_type (gdbarch)->builtin_float;
1940 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
1941 elem = builtin_type (gdbarch)->builtin_double;
1942 append_composite_type_field (t, "f64", elem);
1943
1944 TYPE_VECTOR (t) = 1;
1945 TYPE_NAME (t) = "neon_d";
1946 tdep->neon_double_type = t;
1947 }
1948
1949 return tdep->neon_double_type;
1950 }
1951
1952 /* FIXME: The vector types are not correctly ordered on big-endian
1953 targets. Just as s0 is the low bits of d0, d0[0] is also the low
1954 bits of d0 - regardless of what unit size is being held in d0. So
1955 the offset of the first uint8 in d0 is 7, but the offset of the
1956 first float is 4. This code works as-is for little-endian
1957 targets. */
1958
1959 static struct type *
1960 arm_neon_quad_type (struct gdbarch *gdbarch)
1961 {
1962 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1963
1964 if (tdep->neon_quad_type == NULL)
1965 {
1966 struct type *t, *elem;
1967
1968 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
1969 TYPE_CODE_UNION);
1970 elem = builtin_type (gdbarch)->builtin_uint8;
1971 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
1972 elem = builtin_type (gdbarch)->builtin_uint16;
1973 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
1974 elem = builtin_type (gdbarch)->builtin_uint32;
1975 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
1976 elem = builtin_type (gdbarch)->builtin_uint64;
1977 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
1978 elem = builtin_type (gdbarch)->builtin_float;
1979 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
1980 elem = builtin_type (gdbarch)->builtin_double;
1981 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
1982
1983 TYPE_VECTOR (t) = 1;
1984 TYPE_NAME (t) = "neon_q";
1985 tdep->neon_quad_type = t;
1986 }
1987
1988 return tdep->neon_quad_type;
1989 }
1990
1991 /* Return the GDB type object for the "standard" data type of data in
1992 register N. */
1993
1994 static struct type *
1995 arm_register_type (struct gdbarch *gdbarch, int regnum)
1996 {
1997 int num_regs = gdbarch_num_regs (gdbarch);
1998
1999 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
2000 && regnum >= num_regs && regnum < num_regs + 32)
2001 return builtin_type (gdbarch)->builtin_float;
2002
2003 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
2004 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
2005 return arm_neon_quad_type (gdbarch);
2006
2007 /* If the target description has register information, we are only
2008 in this function so that we can override the types of
2009 double-precision registers for NEON. */
2010 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
2011 {
2012 struct type *t = tdesc_register_type (gdbarch, regnum);
2013
2014 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
2015 && TYPE_CODE (t) == TYPE_CODE_FLT
2016 && gdbarch_tdep (gdbarch)->have_neon)
2017 return arm_neon_double_type (gdbarch);
2018 else
2019 return t;
2020 }
2021
2022 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
2023 {
2024 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
2025 return builtin_type (gdbarch)->builtin_void;
2026
2027 return arm_ext_type (gdbarch);
2028 }
2029 else if (regnum == ARM_SP_REGNUM)
2030 return builtin_type (gdbarch)->builtin_data_ptr;
2031 else if (regnum == ARM_PC_REGNUM)
2032 return builtin_type (gdbarch)->builtin_func_ptr;
2033 else if (regnum >= ARRAY_SIZE (arm_register_names))
2034 /* These registers are only supported on targets which supply
2035 an XML description. */
2036 return builtin_type (gdbarch)->builtin_int0;
2037 else
2038 return builtin_type (gdbarch)->builtin_uint32;
2039 }
2040
2041 /* Map a DWARF register REGNUM onto the appropriate GDB register
2042 number. */
2043
2044 static int
2045 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2046 {
2047 /* Core integer regs. */
2048 if (reg >= 0 && reg <= 15)
2049 return reg;
2050
2051 /* Legacy FPA encoding. These were once used in a way which
2052 overlapped with VFP register numbering, so their use is
2053 discouraged, but GDB doesn't support the ARM toolchain
2054 which used them for VFP. */
2055 if (reg >= 16 && reg <= 23)
2056 return ARM_F0_REGNUM + reg - 16;
2057
2058 /* New assignments for the FPA registers. */
2059 if (reg >= 96 && reg <= 103)
2060 return ARM_F0_REGNUM + reg - 96;
2061
2062 /* WMMX register assignments. */
2063 if (reg >= 104 && reg <= 111)
2064 return ARM_WCGR0_REGNUM + reg - 104;
2065
2066 if (reg >= 112 && reg <= 127)
2067 return ARM_WR0_REGNUM + reg - 112;
2068
2069 if (reg >= 192 && reg <= 199)
2070 return ARM_WC0_REGNUM + reg - 192;
2071
2072 /* VFP v2 registers. A double precision value is actually
2073 in d1 rather than s2, but the ABI only defines numbering
2074 for the single precision registers. This will "just work"
2075 in GDB for little endian targets (we'll read eight bytes,
2076 starting in s0 and then progressing to s1), but will be
2077 reversed on big endian targets with VFP. This won't
2078 be a problem for the new Neon quad registers; you're supposed
2079 to use DW_OP_piece for those. */
2080 if (reg >= 64 && reg <= 95)
2081 {
2082 char name_buf[4];
2083
2084 sprintf (name_buf, "s%d", reg - 64);
2085 return user_reg_map_name_to_regnum (gdbarch, name_buf,
2086 strlen (name_buf));
2087 }
2088
2089 /* VFP v3 / Neon registers. This range is also used for VFP v2
2090 registers, except that it now describes d0 instead of s0. */
2091 if (reg >= 256 && reg <= 287)
2092 {
2093 char name_buf[4];
2094
2095 sprintf (name_buf, "d%d", reg - 256);
2096 return user_reg_map_name_to_regnum (gdbarch, name_buf,
2097 strlen (name_buf));
2098 }
2099
2100 return -1;
2101 }
2102
2103 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
2104 static int
2105 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
2106 {
2107 int reg = regnum;
2108 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
2109
2110 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
2111 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
2112
2113 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
2114 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
2115
2116 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
2117 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
2118
2119 if (reg < NUM_GREGS)
2120 return SIM_ARM_R0_REGNUM + reg;
2121 reg -= NUM_GREGS;
2122
2123 if (reg < NUM_FREGS)
2124 return SIM_ARM_FP0_REGNUM + reg;
2125 reg -= NUM_FREGS;
2126
2127 if (reg < NUM_SREGS)
2128 return SIM_ARM_FPS_REGNUM + reg;
2129 reg -= NUM_SREGS;
2130
2131 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
2132 }
2133
2134 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
2135 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
2136 It is thought that this is the floating-point register format on
2137 little-endian systems. */
2138
2139 static void
2140 convert_from_extended (const struct floatformat *fmt, const void *ptr,
2141 void *dbl, int endianess)
2142 {
2143 DOUBLEST d;
2144
2145 if (endianess == BFD_ENDIAN_BIG)
2146 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
2147 else
2148 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
2149 ptr, &d);
2150 floatformat_from_doublest (fmt, &d, dbl);
2151 }
2152
2153 static void
2154 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
2155 int endianess)
2156 {
2157 DOUBLEST d;
2158
2159 floatformat_to_doublest (fmt, ptr, &d);
2160 if (endianess == BFD_ENDIAN_BIG)
2161 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
2162 else
2163 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
2164 &d, dbl);
2165 }
2166
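/* Return non-zero if condition COND holds for the CPSR value
   STATUS_REG.  The AL and NV codes are treated as always true.  */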
2167 static int
2168 condition_true (unsigned long cond, unsigned long status_reg)
2169 {
2170 if (cond == INST_AL || cond == INST_NV)
2171 return 1;
2172
2173 switch (cond)
2174 {
2175 case INST_EQ:
2176 return ((status_reg & FLAG_Z) != 0);
2177 case INST_NE:
2178 return ((status_reg & FLAG_Z) == 0);
2179 case INST_CS:
2180 return ((status_reg & FLAG_C) != 0);
2181 case INST_CC:
2182 return ((status_reg & FLAG_C) == 0);
2183 case INST_MI:
2184 return ((status_reg & FLAG_N) != 0);
2185 case INST_PL:
2186 return ((status_reg & FLAG_N) == 0);
2187 case INST_VS:
2188 return ((status_reg & FLAG_V) != 0);
2189 case INST_VC:
2190 return ((status_reg & FLAG_V) == 0);
2191 case INST_HI:
2192 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
2193 case INST_LS:
2194 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
2195 case INST_GE:
2196 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
2197 case INST_LT:
2198 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
2199 case INST_GT:
2200 return (((status_reg & FLAG_Z) == 0)
2201 && (((status_reg & FLAG_N) == 0)
2202 == ((status_reg & FLAG_V) == 0)));
2203 case INST_LE:
2204 return (((status_reg & FLAG_Z) != 0)
2205 || (((status_reg & FLAG_N) == 0)
2206 != ((status_reg & FLAG_V) == 0)));
2207 }
2208 return 1;
2209 }
2210
2211 /* Support routines for single stepping. Calculate the next PC value. */
2212 #define submask(x) ((1L << ((x) + 1)) - 1)
2213 #define bit(obj,st) (((obj) >> (st)) & 1)
2214 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
2215 #define sbits(obj,st,fn) \
2216 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
2217 #define BranchDest(addr,instr) \
2218 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
2219 #define ARM_PC_32 1
2220
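/* Worked example (for illustration): the instruction 0xeafffffe is
   "b ." -- an unconditional branch to itself.  bits (0xeafffffe, 0, 23)
   is 0xfffffe; bit 23 of that field is set, so sbits (0xeafffffe, 0, 23)
   sign-extends it to -2, and BranchDest (addr, 0xeafffffe) yields
   addr + 8 + (-2 << 2) == addr.  */

/* Return the value of the shifter operand of INST: a register,
   optionally shifted by an immediate or by another register.  PC_VAL
   is the prefetch-adjusted PC, CARRY the carry flag used for RRX, and
   STATUS_REG the CPSR.  */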
2221 static unsigned long
2222 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
2223 unsigned long pc_val, unsigned long status_reg)
2224 {
2225 unsigned long res, shift;
2226 int rm = bits (inst, 0, 3);
2227 unsigned long shifttype = bits (inst, 5, 6);
2228
2229 if (bit (inst, 4))
2230 {
2231 int rs = bits (inst, 8, 11);
2232 shift = (rs == 15 ? pc_val + 8
2233 : get_frame_register_unsigned (frame, rs)) & 0xFF;
2234 }
2235 else
2236 shift = bits (inst, 7, 11);
2237
2238 res = (rm == 15
2239 ? ((pc_val | (ARM_PC_32 ? 0 : status_reg))
2240 + (bit (inst, 4) ? 12 : 8))
2241 : get_frame_register_unsigned (frame, rm));
2242
2243 switch (shifttype)
2244 {
2245 case 0: /* LSL */
2246 res = shift >= 32 ? 0 : res << shift;
2247 break;
2248
2249 case 1: /* LSR */
2250 res = shift >= 32 ? 0 : res >> shift;
2251 break;
2252
2253 case 2: /* ASR */
2254 if (shift >= 32)
2255 shift = 31;
2256 res = ((res & 0x80000000L)
2257 ? ~((~res) >> shift) : res >> shift);
2258 break;
2259
2260 case 3: /* ROR/RRX */
2261 shift &= 31;
2262 if (shift == 0)
2263 res = (res >> 1) | (carry ? 0x80000000L : 0);
2264 else
2265 res = (res >> shift) | (res << (32 - shift));
2266 break;
2267 }
2268
2269 return res & 0xffffffff;
2270 }
2271
2272 /* Return number of 1-bits in VAL. */
2273
2274 static int
2275 bitcount (unsigned long val)
2276 {
2277 int nbits;
2278 for (nbits = 0; val != 0; nbits++)
2279 val &= val - 1; /* delete rightmost 1-bit in val */
2280 return nbits;
2281 }
2282
2283 /* Return the size in bytes of the complete Thumb instruction whose
2284 first halfword is INST1. */
2285
2286 static int
2287 thumb_insn_size (unsigned short inst1)
2288 {
2289 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
2290 return 4;
2291 else
2292 return 2;
2293 }
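
/* For example, 0xf7ff (the first halfword of a BL) has its top five
   bits in the 0b11101/0b11110/0b11111 range and so starts a 32-bit
   instruction, while 0xbd00 ("pop {pc}") is a 16-bit instruction.  */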
2294
2295 static int
2296 thumb_advance_itstate (unsigned int itstate)
2297 {
2298 /* Preserve IT[7:5], the first three bits of the condition. Shift
2299 the upcoming condition flags left by one bit. */
2300 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
2301
2302 /* If we have finished the IT block, clear the state. */
2303 if ((itstate & 0x0f) == 0)
2304 itstate = 0;
2305
2306 return itstate;
2307 }
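
/* For illustration (assuming the usual encoding of "ITTE EQ" as
   0xbf06, i.e. firstcond:mask == 0x06): the condition used for the
   first instruction in the block is 0x06 >> 4 == 0 (EQ); advancing
   gives 0x0c (EQ again for the second instruction), then 0x18
   (condition 1, NE, for the third), and a final advance clears the
   state to 0.  This is only a worked example of the state machine
   used below.  */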
2308
2309 /* Find the next PC after the current instruction executes. In some
2310 cases we can not statically determine the answer (see the IT state
2311 handling in this function); in that case, a breakpoint may be
2312 inserted in addition to the returned PC, which will be used to set
2313 another breakpoint by our caller. */
2314
2315 static CORE_ADDR
2316 thumb_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
2317 {
2318 struct gdbarch *gdbarch = get_frame_arch (frame);
2319 struct address_space *aspace = get_frame_address_space (frame);
2320 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2321 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2322 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
2323 unsigned short inst1;
2324 CORE_ADDR nextpc = pc + 2; /* default is next instruction */
2325 unsigned long offset;
2326 ULONGEST status, itstate;
2327
2328 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2329
2330 /* Thumb-2 conditional execution support. There are eight bits in
2331 the CPSR which describe conditional execution state. Once
2332 reconstructed (they're in a funny order), the low five bits
2333 describe the low bit of the condition for each instruction and
2334 how many instructions remain. The high three bits describe the
2335 base condition. One of the low four bits will be set if an IT
2336 block is active. These bits read as zero on earlier
2337 processors. */
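  /* Concretely, the extraction below takes CPSR bits [15:10] as
     ITSTATE[7:2] and CPSR bits [26:25] as ITSTATE[1:0].  */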
2338 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
2339 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
2340
2341 /* If-Then handling. On GNU/Linux, where this routine is used, we
2342 use an undefined instruction as a breakpoint. Unlike BKPT, IT
2343 can disable execution of the undefined instruction. So we might
2344 miss the breakpoint if we set it on a skipped conditional
2345 instruction. Because conditional instructions can change the
2346 flags, affecting the execution of further instructions, we may
2347 need to set two breakpoints. */
2348
2349 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
2350 {
2351 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
2352 {
2353 /* An IT instruction. Because this instruction does not
2354 modify the flags, we can accurately predict the next
2355 executed instruction. */
2356 itstate = inst1 & 0x00ff;
2357 pc += thumb_insn_size (inst1);
2358
2359 while (itstate != 0 && ! condition_true (itstate >> 4, status))
2360 {
2361 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2362 pc += thumb_insn_size (inst1);
2363 itstate = thumb_advance_itstate (itstate);
2364 }
2365
2366 return pc;
2367 }
2368 else if (itstate != 0)
2369 {
2370 /* We are in a conditional block. Check the condition. */
2371 if (! condition_true (itstate >> 4, status))
2372 {
2373 /* Advance to the next executed instruction. */
2374 pc += thumb_insn_size (inst1);
2375 itstate = thumb_advance_itstate (itstate);
2376
2377 while (itstate != 0 && ! condition_true (itstate >> 4, status))
2378 {
2379 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2380 pc += thumb_insn_size (inst1);
2381 itstate = thumb_advance_itstate (itstate);
2382 }
2383
2384 return pc;
2385 }
2386 else if ((itstate & 0x0f) == 0x08)
2387 {
2388 /* This is the last instruction of the conditional
2389 block, and it is executed. We can handle it normally
2390 because the following instruction is not conditional,
2391 and we must handle it normally because it is
2392 permitted to branch. Fall through. */
2393 }
2394 else
2395 {
2396 int cond_negated;
2397
2398 /* There are conditional instructions after this one.
2399 If this instruction modifies the flags, then we can
2400 not predict what the next executed instruction will
2401 be. Fortunately, this instruction is architecturally
2402 forbidden to branch; we know it will fall through.
2403 Start by skipping past it. */
2404 pc += thumb_insn_size (inst1);
2405 itstate = thumb_advance_itstate (itstate);
2406
2407 /* Set a breakpoint on the following instruction. */
2408 gdb_assert ((itstate & 0x0f) != 0);
2409 insert_single_step_breakpoint (gdbarch, aspace, pc);
2410 cond_negated = (itstate >> 4) & 1;
2411
2412 /* Skip all following instructions with the same
2413 condition. If there is a later instruction in the IT
2414 block with the opposite condition, set the other
2415 breakpoint there. If not, then set a breakpoint on
2416 the instruction after the IT block. */
2417 do
2418 {
2419 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2420 pc += thumb_insn_size (inst1);
2421 itstate = thumb_advance_itstate (itstate);
2422 }
2423 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
2424
2425 return pc;
2426 }
2427 }
2428 }
2429 else if (itstate & 0x0f)
2430 {
2431 /* We are in a conditional block. Check the condition. */
2432 int cond = itstate >> 4;
2433
2434 if (! condition_true (cond, status))
2435 {
2436 /* Advance to the next instruction. All the 32-bit
2437 instructions share a common prefix. */
2438 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
2439 return pc + 4;
2440 else
2441 return pc + 2;
2442 }
2443
2444 /* Otherwise, handle the instruction normally. */
2445 }
2446
2447 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
2448 {
2449 CORE_ADDR sp;
2450
2451 /* Fetch the saved PC from the stack. It's stored above
2452 all of the other registers. */
2453 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
2454 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
2455 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
2456 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2457 if (nextpc == pc)
2458 error (_("Infinite loop detected"));
2459 }
2460 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
2461 {
2462 unsigned long cond = bits (inst1, 8, 11);
2463 if (cond != 0x0f && condition_true (cond, status)) /* 0x0f = SWI */
2464 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
2465 }
2466 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
2467 {
2468 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
2469 }
2470 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
2471 {
2472 unsigned short inst2;
2473 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
2474
2475 /* Default to the next instruction. */
2476 nextpc = pc + 4;
2477
2478 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
2479 {
2480 /* Branches and miscellaneous control instructions. */
2481
2482 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
2483 {
2484 /* B, BL, BLX. */
2485 int j1, j2, imm1, imm2;
2486
2487 imm1 = sbits (inst1, 0, 10);
2488 imm2 = bits (inst2, 0, 10);
2489 j1 = bit (inst2, 13);
2490 j2 = bit (inst2, 11);
2491
2492 offset = ((imm1 << 12) + (imm2 << 1));
2493 offset ^= ((!j2) << 22) | ((!j1) << 23);
2494
2495 nextpc = pc_val + offset;
2496 /* For BLX make sure to clear the low bits. */
2497 if (bit (inst2, 12) == 0)
2498 nextpc = nextpc & 0xfffffffc;
2499 }
2500 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
2501 {
2502 /* SUBS PC, LR, #imm8. */
2503 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
2504 nextpc -= inst2 & 0x00ff;
2505 }
2506 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
2507 {
2508 /* Conditional branch. */
2509 if (condition_true (bits (inst1, 6, 9), status))
2510 {
2511 int sign, j1, j2, imm1, imm2;
2512
2513 sign = sbits (inst1, 10, 10);
2514 imm1 = bits (inst1, 0, 5);
2515 imm2 = bits (inst2, 0, 10);
2516 j1 = bit (inst2, 13);
2517 j2 = bit (inst2, 11);
2518
2519 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
2520 offset += (imm1 << 12) + (imm2 << 1);
2521
2522 nextpc = pc_val + offset;
2523 }
2524 }
2525 }
2526 else if ((inst1 & 0xfe50) == 0xe810)
2527 {
2528 /* Load multiple or RFE. */
2529 int rn, offset, load_pc = 1;
2530
2531 rn = bits (inst1, 0, 3);
2532 if (bit (inst1, 7) && !bit (inst1, 8))
2533 {
2534 /* LDMIA or POP */
2535 if (!bit (inst2, 15))
2536 load_pc = 0;
2537 offset = bitcount (inst2) * 4 - 4;
2538 }
2539 else if (!bit (inst1, 7) && bit (inst1, 8))
2540 {
2541 /* LDMDB */
2542 if (!bit (inst2, 15))
2543 load_pc = 0;
2544 offset = -4;
2545 }
2546 else if (bit (inst1, 7) && bit (inst1, 8))
2547 {
2548 /* RFEIA */
2549 offset = 0;
2550 }
2551 else if (!bit (inst1, 7) && !bit (inst1, 8))
2552 {
2553 /* RFEDB */
2554 offset = -8;
2555 }
2556 else
2557 load_pc = 0;
2558
2559 if (load_pc)
2560 {
2561 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
2562 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
2563 }
2564 }
2565 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
2566 {
2567 /* MOV PC or MOVS PC. */
2568 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2569 }
2570 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
2571 {
2572 /* LDR PC. */
2573 CORE_ADDR base;
2574 int rn, load_pc = 1;
2575
2576 rn = bits (inst1, 0, 3);
2577 base = get_frame_register_unsigned (frame, rn);
2578 if (rn == 15)
2579 {
2580 base = (base + 4) & ~(CORE_ADDR) 0x3;
2581 if (bit (inst1, 7))
2582 base += bits (inst2, 0, 11);
2583 else
2584 base -= bits (inst2, 0, 11);
2585 }
2586 else if (bit (inst1, 7))
2587 base += bits (inst2, 0, 11);
2588 else if (bit (inst2, 11))
2589 {
2590 if (bit (inst2, 10))
2591 {
2592 if (bit (inst2, 9))
2593 base += bits (inst2, 0, 7);
2594 else
2595 base -= bits (inst2, 0, 7);
2596 }
2597 }
2598 else if ((inst2 & 0x0fc0) == 0x0000)
2599 {
2600 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
2601 base += get_frame_register_unsigned (frame, rm) << shift;
2602 }
2603 else
2604 /* Reserved. */
2605 load_pc = 0;
2606
2607 if (load_pc)
2608 nextpc = get_frame_memory_unsigned (frame, base, 4);
2609 }
2610 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
2611 {
2612 /* TBB. */
2613 CORE_ADDR table, offset, length;
2614
2615 table = get_frame_register_unsigned (frame, bits (inst1, 0, 3));
2616 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2617 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
2618 nextpc = pc_val + length;
2619 }
2620 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
2621 {
2622 /* TBH. */
2623 CORE_ADDR table, offset, length;
2624
2625 table = get_frame_register_unsigned (frame, bits (inst1, 0, 3));
2626 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2627 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
2628 nextpc = pc_val + length;
2629 }
2630 }
2631 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
2632 {
2633 if (bits (inst1, 3, 6) == 0x0f)
2634 nextpc = pc_val;
2635 else
2636 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
2637
2638 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2639 if (nextpc == pc)
2640 error (_("Infinite loop detected"));
2641 }
2642 else if ((inst1 & 0xf500) == 0xb100)
2643 {
2644 /* CBNZ or CBZ. */
2645 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
2646 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
2647
2648 if (bit (inst1, 11) && reg != 0)
2649 nextpc = pc_val + imm;
2650 else if (!bit (inst1, 11) && reg == 0)
2651 nextpc = pc_val + imm;
2652 }
2653
2654 return nextpc;
2655 }
2656
2657 CORE_ADDR
2658 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
2659 {
2660 struct gdbarch *gdbarch = get_frame_arch (frame);
2661 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2662 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2663 unsigned long pc_val;
2664 unsigned long this_instr;
2665 unsigned long status;
2666 CORE_ADDR nextpc;
2667
2668 if (arm_frame_is_thumb (frame))
2669 return thumb_get_next_pc (frame, pc);
2670
2671 pc_val = (unsigned long) pc;
2672 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
2673
2674 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
2675 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
2676
2677 if (bits (this_instr, 28, 31) == INST_NV)
2678 switch (bits (this_instr, 24, 27))
2679 {
2680 case 0xa:
2681 case 0xb:
2682 {
2683 /* Branch with Link and change to Thumb. */
2684 nextpc = BranchDest (pc, this_instr);
2685 nextpc |= bit (this_instr, 24) << 1;
2686
2687 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2688 if (nextpc == pc)
2689 error (_("Infinite loop detected"));
2690 break;
2691 }
2692 case 0xc:
2693 case 0xd:
2694 case 0xe:
2695 /* Coprocessor register transfer. */
2696 if (bits (this_instr, 12, 15) == 15)
2697 error (_("Invalid update to pc in instruction"));
2698 break;
2699 }
2700 else if (condition_true (bits (this_instr, 28, 31), status))
2701 {
2702 switch (bits (this_instr, 24, 27))
2703 {
2704 case 0x0:
2705 case 0x1: /* data processing */
2706 case 0x2:
2707 case 0x3:
2708 {
2709 unsigned long operand1, operand2, result = 0;
2710 unsigned long rn;
2711 int c;
2712
2713 if (bits (this_instr, 12, 15) != 15)
2714 break;
2715
2716 if (bits (this_instr, 22, 25) == 0
2717 && bits (this_instr, 4, 7) == 9) /* multiply */
2718 error (_("Invalid update to pc in instruction"));
2719
2720 /* BX <reg>, BLX <reg> */
2721 if (bits (this_instr, 4, 27) == 0x12fff1
2722 || bits (this_instr, 4, 27) == 0x12fff3)
2723 {
2724 rn = bits (this_instr, 0, 3);
2725 result = (rn == 15) ? pc_val + 8
2726 : get_frame_register_unsigned (frame, rn);
2727 nextpc = (CORE_ADDR) gdbarch_addr_bits_remove
2728 (gdbarch, result);
2729
2730 if (nextpc == pc)
2731 error (_("Infinite loop detected"));
2732
2733 return nextpc;
2734 }
2735
2736 /* Multiply into PC */
2737 c = (status & FLAG_C) ? 1 : 0;
2738 rn = bits (this_instr, 16, 19);
2739 operand1 = (rn == 15) ? pc_val + 8
2740 : get_frame_register_unsigned (frame, rn);
2741
2742 if (bit (this_instr, 25))
2743 {
2744 unsigned long immval = bits (this_instr, 0, 7);
2745 unsigned long rotate = 2 * bits (this_instr, 8, 11);
2746 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
2747 & 0xffffffff;
2748 }
2749 else /* operand 2 is a shifted register */
2750 operand2 = shifted_reg_val (frame, this_instr, c, pc_val, status);
2751
2752 switch (bits (this_instr, 21, 24))
2753 {
2754 case 0x0: /*and */
2755 result = operand1 & operand2;
2756 break;
2757
2758 case 0x1: /*eor */
2759 result = operand1 ^ operand2;
2760 break;
2761
2762 case 0x2: /*sub */
2763 result = operand1 - operand2;
2764 break;
2765
2766 case 0x3: /*rsb */
2767 result = operand2 - operand1;
2768 break;
2769
2770 case 0x4: /*add */
2771 result = operand1 + operand2;
2772 break;
2773
2774 case 0x5: /*adc */
2775 result = operand1 + operand2 + c;
2776 break;
2777
2778 case 0x6: /*sbc */
2779 result = operand1 - operand2 + c;
2780 break;
2781
2782 case 0x7: /*rsc */
2783 result = operand2 - operand1 + c;
2784 break;
2785
2786 case 0x8:
2787 case 0x9:
2788 case 0xa:
2789 case 0xb: /* tst, teq, cmp, cmn */
2790 result = (unsigned long) nextpc;
2791 break;
2792
2793 case 0xc: /*orr */
2794 result = operand1 | operand2;
2795 break;
2796
2797 case 0xd: /*mov */
2798 /* Always step into a function. */
2799 result = operand2;
2800 break;
2801
2802 case 0xe: /*bic */
2803 result = operand1 & ~operand2;
2804 break;
2805
2806 case 0xf: /*mvn */
2807 result = ~operand2;
2808 break;
2809 }
2810 nextpc = (CORE_ADDR) gdbarch_addr_bits_remove
2811 (gdbarch, result);
2812
2813 if (nextpc == pc)
2814 error (_("Infinite loop detected"));
2815 break;
2816 }
2817
2818 case 0x4:
2819 case 0x5: /* data transfer */
2820 case 0x6:
2821 case 0x7:
2822 if (bit (this_instr, 20))
2823 {
2824 /* load */
2825 if (bits (this_instr, 12, 15) == 15)
2826 {
2827 /* rd == pc */
2828 unsigned long rn;
2829 unsigned long base;
2830
2831 if (bit (this_instr, 22))
2832 error (_("Invalid update to pc in instruction"));
2833
2834 /* byte write to PC */
2835 rn = bits (this_instr, 16, 19);
2836 base = (rn == 15) ? pc_val + 8
2837 : get_frame_register_unsigned (frame, rn);
2838 if (bit (this_instr, 24))
2839 {
2840 /* pre-indexed */
2841 int c = (status & FLAG_C) ? 1 : 0;
2842 unsigned long offset =
2843 (bit (this_instr, 25)
2844 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
2845 : bits (this_instr, 0, 11));
2846
2847 if (bit (this_instr, 23))
2848 base += offset;
2849 else
2850 base -= offset;
2851 }
2852 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
2853 4, byte_order);
2854
2855 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2856
2857 if (nextpc == pc)
2858 error (_("Infinite loop detected"));
2859 }
2860 }
2861 break;
2862
2863 case 0x8:
2864 case 0x9: /* block transfer */
2865 if (bit (this_instr, 20))
2866 {
2867 /* LDM */
2868 if (bit (this_instr, 15))
2869 {
2870 /* loading pc */
2871 int offset = 0;
2872
2873 if (bit (this_instr, 23))
2874 {
2875 /* up */
2876 unsigned long reglist = bits (this_instr, 0, 14);
2877 offset = bitcount (reglist) * 4;
2878 if (bit (this_instr, 24)) /* pre */
2879 offset += 4;
2880 }
2881 else if (bit (this_instr, 24))
2882 offset = -4;
2883
2884 {
2885 unsigned long rn_val =
2886 get_frame_register_unsigned (frame,
2887 bits (this_instr, 16, 19));
2888 nextpc =
2889 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
2890 + offset),
2891 4, byte_order);
2892 }
2893 nextpc = gdbarch_addr_bits_remove
2894 (gdbarch, nextpc);
2895 if (nextpc == pc)
2896 error (_("Infinite loop detected"));
2897 }
2898 }
2899 break;
2900
2901 case 0xb: /* branch & link */
2902 case 0xa: /* branch */
2903 {
2904 nextpc = BranchDest (pc, this_instr);
2905
2906 nextpc = gdbarch_addr_bits_remove (gdbarch, nextpc);
2907 if (nextpc == pc)
2908 error (_("Infinite loop detected"));
2909 break;
2910 }
2911
2912 case 0xc:
2913 case 0xd:
2914 case 0xe: /* coproc ops */
2915 case 0xf: /* SWI */
2916 break;
2917
2918 default:
2919 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
2920 return (pc);
2921 }
2922 }
2923
2924 return nextpc;
2925 }
2926
2927 /* single_step() is called just before we want to resume the inferior,
2928 if we want to single-step it but there is no hardware or kernel
2929 single-step support. We find the target of the coming instruction
2930 and breakpoint it. */
2931
2932 int
2933 arm_software_single_step (struct frame_info *frame)
2934 {
2935 struct gdbarch *gdbarch = get_frame_arch (frame);
2936 struct address_space *aspace = get_frame_address_space (frame);
2937
2938 /* NOTE: This may insert the wrong breakpoint instruction when
2939 single-stepping over a mode-changing instruction, if the
2940 CPSR heuristics are used. */
2941
2942 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
2943 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
2944
2945 return 1;
2946 }
2947
2948 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
2949 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
2950 NULL if an error occurs. BUF is freed. */
2951
2952 static gdb_byte *
2953 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
2954 int old_len, int new_len)
2955 {
2956 gdb_byte *new_buf, *middle;
2957 int bytes_to_read = new_len - old_len;
2958
2959 new_buf = xmalloc (new_len);
2960 memcpy (new_buf + bytes_to_read, buf, old_len);
2961 xfree (buf);
2962 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
2963 {
2964 xfree (new_buf);
2965 return NULL;
2966 }
2967 return new_buf;
2968 }
2969
2970 /* An IT block is at most the 2-byte IT instruction followed by
2971 four 4-byte instructions. The furthest back we must search to
2972 find an IT block that affects the current instruction is thus
2973 2 + 3 * 4 == 14 bytes. */
2974 #define MAX_IT_BLOCK_PREFIX 14
2975
2976 /* Use a quick scan if there are more than this many bytes of
2977 code. */
2978 #define IT_SCAN_THRESHOLD 32
2979
2980 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
2981 A breakpoint in an IT block may not be hit, depending on the
2982 condition flags. */
2983 static CORE_ADDR
2984 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
2985 {
2986 gdb_byte *buf;
2987 char map_type;
2988 CORE_ADDR boundary, func_start;
2989 int buf_len, buf2_len;
2990 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
2991 int i, any, last_it, last_it_count;
2992
2993 /* If we are using BKPT breakpoints, none of this is necessary. */
2994 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
2995 return bpaddr;
2996
2997 /* ARM mode does not have this problem. */
2998 if (!arm_pc_is_thumb (bpaddr))
2999 return bpaddr;
3000
3001 /* We are setting a breakpoint in Thumb code that could potentially
3002 contain an IT block. The first step is to find how much Thumb
3003 code there is; we do not need to read outside of known Thumb
3004 sequences. */
3005 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
3006 if (map_type == 0)
3007 /* Thumb-2 code must have mapping symbols to have a chance. */
3008 return bpaddr;
3009
3010 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
3011
3012 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
3013 && func_start > boundary)
3014 boundary = func_start;
3015
3016 /* Search for a candidate IT instruction. We have to do some fancy
3017 footwork to distinguish a real IT instruction from the second
3018 half of a 32-bit instruction, but there is no need for that if
3019 there's no candidate. */
3020 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
3021 if (buf_len == 0)
3022 /* No room for an IT instruction. */
3023 return bpaddr;
3024
3025 buf = xmalloc (buf_len);
3026 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
3027 return bpaddr;
3028 any = 0;
3029 for (i = 0; i < buf_len; i += 2)
3030 {
3031 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3032 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3033 {
3034 any = 1;
3035 break;
3036 }
3037 }
3038 if (any == 0)
3039 {
3040 xfree (buf);
3041 return bpaddr;
3042 }
3043
3044 /* OK, the code bytes before this instruction contain at least one
3045 halfword which resembles an IT instruction. We know that it's
3046 Thumb code, but there are still two possibilities. Either the
3047 halfword really is an IT instruction, or it is the second half of
3048 a 32-bit Thumb instruction. The only way we can tell is to
3049 scan forwards from a known instruction boundary. */
3050 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
3051 {
3052 int definite;
3053
3054 /* There's a lot of code before this instruction. Start with an
3055 optimistic search; it's easy to recognize halfwords that can
3056 not be the start of a 32-bit instruction, and use that to
3057 lock on to the instruction boundaries. */
3058 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
3059 if (buf == NULL)
3060 return bpaddr;
3061 buf_len = IT_SCAN_THRESHOLD;
3062
3063 definite = 0;
3064 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
3065 {
3066 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3067 if (thumb_insn_size (inst1) == 2)
3068 {
3069 definite = 1;
3070 break;
3071 }
3072 }
3073
3074 /* At this point, if DEFINITE, BUF[I] is the first place we
3075 are sure that we know the instruction boundaries, and it is far
3076 enough from BPADDR that we could not miss an IT instruction
3077 affecting BPADDR. If ! DEFINITE, give up - start from a
3078 known boundary. */
3079 if (! definite)
3080 {
3081 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
3082 if (buf == NULL)
3083 return bpaddr;
3084 buf_len = bpaddr - boundary;
3085 i = 0;
3086 }
3087 }
3088 else
3089 {
3090 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
3091 if (buf == NULL)
3092 return bpaddr;
3093 buf_len = bpaddr - boundary;
3094 i = 0;
3095 }
3096
3097 /* Scan forwards. Find the last IT instruction before BPADDR. */
3098 last_it = -1;
3099 last_it_count = 0;
3100 while (i < buf_len)
3101 {
3102 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3103 last_it_count--;
3104 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3105 {
3106 last_it = i;
3107 if (inst1 & 0x0001)
3108 last_it_count = 4;
3109 else if (inst1 & 0x0002)
3110 last_it_count = 3;
3111 else if (inst1 & 0x0004)
3112 last_it_count = 2;
3113 else
3114 last_it_count = 1;
3115 }
3116 i += thumb_insn_size (inst1);
3117 }
3118
3119 xfree (buf);
3120
3121 if (last_it == -1)
3122 /* There wasn't really an IT instruction after all. */
3123 return bpaddr;
3124
3125 if (last_it_count < 1)
3126 /* It was too far away. */
3127 return bpaddr;
3128
3129 /* This really is a trouble spot. Move the breakpoint to the IT
3130 instruction. */
3131 return bpaddr - buf_len + last_it;
3132 }
3133
3134 /* ARM displaced stepping support.
3135
3136 Generally ARM displaced stepping works as follows:
3137
3138 1. When an instruction is to be single-stepped, it is first decoded by
3139 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
3140 Depending on the type of instruction, it is then copied to a scratch
3141 location, possibly in a modified form. The copy_* set of functions
3142 performs such modification, as necessary. A breakpoint is placed after
3143 the modified instruction in the scratch space to return control to GDB.
3144 Note in particular that instructions which modify the PC will no longer
3145 do so after modification.
3146
3147 2. The instruction is single-stepped, by setting the PC to the scratch
3148 location address, and resuming. Control returns to GDB when the
3149 breakpoint is hit.
3150
3151 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
3152 function used for the current instruction. This function's job is to
3153 put the CPU/memory state back to what it would have been if the
3154 instruction had been executed unmodified in its original location. */
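
/* As a concrete example (describing the copy_b_bl_blx/cleanup_branch
   pair below): to step over "bl <label>", copy_b_bl_blx records the
   condition, link flag and destination and copies a NOP (mov r0, r0)
   to the scratch area; the NOP is single-stepped there; and
   cleanup_branch then writes the return address to r14 and the
   destination to the PC, but only if the condition held.  */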
3155
3156 /* NOP instruction (mov r0, r0). */
3157 #define ARM_NOP 0xe1a00000
3158
3159 /* Helper for register reads for displaced stepping. In particular, this
3160 returns the PC as it would be seen by the instruction at its original
3161 location. */
3162
3163 ULONGEST
3164 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
3165 {
3166 ULONGEST ret;
3167
3168 if (regno == 15)
3169 {
3170 if (debug_displaced)
3171 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
3172 (unsigned long) from + 8);
3173 return (ULONGEST) from + 8; /* Pipeline offset. */
3174 }
3175 else
3176 {
3177 regcache_cooked_read_unsigned (regs, regno, &ret);
3178 if (debug_displaced)
3179 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
3180 regno, (unsigned long) ret);
3181 return ret;
3182 }
3183 }
3184
3185 static int
3186 displaced_in_arm_mode (struct regcache *regs)
3187 {
3188 ULONGEST ps;
3189
3190 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
3191
3192 return (ps & CPSR_T) == 0;
3193 }
3194
3195 /* Write to the PC as from a branch instruction. */
3196
3197 static void
3198 branch_write_pc (struct regcache *regs, ULONGEST val)
3199 {
3200 if (displaced_in_arm_mode (regs))
3201 /* Note: If bits 0/1 are set, this branch would be unpredictable for
3202 architecture versions < 6. */
3203 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x3);
3204 else
3205 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x1);
3206 }
3207
3208 /* Write to the PC as from a branch-exchange instruction. */
3209
3210 static void
3211 bx_write_pc (struct regcache *regs, ULONGEST val)
3212 {
3213 ULONGEST ps;
3214
3215 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
3216
3217 if ((val & 1) == 1)
3218 {
3219 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | CPSR_T);
3220 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
3221 }
3222 else if ((val & 2) == 0)
3223 {
3224 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM,
3225 ps & ~(ULONGEST) CPSR_T);
3226 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
3227 }
3228 else
3229 {
3230 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
3231 mode, align dest to 4 bytes). */
3232 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
3233 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM,
3234 ps & ~(ULONGEST) CPSR_T);
3235 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
3236 }
3237 }
3238
3239 /* Write to the PC as if from a load instruction. */
3240
3241 static void
3242 load_write_pc (struct regcache *regs, ULONGEST val)
3243 {
3244 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
3245 bx_write_pc (regs, val);
3246 else
3247 branch_write_pc (regs, val);
3248 }
3249
3250 /* Write to the PC as if from an ALU instruction. */
3251
3252 static void
3253 alu_write_pc (struct regcache *regs, ULONGEST val)
3254 {
3255 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
3256 bx_write_pc (regs, val);
3257 else
3258 branch_write_pc (regs, val);
3259 }
3260
3261 /* Helper for writing to registers for displaced stepping. Writing to the PC
3262 has varying effects depending on the instruction which does the write:
3263 this is controlled by the WRITE_PC argument. */
3264
3265 void
3266 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
3267 int regno, ULONGEST val, enum pc_write_style write_pc)
3268 {
3269 if (regno == 15)
3270 {
3271 if (debug_displaced)
3272 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
3273 (unsigned long) val);
3274 switch (write_pc)
3275 {
3276 case BRANCH_WRITE_PC:
3277 branch_write_pc (regs, val);
3278 break;
3279
3280 case BX_WRITE_PC:
3281 bx_write_pc (regs, val);
3282 break;
3283
3284 case LOAD_WRITE_PC:
3285 load_write_pc (regs, val);
3286 break;
3287
3288 case ALU_WRITE_PC:
3289 alu_write_pc (regs, val);
3290 break;
3291
3292 case CANNOT_WRITE_PC:
3293 warning (_("Instruction wrote to PC in an unexpected way when "
3294 "single-stepping"));
3295 break;
3296
3297 default:
3298 internal_error (__FILE__, __LINE__,
3299 _("Invalid argument to displaced_write_reg"));
3300 }
3301
3302 dsc->wrote_to_pc = 1;
3303 }
3304 else
3305 {
3306 if (debug_displaced)
3307 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
3308 regno, (unsigned long) val);
3309 regcache_cooked_write_unsigned (regs, regno, val);
3310 }
3311 }
3312
3313 /* This function is used to concisely determine if an instruction INSN
3314 references PC. Register fields of interest in INSN should have the
3315 corresponding fields of BITMASK set to 0b1111. The function returns 1
3316 if any of these fields in INSN reference the PC (also 0b1111, r15), else it
3317 returns 0. */
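/* For example, copy_preload below calls
   insn_references_pc (insn, 0x000f0000ul) to ask whether the Rn field
   (bits 19:16) of INSN is r15.  */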
3318
3319 static int
3320 insn_references_pc (uint32_t insn, uint32_t bitmask)
3321 {
3322 uint32_t lowbit = 1;
3323
3324 while (bitmask != 0)
3325 {
3326 uint32_t mask;
3327
3328 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
3329 ;
3330
3331 if (!lowbit)
3332 break;
3333
3334 mask = lowbit * 0xf;
3335
3336 if ((insn & mask) == mask)
3337 return 1;
3338
3339 bitmask &= ~mask;
3340 }
3341
3342 return 0;
3343 }
3344
3345 /* The simplest copy function. Many instructions have the same effect no
3346 matter what address they are executed at: in those cases, use this. */
3347
3348 static int
3349 copy_unmodified (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3350 const char *iname, struct displaced_step_closure *dsc)
3351 {
3352 if (debug_displaced)
3353 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
3354 "opcode/class '%s' unmodified\n", (unsigned long) insn,
3355 iname);
3356
3357 dsc->modinsn[0] = insn;
3358
3359 return 0;
3360 }
3361
3362 /* Preload instructions with immediate offset. */
3363
3364 static void
3365 cleanup_preload (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3366 struct regcache *regs, struct displaced_step_closure *dsc)
3367 {
3368 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3369 if (!dsc->u.preload.immed)
3370 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3371 }
3372
3373 static int
3374 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3375 struct displaced_step_closure *dsc)
3376 {
3377 unsigned int rn = bits (insn, 16, 19);
3378 ULONGEST rn_val;
3379 CORE_ADDR from = dsc->insn_addr;
3380
3381 if (!insn_references_pc (insn, 0x000f0000ul))
3382 return copy_unmodified (gdbarch, insn, "preload", dsc);
3383
3384 if (debug_displaced)
3385 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
3386 (unsigned long) insn);
3387
3388 /* Preload instructions:
3389
3390 {pli/pld} [rn, #+/-imm]
3391 ->
3392 {pli/pld} [r0, #+/-imm]. */
3393
3394 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3395 rn_val = displaced_read_reg (regs, from, rn);
3396 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3397
3398 dsc->u.preload.immed = 1;
3399
3400 dsc->modinsn[0] = insn & 0xfff0ffff;
3401
3402 dsc->cleanup = &cleanup_preload;
3403
3404 return 0;
3405 }
3406
3407 /* Preload instructions with register offset. */
3408
3409 static int
3410 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3411 struct displaced_step_closure *dsc)
3412 {
3413 unsigned int rn = bits (insn, 16, 19);
3414 unsigned int rm = bits (insn, 0, 3);
3415 ULONGEST rn_val, rm_val;
3416 CORE_ADDR from = dsc->insn_addr;
3417
3418 if (!insn_references_pc (insn, 0x000f000ful))
3419 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
3420
3421 if (debug_displaced)
3422 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
3423 (unsigned long) insn);
3424
3425 /* Preload register-offset instructions:
3426
3427 {pli/pld} [rn, rm {, shift}]
3428 ->
3429 {pli/pld} [r0, r1 {, shift}]. */
3430
3431 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3432 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3433 rn_val = displaced_read_reg (regs, from, rn);
3434 rm_val = displaced_read_reg (regs, from, rm);
3435 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3436 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
3437
3438 dsc->u.preload.immed = 0;
3439
3440 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
3441
3442 dsc->cleanup = &cleanup_preload;
3443
3444 return 0;
3445 }
3446
3447 /* Copy/cleanup coprocessor load and store instructions. */
3448
3449 static void
3450 cleanup_copro_load_store (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3451 struct regcache *regs,
3452 struct displaced_step_closure *dsc)
3453 {
3454 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3455
3456 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3457
3458 if (dsc->u.ldst.writeback)
3459 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
3460 }
3461
3462 static int
3463 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
3464 struct regcache *regs,
3465 struct displaced_step_closure *dsc)
3466 {
3467 unsigned int rn = bits (insn, 16, 19);
3468 ULONGEST rn_val;
3469 CORE_ADDR from = dsc->insn_addr;
3470
3471 if (!insn_references_pc (insn, 0x000f0000ul))
3472 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
3473
3474 if (debug_displaced)
3475 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
3476 "load/store insn %.8lx\n", (unsigned long) insn);
3477
3478 /* Coprocessor load/store instructions:
3479
3480 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
3481 ->
3482 {stc/stc2} [r0, #+/-imm].
3483
3484 ldc/ldc2 are handled identically. */
3485
3486 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3487 rn_val = displaced_read_reg (regs, from, rn);
3488 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3489
3490 dsc->u.ldst.writeback = bit (insn, 25);
3491 dsc->u.ldst.rn = rn;
3492
3493 dsc->modinsn[0] = insn & 0xfff0ffff;
3494
3495 dsc->cleanup = &cleanup_copro_load_store;
3496
3497 return 0;
3498 }
3499
3500 /* Clean up branch instructions (actually perform the branch, by setting
3501 PC). */
3502
3503 static void
3504 cleanup_branch (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
3505 struct displaced_step_closure *dsc)
3506 {
3507 ULONGEST from = dsc->insn_addr;
3508 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
3509 int branch_taken = condition_true (dsc->u.branch.cond, status);
3510 enum pc_write_style write_pc = dsc->u.branch.exchange
3511 ? BX_WRITE_PC : BRANCH_WRITE_PC;
3512
3513 if (!branch_taken)
3514 return;
3515
3516 if (dsc->u.branch.link)
3517 {
3518 ULONGEST pc = displaced_read_reg (regs, from, 15);
3519 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
3520 }
3521
3522 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
3523 }
3524
3525 /* Copy B/BL/BLX instructions with immediate destinations. */
3526
3527 static int
3528 copy_b_bl_blx (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3529 struct regcache *regs, struct displaced_step_closure *dsc)
3530 {
3531 unsigned int cond = bits (insn, 28, 31);
3532 int exchange = (cond == 0xf);
3533 int link = exchange || bit (insn, 24);
3534 CORE_ADDR from = dsc->insn_addr;
3535 long offset;
3536
3537 if (debug_displaced)
3538 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
3539 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
3540 (unsigned long) insn);
3541
3542 /* Implement "BL<cond> <label>" as:
3543
3544 Preparation: cond <- instruction condition
3545 Insn: mov r0, r0 (nop)
3546 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
3547
3548 B<cond> similar, but don't set r14 in cleanup. */
3549
3550 if (exchange)
3551 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
3552 then arrange the switch into Thumb mode. */
3553 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
3554 else
3555 offset = bits (insn, 0, 23) << 2;
3556
3557 if (bit (offset, 25))
3558 offset = offset | ~0x3ffffff;
3559
3560 dsc->u.branch.cond = cond;
3561 dsc->u.branch.link = link;
3562 dsc->u.branch.exchange = exchange;
3563 dsc->u.branch.dest = from + 8 + offset;
3564
3565 dsc->modinsn[0] = ARM_NOP;
3566
3567 dsc->cleanup = &cleanup_branch;
3568
3569 return 0;
3570 }
3571
3572 /* Copy BX/BLX with register-specified destinations. */
3573
3574 static int
3575 copy_bx_blx_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3576 struct regcache *regs, struct displaced_step_closure *dsc)
3577 {
3578 unsigned int cond = bits (insn, 28, 31);
3579 /* BX: x12xxx1x
3580 BLX: x12xxx3x. */
3581 int link = bit (insn, 5);
3582 unsigned int rm = bits (insn, 0, 3);
3583 CORE_ADDR from = dsc->insn_addr;
3584
3585 if (debug_displaced)
3586 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
3587 "%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
3588
3589 /* Implement "{BX,BLX}<cond> <reg>" as:
3590
3591 Preparation: cond <- instruction condition
3592 Insn: mov r0, r0 (nop)
3593 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
3594
3595 Don't set r14 in cleanup for BX. */
3596
3597 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
3598
3599 dsc->u.branch.cond = cond;
3600 dsc->u.branch.link = link;
3601 dsc->u.branch.exchange = 1;
3602
3603 dsc->modinsn[0] = ARM_NOP;
3604
3605 dsc->cleanup = &cleanup_branch;
3606
3607 return 0;
3608 }
3609
3610 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
3611
3612 static void
3613 cleanup_alu_imm (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3614 struct regcache *regs, struct displaced_step_closure *dsc)
3615 {
3616 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3617 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3618 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3619 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
3620 }
3621
3622 static int
3623 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3624 struct displaced_step_closure *dsc)
3625 {
3626 unsigned int rn = bits (insn, 16, 19);
3627 unsigned int rd = bits (insn, 12, 15);
3628 unsigned int op = bits (insn, 21, 24);
3629 int is_mov = (op == 0xd);
3630 ULONGEST rd_val, rn_val;
3631 CORE_ADDR from = dsc->insn_addr;
3632
3633 if (!insn_references_pc (insn, 0x000ff000ul))
3634 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
3635
3636 if (debug_displaced)
3637 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
3638 "%.8lx\n", is_mov ? "move" : "ALU",
3639 (unsigned long) insn);
3640
3641 /* Instruction is of form:
3642
3643 <op><cond> rd, [rn,] #imm
3644
3645 Rewrite as:
3646
3647 Preparation: tmp1, tmp2 <- r0, r1;
3648 r0, r1 <- rd, rn
3649 Insn: <op><cond> r0, r1, #imm
3650 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
3651 */
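  /* For instance (an illustrative encoding): "add r2, pc, #4" is
     0xe28f2004; masking with 0xfff00fff and OR-ing in 0x10000, as done
     below, yields 0xe2810004, i.e. "add r0, r1, #4", with r1 preloaded
     with the value the PC would have read as at the original
     location.  */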
3652
3653 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3654 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3655 rn_val = displaced_read_reg (regs, from, rn);
3656 rd_val = displaced_read_reg (regs, from, rd);
3657 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
3658 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
3659 dsc->rd = rd;
3660
3661 if (is_mov)
3662 dsc->modinsn[0] = insn & 0xfff00fff;
3663 else
3664 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
3665
3666 dsc->cleanup = &cleanup_alu_imm;
3667
3668 return 0;
3669 }
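
/* Illustrative sketch (not part of the original GDB code): what the bit
   manipulation above does to an ALU-immediate encoding.  Clearing bits
   12-19 removes the Rd and Rn fields, so the masked value names r0 as the
   destination; OR-ing in 0x10000 then selects r1 as the operand register.
   For MOV there is no Rn field, so only Rd is rewritten.  The helper name
   is hypothetical.  */

static uint32_t ATTRIBUTE_UNUSED
example_rewrite_alu_imm (uint32_t insn)
{
  int is_mov = ((insn >> 21) & 0xf) == 0xd;
  uint32_t masked = insn & 0xfff00fff;	/* Clear Rn (19:16) and Rd (15:12).  */

  /* e.g. "add r2, pc, #4" (0xe28f2004) becomes "add r0, r1, #4"
     (0xe2810004), with r1 preloaded with the value the PC would have.  */
  return is_mov ? masked : masked | 0x10000;
}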
3670
3671 /* Copy/cleanup arithmetic/logic insns with register RHS. */
3672
3673 static void
3674 cleanup_alu_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3675 struct regcache *regs, struct displaced_step_closure *dsc)
3676 {
3677 ULONGEST rd_val;
3678 int i;
3679
3680 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3681
3682 for (i = 0; i < 3; i++)
3683 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
3684
3685 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
3686 }
3687
3688 static int
3689 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3690 struct displaced_step_closure *dsc)
3691 {
3692 unsigned int rn = bits (insn, 16, 19);
3693 unsigned int rm = bits (insn, 0, 3);
3694 unsigned int rd = bits (insn, 12, 15);
3695 unsigned int op = bits (insn, 21, 24);
3696 int is_mov = (op == 0xd);
3697 ULONGEST rd_val, rn_val, rm_val;
3698 CORE_ADDR from = dsc->insn_addr;
3699
3700 if (!insn_references_pc (insn, 0x000ff00ful))
3701 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
3702
3703 if (debug_displaced)
3704 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
3705 is_mov ? "move" : "ALU", (unsigned long) insn);
3706
3707 /* Instruction is of form:
3708
3709 <op><cond> rd, [rn,] rm [, <shift>]
3710
3711 Rewrite as:
3712
3713 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
3714 r0, r1, r2 <- rd, rn, rm
3715 Insn: <op><cond> r0, r1, r2 [, <shift>]
3716 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
3717 */
3718
3719 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3720 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3721 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
3722 rd_val = displaced_read_reg (regs, from, rd);
3723 rn_val = displaced_read_reg (regs, from, rn);
3724 rm_val = displaced_read_reg (regs, from, rm);
3725 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
3726 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
3727 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
3728 dsc->rd = rd;
3729
3730 if (is_mov)
3731 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
3732 else
3733 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
3734
3735 dsc->cleanup = &cleanup_alu_reg;
3736
3737 return 0;
3738 }
3739
3740 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
3741
3742 static void
3743 cleanup_alu_shifted_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3744 struct regcache *regs,
3745 struct displaced_step_closure *dsc)
3746 {
3747 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3748 int i;
3749
3750 for (i = 0; i < 4; i++)
3751 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
3752
3753 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
3754 }
3755
3756 static int
3757 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
3758 struct regcache *regs, struct displaced_step_closure *dsc)
3759 {
3760 unsigned int rn = bits (insn, 16, 19);
3761 unsigned int rm = bits (insn, 0, 3);
3762 unsigned int rd = bits (insn, 12, 15);
3763 unsigned int rs = bits (insn, 8, 11);
3764 unsigned int op = bits (insn, 21, 24);
3765 int is_mov = (op == 0xd), i;
3766 ULONGEST rd_val, rn_val, rm_val, rs_val;
3767 CORE_ADDR from = dsc->insn_addr;
3768
3769 if (!insn_references_pc (insn, 0x000fff0ful))
3770 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
3771
3772 if (debug_displaced)
3773 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
3774 "%.8lx\n", is_mov ? "move" : "ALU",
3775 (unsigned long) insn);
3776
3777 /* Instruction is of form:
3778
3779 <op><cond> rd, [rn,] rm, <shift> rs
3780
3781 Rewrite as:
3782
3783 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
3784 r0, r1, r2, r3 <- rd, rn, rm, rs
3785 Insn: <op><cond> r0, r1, r2, <shift> r3
3786 Cleanup: tmp5 <- r0
3787 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
3788 rd <- tmp5
3789 */
3790
3791 for (i = 0; i < 4; i++)
3792 dsc->tmp[i] = displaced_read_reg (regs, from, i);
3793
3794 rd_val = displaced_read_reg (regs, from, rd);
3795 rn_val = displaced_read_reg (regs, from, rn);
3796 rm_val = displaced_read_reg (regs, from, rm);
3797 rs_val = displaced_read_reg (regs, from, rs);
3798 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
3799 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
3800 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
3801 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
3802 dsc->rd = rd;
3803
3804 if (is_mov)
3805 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
3806 else
3807 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
3808
3809 dsc->cleanup = &cleanup_alu_shifted_reg;
3810
3811 return 0;
3812 }
3813
3814 /* Clean up load instructions. */
3815
3816 static void
3817 cleanup_load (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
3818 struct displaced_step_closure *dsc)
3819 {
3820 ULONGEST rt_val, rt_val2 = 0, rn_val;
3821 CORE_ADDR from = dsc->insn_addr;
3822
3823 rt_val = displaced_read_reg (regs, from, 0);
3824 if (dsc->u.ldst.xfersize == 8)
3825 rt_val2 = displaced_read_reg (regs, from, 1);
3826 rn_val = displaced_read_reg (regs, from, 2);
3827
3828 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3829 if (dsc->u.ldst.xfersize > 4)
3830 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3831 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
3832 if (!dsc->u.ldst.immed)
3833 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
3834
3835 /* Handle register writeback. */
3836 if (dsc->u.ldst.writeback)
3837 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
3838 /* Put result in right place. */
3839 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
3840 if (dsc->u.ldst.xfersize == 8)
3841 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
3842 }
3843
3844 /* Clean up store instructions. */
3845
3846 static void
3847 cleanup_store (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
3848 struct displaced_step_closure *dsc)
3849 {
3850 CORE_ADDR from = dsc->insn_addr;
3851 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
3852
3853 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3854 if (dsc->u.ldst.xfersize > 4)
3855 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3856 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
3857 if (!dsc->u.ldst.immed)
3858 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
3859 if (!dsc->u.ldst.restore_r4)
3860 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
3861
3862 /* Writeback. */
3863 if (dsc->u.ldst.writeback)
3864 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
3865 }
3866
3867 /* Copy "extra" load/store instructions. These are halfword/doubleword
3868 transfers, which have a different encoding to byte/word transfers. */
3869
3870 static int
3871 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
3872 struct regcache *regs, struct displaced_step_closure *dsc)
3873 {
3874 unsigned int op1 = bits (insn, 20, 24);
3875 unsigned int op2 = bits (insn, 5, 6);
3876 unsigned int rt = bits (insn, 12, 15);
3877 unsigned int rn = bits (insn, 16, 19);
3878 unsigned int rm = bits (insn, 0, 3);
3879 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
3880 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
3881 int immed = (op1 & 0x4) != 0;
3882 int opcode;
3883 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
3884 CORE_ADDR from = dsc->insn_addr;
3885
3886 if (!insn_references_pc (insn, 0x000ff00ful))
3887 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
3888
3889 if (debug_displaced)
3890 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
3891 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
3892 (unsigned long) insn);
3893
3894 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
3895
3896 if (opcode < 0)
3897 internal_error (__FILE__, __LINE__,
3898 _("copy_extra_ld_st: instruction decode error"));
3899
3900 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3901 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3902 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
3903 if (!immed)
3904 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
3905
3906 rt_val = displaced_read_reg (regs, from, rt);
3907 if (bytesize[opcode] == 8)
3908 rt_val2 = displaced_read_reg (regs, from, rt + 1);
3909 rn_val = displaced_read_reg (regs, from, rn);
3910 if (!immed)
3911 rm_val = displaced_read_reg (regs, from, rm);
3912
3913 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
3914 if (bytesize[opcode] == 8)
3915 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
3916 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
3917 if (!immed)
3918 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
3919
3920 dsc->rd = rt;
3921 dsc->u.ldst.xfersize = bytesize[opcode];
3922 dsc->u.ldst.rn = rn;
3923 dsc->u.ldst.immed = immed;
3924 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
3925 dsc->u.ldst.restore_r4 = 0;
3926
3927 if (immed)
3928 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
3929 ->
3930 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
3931 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
3932 else
3933 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
3934 ->
3935 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
3936 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
3937
3938 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
3939
3940 return 0;
3941 }
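
/* Illustrative sketch (not part of the original GDB code): how the opcode
   index computed above selects entries in the load[] and bytesize[] tables.
   The helper name is hypothetical.  For example, LDRH with an immediate
   offset has op2 = 1, the load bit set and the immediate bit set, giving
   ((1 << 2) | 1 | (4 >> 1)) - 4 = 3, i.e. a two-byte load.  */

static int ATTRIBUTE_UNUSED
example_extra_ld_st_index (uint32_t insn)
{
  int op1 = bits (insn, 20, 24);
  int op2 = bits (insn, 5, 6);

  return ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
}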
3942
3943 /* Copy byte/word loads and stores. */
3944
3945 static int
3946 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
3947 struct regcache *regs,
3948 struct displaced_step_closure *dsc, int load, int byte,
3949 int usermode)
3950 {
3951 int immed = !bit (insn, 25);
3952 unsigned int rt = bits (insn, 12, 15);
3953 unsigned int rn = bits (insn, 16, 19);
3954 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
3955 ULONGEST rt_val, rn_val, rm_val = 0;
3956 CORE_ADDR from = dsc->insn_addr;
3957
3958 if (!insn_references_pc (insn, 0x000ff00ful))
3959 return copy_unmodified (gdbarch, insn, "load/store", dsc);
3960
3961 if (debug_displaced)
3962 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
3963 load ? (byte ? "ldrb" : "ldr")
3964 : (byte ? "strb" : "str"), usermode ? "t" : "",
3965 (unsigned long) insn);
3966
3967 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3968 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
3969 if (!immed)
3970 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
3971 if (!load)
3972 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
3973
3974 rt_val = displaced_read_reg (regs, from, rt);
3975 rn_val = displaced_read_reg (regs, from, rn);
3976 if (!immed)
3977 rm_val = displaced_read_reg (regs, from, rm);
3978
3979 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
3980 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
3981 if (!immed)
3982 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
3983
3984 dsc->rd = rt;
3985 dsc->u.ldst.xfersize = byte ? 1 : 4;
3986 dsc->u.ldst.rn = rn;
3987 dsc->u.ldst.immed = immed;
3988 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
3989
3990 /* To write PC we can do:
3991
3992 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
3993 scratch+4: ldr r4, temp
3994 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
3995 scratch+12: add r4, r4, #8 (r4 = offset)
3996 scratch+16: add r0, r0, r4
3997 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
3998 scratch+24: <temp>
3999
4000 Otherwise we don't know what value to write for PC, since the offset is
4001 architecture-dependent (sometimes PC+8, sometimes PC+12). */
4002
4003 if (load || rt != 15)
4004 {
4005 dsc->u.ldst.restore_r4 = 0;
4006
4007 if (immed)
4008 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
4009 ->
4010 {ldr,str}[b]<cond> r0, [r2, #imm]. */
4011 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4012 else
4013 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
4014 ->
4015 {ldr,str}[b]<cond> r0, [r2, r3]. */
4016 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4017 }
4018 else
4019 {
4020 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
4021 dsc->u.ldst.restore_r4 = 1;
4022
4023 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
4024 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
4025 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
4026 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
4027 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
4028
4029 /* As above. */
4030 if (immed)
4031 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
4032 else
4033 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
4034
4035 dsc->modinsn[6] = 0x0; /* breakpoint location. */
4036 dsc->modinsn[7] = 0x0; /* scratch space. */
4037
4038 dsc->numinsns = 6;
4039 }
4040
4041 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
4042
4043 return 0;
4044 }
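
/* Illustrative sketch (not part of the original GDB code): the writeback
   rule encoded above.  A post-indexed transfer (P bit clear) always
   updates the base register, while a pre-indexed transfer updates it only
   when the W bit is also set.  The helper name is hypothetical.  */

static int ATTRIBUTE_UNUSED
example_ldst_writeback (uint32_t insn)
{
  int p = bit (insn, 24);	/* Pre/post indexing.  */
  int w = bit (insn, 21);	/* Writeback.  */

  return p == 0 || w != 0;
}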
4045
4046 /* Cleanup LDM instructions with fully-populated register list. This is an
4047 unfortunate corner case: it's impossible to implement correctly by modifying
4048 the instruction. The issue is as follows: we have an instruction,
4049
4050 ldm rN, {r0-r15}
4051
4052 which we must rewrite to avoid loading PC. A possible solution would be to
4053 do the load in two halves, something like (with suitable cleanup
4054 afterwards):
4055
4056 mov r8, rN
4057 ldm[id][ab] r8!, {r0-r7}
4058 str r7, <temp>
4059 ldm[id][ab] r8, {r7-r14}
4060 <bkpt>
4061
4062 but at present there's no suitable place for <temp>, since the scratch space
4063 is overwritten before the cleanup routine is called. For now, we simply
4064 emulate the instruction. */
4065
4066 static void
4067 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
4068 struct displaced_step_closure *dsc)
4069 {
4070 ULONGEST from = dsc->insn_addr;
4071 int inc = dsc->u.block.increment;
4072 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
4073 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
4074 uint32_t regmask = dsc->u.block.regmask;
4075 int regno = inc ? 0 : 15;
4076 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
4077 int exception_return = dsc->u.block.load && dsc->u.block.user
4078 && (regmask & 0x8000) != 0;
4079 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4080 int do_transfer = condition_true (dsc->u.block.cond, status);
4081 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4082
4083 if (!do_transfer)
4084 return;
4085
4086 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
4087 sensible we can do here. Complain loudly. */
4088 if (exception_return)
4089 error (_("Cannot single-step exception return"));
4090
4091 /* We don't handle any stores here for now. */
4092 gdb_assert (dsc->u.block.load != 0);
4093
4094 if (debug_displaced)
4095 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
4096 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
4097 dsc->u.block.increment ? "inc" : "dec",
4098 dsc->u.block.before ? "before" : "after");
4099
4100 while (regmask)
4101 {
4102 uint32_t memword;
4103
4104 if (inc)
4105 while (regno <= 15 && (regmask & (1 << regno)) == 0)
4106 regno++;
4107 else
4108 while (regno >= 0 && (regmask & (1 << regno)) == 0)
4109 regno--;
4110
4111 xfer_addr += bump_before;
4112
4113 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
4114 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
4115
4116 xfer_addr += bump_after;
4117
4118 regmask &= ~(1 << regno);
4119 }
4120
4121 if (dsc->u.block.writeback)
4122 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
4123 CANNOT_WRITE_PC);
4124 }
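
/* Illustrative sketch (not part of the original GDB code): how the
   bump_before/bump_after pair above walks the four block-transfer
   addressing modes.  Increment-after accesses base, base+4, ...;
   increment-before starts at base+4; the decrement forms mirror this
   downwards.  The helper name is hypothetical; it returns the address
   accessed on iteration STEP of the emulation loop.  */

static CORE_ADDR ATTRIBUTE_UNUSED
example_block_xfer_addr (CORE_ADDR base, int increment, int before, int step)
{
  int bump_before = before ? (increment ? 4 : -4) : 0;
  int bump_after = before ? 0 : (increment ? 4 : -4);
  CORE_ADDR addr = base;
  int i;

  for (i = 0; i <= step; i++)
    {
      addr += bump_before;
      if (i == step)
	return addr;
      addr += bump_after;
    }

  return addr;	/* Not reached.  */
}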
4125
4126 /* Clean up an STM which included the PC in the register list. */
4127
4128 static void
4129 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
4130 struct displaced_step_closure *dsc)
4131 {
4132 ULONGEST from = dsc->insn_addr;
4133 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4134 int store_executed = condition_true (dsc->u.block.cond, status);
4135 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
4136 CORE_ADDR stm_insn_addr;
4137 uint32_t pc_val;
4138 long offset;
4139 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4140
4141 /* If condition code fails, there's nothing else to do. */
4142 if (!store_executed)
4143 return;
4144
4145 if (dsc->u.block.increment)
4146 {
4147 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
4148
4149 if (dsc->u.block.before)
4150 pc_stored_at += 4;
4151 }
4152 else
4153 {
4154 pc_stored_at = dsc->u.block.xfer_addr;
4155
4156 if (dsc->u.block.before)
4157 pc_stored_at -= 4;
4158 }
4159
4160 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
4161 stm_insn_addr = dsc->scratch_base;
4162 offset = pc_val - stm_insn_addr;
4163
4164 if (debug_displaced)
4165 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
4166 "STM instruction\n", offset);
4167
4168 /* Rewrite the stored PC to the proper value for the non-displaced original
4169 instruction. */
4170 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
4171 dsc->insn_addr + offset);
4172 }
4173
4174 /* Clean up an LDM which includes the PC in the register list. We clumped all
4175 the registers in the transferred list into a contiguous range r0...rX (to
4176 avoid loading PC directly and losing control of the debugged program), so we
4177 must undo that here. */
4178
4179 static void
4180 cleanup_block_load_pc (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
4181 struct regcache *regs,
4182 struct displaced_step_closure *dsc)
4183 {
4184 ULONGEST from = dsc->insn_addr;
4185 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4186 int load_executed = condition_true (dsc->u.block.cond, status), i;
4187 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
4188 unsigned int regs_loaded = bitcount (mask);
4189 unsigned int num_to_shuffle = regs_loaded, clobbered;
4190
4191 /* The method employed here will fail if the register list is fully populated
4192 (we need to avoid loading PC directly). */
4193 gdb_assert (num_to_shuffle < 16);
4194
4195 if (!load_executed)
4196 return;
4197
4198 clobbered = (1 << num_to_shuffle) - 1;
4199
4200 while (num_to_shuffle > 0)
4201 {
4202 if ((mask & (1 << write_reg)) != 0)
4203 {
4204 unsigned int read_reg = num_to_shuffle - 1;
4205
4206 if (read_reg != write_reg)
4207 {
4208 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
4209 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
4210 if (debug_displaced)
4211 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
4212 "loaded register r%d to r%d\n"), read_reg,
4213 write_reg);
4214 }
4215 else if (debug_displaced)
4216 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
4217 "r%d already in the right place\n"),
4218 write_reg);
4219
4220 clobbered &= ~(1 << write_reg);
4221
4222 num_to_shuffle--;
4223 }
4224
4225 write_reg--;
4226 }
4227
4228 /* Restore any registers we scribbled over. */
4229 for (write_reg = 0; clobbered != 0; write_reg++)
4230 {
4231 if ((clobbered & (1 << write_reg)) != 0)
4232 {
4233 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
4234 CANNOT_WRITE_PC);
4235 if (debug_displaced)
4236 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
4237 "clobbered register r%d\n"), write_reg);
4238 clobbered &= ~(1 << write_reg);
4239 }
4240 }
4241
4242 /* Perform register writeback manually. */
4243 if (dsc->u.block.writeback)
4244 {
4245 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
4246
4247 if (dsc->u.block.increment)
4248 new_rn_val += regs_loaded * 4;
4249 else
4250 new_rn_val -= regs_loaded * 4;
4251
4252 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
4253 CANNOT_WRITE_PC);
4254 }
4255 }
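
/* Illustrative sketch (not part of the original GDB code): the register
   renaming undone by the cleanup above.  The modified LDM loads the i-th
   lowest-numbered register of the original list into r<i>, so for
   "ldm rN, {r1, r5, pc}" the value destined for the PC arrives in r2.
   The helper name is hypothetical.  */

static int ATTRIBUTE_UNUSED
example_ldm_pc_temp_reg (unsigned int regmask, int orig_reg)
{
  int temp = 0, i;

  for (i = 0; i < orig_reg; i++)
    if (regmask & (1 << i))
      temp++;	/* Count listed registers below ORIG_REG.  */

  return temp;
}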
4256
4257 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
4258 in user-level code (in particular exception return, ldm rn, {...pc}^). */
4259
4260 static int
4261 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4262 struct displaced_step_closure *dsc)
4263 {
4264 int load = bit (insn, 20);
4265 int user = bit (insn, 22);
4266 int increment = bit (insn, 23);
4267 int before = bit (insn, 24);
4268 int writeback = bit (insn, 21);
4269 int rn = bits (insn, 16, 19);
4270 CORE_ADDR from = dsc->insn_addr;
4271
4272 /* Block transfers which don't mention PC can be run directly out-of-line. */
4273 if (rn != 15 && (insn & 0x8000) == 0)
4274 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
4275
4276 if (rn == 15)
4277 {
4278 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
4279 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
4280 }
4281
4282 if (debug_displaced)
4283 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
4284 "%.8lx\n", (unsigned long) insn);
4285
4286 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
4287 dsc->u.block.rn = rn;
4288
4289 dsc->u.block.load = load;
4290 dsc->u.block.user = user;
4291 dsc->u.block.increment = increment;
4292 dsc->u.block.before = before;
4293 dsc->u.block.writeback = writeback;
4294 dsc->u.block.cond = bits (insn, 28, 31);
4295
4296 dsc->u.block.regmask = insn & 0xffff;
4297
4298 if (load)
4299 {
4300 if ((insn & 0xffff) == 0xffff)
4301 {
4302 /* LDM with a fully-populated register list. This case is
4303 particularly tricky. Implement for now by fully emulating the
4304 instruction (which might not behave perfectly in all cases, but
4305 these instructions should be rare enough for that not to matter
4306 too much). */
4307 dsc->modinsn[0] = ARM_NOP;
4308
4309 dsc->cleanup = &cleanup_block_load_all;
4310 }
4311 else
4312 {
4313 /* LDM of a list of registers which includes PC. Implement by
4314 rewriting the list of registers to be transferred into a
4315 contiguous chunk r0...rX before doing the transfer, then shuffling
4316 registers into the correct places in the cleanup routine. */
4317 unsigned int regmask = insn & 0xffff;
4318 unsigned int num_in_list = bitcount (regmask), new_regmask;
4319 unsigned int i;
4320
4321 for (i = 0; i < num_in_list; i++)
4322 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4323
4324 /* Writeback makes things complicated. We need to avoid clobbering
4325 the base register with one of the registers in our modified
4326 register list, but just using a different register can't work in
4327 all cases, e.g.:
4328
4329 ldm r14!, {r0-r13,pc}
4330
4331 which would need to be rewritten as:
4332
4333 ldm rN!, {r0-r14}
4334
4335 but that can't work, because there's no free register for N.
4336
4337 Solve this by turning off the writeback bit, and emulating
4338 writeback manually in the cleanup routine. */
4339
4340 if (writeback)
4341 insn &= ~(1 << 21);
4342
4343 new_regmask = (1 << num_in_list) - 1;
4344
4345 if (debug_displaced)
4346 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
4347 "{..., pc}: original reg list %.4x, modified "
4348 "list %.4x\n"), rn, writeback ? "!" : "",
4349 (int) insn & 0xffff, new_regmask);
4350
4351 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
4352
4353 dsc->cleanup = &cleanup_block_load_pc;
4354 }
4355 }
4356 else
4357 {
4358 /* STM of a list of registers which includes PC. Run the instruction
4359 as-is, but out of line: this will store the wrong value for the PC,
4360 so we must manually fix up the memory in the cleanup routine.
4361 Doing things this way has the advantage that we can auto-detect
4362 the offset of the PC write (which is architecture-dependent) in
4363 the cleanup routine. */
4364 dsc->modinsn[0] = insn;
4365
4366 dsc->cleanup = &cleanup_block_store_pc;
4367 }
4368
4369 return 0;
4370 }
4371
4372 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
4373 for Linux, where some SVC instructions must be treated specially. */
4374
4375 static void
4376 cleanup_svc (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
4377 struct displaced_step_closure *dsc)
4378 {
4379 CORE_ADDR from = dsc->insn_addr;
4380 CORE_ADDR resume_addr = from + 4;
4381
4382 if (debug_displaced)
4383 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
4384 "%.8lx\n", (unsigned long) resume_addr);
4385
4386 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
4387 }
4388
4389 static int
4390 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
4391 struct regcache *regs, struct displaced_step_closure *dsc)
4392 {
4393 CORE_ADDR from = dsc->insn_addr;
4394
4395 /* Allow OS-specific code to override SVC handling. */
4396 if (dsc->u.svc.copy_svc_os)
4397 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
4398
4399 if (debug_displaced)
4400 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
4401 (unsigned long) insn);
4402
4403 /* Preparation: none.
4404 Insn: unmodified svc.
4405 Cleanup: pc <- insn_addr + 4. */
4406
4407 dsc->modinsn[0] = insn;
4408
4409 dsc->cleanup = &cleanup_svc;
4410 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
4411 instruction. */
4412 dsc->wrote_to_pc = 1;
4413
4414 return 0;
4415 }
4416
4417 /* Copy undefined instructions. */
4418
4419 static int
4420 copy_undef (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
4421 struct displaced_step_closure *dsc)
4422 {
4423 if (debug_displaced)
4424 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
4425 (unsigned long) insn);
4426
4427 dsc->modinsn[0] = insn;
4428
4429 return 0;
4430 }
4431
4432 /* Copy unpredictable instructions. */
4433
4434 static int
4435 copy_unpred (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
4436 struct displaced_step_closure *dsc)
4437 {
4438 if (debug_displaced)
4439 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
4440 "%.8lx\n", (unsigned long) insn);
4441
4442 dsc->modinsn[0] = insn;
4443
4444 return 0;
4445 }
4446
4447 /* The decode_* functions are instruction decoding helpers. They mostly follow
4448 the presentation in the ARM ARM. */
4449
4450 static int
4451 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
4452 struct regcache *regs,
4453 struct displaced_step_closure *dsc)
4454 {
4455 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
4456 unsigned int rn = bits (insn, 16, 19);
4457
4458 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
4459 return copy_unmodified (gdbarch, insn, "cps", dsc);
4460 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
4461 return copy_unmodified (gdbarch, insn, "setend", dsc);
4462 else if ((op1 & 0x60) == 0x20)
4463 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
4464 else if ((op1 & 0x71) == 0x40)
4465 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
4466 else if ((op1 & 0x77) == 0x41)
4467 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
4468 else if ((op1 & 0x77) == 0x45)
4469 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
4470 else if ((op1 & 0x77) == 0x51)
4471 {
4472 if (rn != 0xf)
4473 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
4474 else
4475 return copy_unpred (gdbarch, insn, dsc);
4476 }
4477 else if ((op1 & 0x77) == 0x55)
4478 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
4479 else if (op1 == 0x57)
4480 switch (op2)
4481 {
4482 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
4483 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
4484 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
4485 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
4486 default: return copy_unpred (gdbarch, insn, dsc);
4487 }
4488 else if ((op1 & 0x63) == 0x43)
4489 return copy_unpred (gdbarch, insn, dsc);
4490 else if ((op2 & 0x1) == 0x0)
4491 switch (op1 & ~0x80)
4492 {
4493 case 0x61:
4494 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
4495 case 0x65:
4496 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
4497 case 0x71: case 0x75:
4498 /* pld/pldw reg. */
4499 return copy_preload_reg (gdbarch, insn, regs, dsc);
4500 case 0x63: case 0x67: case 0x73: case 0x77:
4501 return copy_unpred (gdbarch, insn, dsc);
4502 default:
4503 return copy_undef (gdbarch, insn, dsc);
4504 }
4505 else
4506 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
4507 }
4508
4509 static int
4510 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
4511 struct regcache *regs, struct displaced_step_closure *dsc)
4512 {
4513 if (bit (insn, 27) == 0)
4514 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
4515 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
4516 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
4517 {
4518 case 0x0: case 0x2:
4519 return copy_unmodified (gdbarch, insn, "srs", dsc);
4520
4521 case 0x1: case 0x3:
4522 return copy_unmodified (gdbarch, insn, "rfe", dsc);
4523
4524 case 0x4: case 0x5: case 0x6: case 0x7:
4525 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
4526
4527 case 0x8:
4528 switch ((insn & 0xe00000) >> 21)
4529 {
4530 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
4531 /* stc/stc2. */
4532 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4533
4534 case 0x2:
4535 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
4536
4537 default:
4538 return copy_undef (gdbarch, insn, dsc);
4539 }
4540
4541 case 0x9:
4542 {
4543 int rn_f = (bits (insn, 16, 19) == 0xf);
4544 switch ((insn & 0xe00000) >> 21)
4545 {
4546 case 0x1: case 0x3:
4547 /* ldc/ldc2 imm (undefined for rn == pc). */
4548 return rn_f ? copy_undef (gdbarch, insn, dsc)
4549 : copy_copro_load_store (gdbarch, insn, regs, dsc);
4550
4551 case 0x2:
4552 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
4553
4554 case 0x4: case 0x5: case 0x6: case 0x7:
4555 /* ldc/ldc2 lit (undefined for rn != pc). */
4556 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
4557 : copy_undef (gdbarch, insn, dsc);
4558
4559 default:
4560 return copy_undef (gdbarch, insn, dsc);
4561 }
4562 }
4563
4564 case 0xa:
4565 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
4566
4567 case 0xb:
4568 if (bits (insn, 16, 19) == 0xf)
4569 /* ldc/ldc2 lit. */
4570 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4571 else
4572 return copy_undef (gdbarch, insn, dsc);
4573
4574 case 0xc:
4575 if (bit (insn, 4))
4576 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
4577 else
4578 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4579
4580 case 0xd:
4581 if (bit (insn, 4))
4582 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
4583 else
4584 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4585
4586 default:
4587 return copy_undef (gdbarch, insn, dsc);
4588 }
4589 }
4590
4591 /* Decode miscellaneous instructions in dp/misc encoding space. */
4592
4593 static int
4594 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
4595 struct regcache *regs, struct displaced_step_closure *dsc)
4596 {
4597 unsigned int op2 = bits (insn, 4, 6);
4598 unsigned int op = bits (insn, 21, 22);
4599 unsigned int op1 = bits (insn, 16, 19);
4600
4601 switch (op2)
4602 {
4603 case 0x0:
4604 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
4605
4606 case 0x1:
4607 if (op == 0x1) /* bx. */
4608 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
4609 else if (op == 0x3)
4610 return copy_unmodified (gdbarch, insn, "clz", dsc);
4611 else
4612 return copy_undef (gdbarch, insn, dsc);
4613
4614 case 0x2:
4615 if (op == 0x1)
4616 /* Not really supported. */
4617 return copy_unmodified (gdbarch, insn, "bxj", dsc);
4618 else
4619 return copy_undef (gdbarch, insn, dsc);
4620
4621 case 0x3:
4622 if (op == 0x1)
4623 return copy_bx_blx_reg (gdbarch, insn, regs, dsc); /* blx register. */
4624 else
4625 return copy_undef (gdbarch, insn, dsc);
4626
4627 case 0x5:
4628 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
4629
4630 case 0x7:
4631 if (op == 0x1)
4632 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
4633 else if (op == 0x3)
4634 /* Not really supported. */
4635 return copy_unmodified (gdbarch, insn, "smc", dsc);
4636
4637 default:
4638 return copy_undef (gdbarch, insn, dsc);
4639 }
4640 }
4641
4642 static int
4643 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4644 struct displaced_step_closure *dsc)
4645 {
4646 if (bit (insn, 25))
4647 switch (bits (insn, 20, 24))
4648 {
4649 case 0x10:
4650 return copy_unmodified (gdbarch, insn, "movw", dsc);
4651
4652 case 0x14:
4653 return copy_unmodified (gdbarch, insn, "movt", dsc);
4654
4655 case 0x12: case 0x16:
4656 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
4657
4658 default:
4659 return copy_alu_imm (gdbarch, insn, regs, dsc);
4660 }
4661 else
4662 {
4663 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
4664
4665 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
4666 return copy_alu_reg (gdbarch, insn, regs, dsc);
4667 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
4668 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
4669 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
4670 return decode_miscellaneous (gdbarch, insn, regs, dsc);
4671 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
4672 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
4673 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
4674 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
4675 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
4676 return copy_unmodified (gdbarch, insn, "synch", dsc);
4677 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
4678 /* 2nd arg means "unprivileged". */
4679 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
4680 dsc);
4681 }
4682
4683 /* Should be unreachable. */
4684 return 1;
4685 }
4686
4687 static int
4688 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
4689 struct regcache *regs,
4690 struct displaced_step_closure *dsc)
4691 {
4692 int a = bit (insn, 25), b = bit (insn, 4);
4693 uint32_t op1 = bits (insn, 20, 24);
4694 int rn_f = bits (insn, 16, 19) == 0xf;
4695
4696 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
4697 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
4698 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
4699 else if ((!a && (op1 & 0x17) == 0x02)
4700 || (a && (op1 & 0x17) == 0x02 && !b))
4701 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
4702 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
4703 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
4704 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
4705 else if ((!a && (op1 & 0x17) == 0x03)
4706 || (a && (op1 & 0x17) == 0x03 && !b))
4707 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
4708 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
4709 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
4710 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
4711 else if ((!a && (op1 & 0x17) == 0x06)
4712 || (a && (op1 & 0x17) == 0x06 && !b))
4713 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
4714 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
4715 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
4716 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
4717 else if ((!a && (op1 & 0x17) == 0x07)
4718 || (a && (op1 & 0x17) == 0x07 && !b))
4719 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
4720
4721 /* Should be unreachable. */
4722 return 1;
4723 }
4724
4725 static int
4726 decode_media (struct gdbarch *gdbarch, uint32_t insn,
4727 struct displaced_step_closure *dsc)
4728 {
4729 switch (bits (insn, 20, 24))
4730 {
4731 case 0x00: case 0x01: case 0x02: case 0x03:
4732 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
4733
4734 case 0x04: case 0x05: case 0x06: case 0x07:
4735 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
4736
4737 case 0x08: case 0x09: case 0x0a: case 0x0b:
4738 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
4739 return copy_unmodified (gdbarch, insn,
4740 "decode/pack/unpack/saturate/reverse", dsc);
4741
4742 case 0x18:
4743 if (bits (insn, 5, 7) == 0) /* op2. */
4744 {
4745 if (bits (insn, 12, 15) == 0xf)
4746 return copy_unmodified (gdbarch, insn, "usad8", dsc);
4747 else
4748 return copy_unmodified (gdbarch, insn, "usada8", dsc);
4749 }
4750 else
4751 return copy_undef (gdbarch, insn, dsc);
4752
4753 case 0x1a: case 0x1b:
4754 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
4755 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
4756 else
4757 return copy_undef (gdbarch, insn, dsc);
4758
4759 case 0x1c: case 0x1d:
4760 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
4761 {
4762 if (bits (insn, 0, 3) == 0xf)
4763 return copy_unmodified (gdbarch, insn, "bfc", dsc);
4764 else
4765 return copy_unmodified (gdbarch, insn, "bfi", dsc);
4766 }
4767 else
4768 return copy_undef (gdbarch, insn, dsc);
4769
4770 case 0x1e: case 0x1f:
4771 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
4772 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
4773 else
4774 return copy_undef (gdbarch, insn, dsc);
4775 }
4776
4777 /* Should be unreachable. */
4778 return 1;
4779 }
4780
4781 static int
4782 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
4783 struct regcache *regs, struct displaced_step_closure *dsc)
4784 {
4785 if (bit (insn, 25))
4786 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
4787 else
4788 return copy_block_xfer (gdbarch, insn, regs, dsc);
4789 }
4790
4791 static int
4792 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
4793 struct regcache *regs, struct displaced_step_closure *dsc)
4794 {
4795 unsigned int opcode = bits (insn, 20, 24);
4796
4797 switch (opcode)
4798 {
4799 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
4800 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
4801
4802 case 0x08: case 0x0a: case 0x0c: case 0x0e:
4803 case 0x12: case 0x16:
4804 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
4805
4806 case 0x09: case 0x0b: case 0x0d: case 0x0f:
4807 case 0x13: case 0x17:
4808 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
4809
4810 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
4811 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
4812 /* Note: no writeback for these instructions. Bit 25 will always be
4813 zero though (via caller), so the following works OK. */
4814 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4815 }
4816
4817 /* Should be unreachable. */
4818 return 1;
4819 }
4820
4821 static int
4822 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
4823 struct regcache *regs, struct displaced_step_closure *dsc)
4824 {
4825 unsigned int op1 = bits (insn, 20, 25);
4826 int op = bit (insn, 4);
4827 unsigned int coproc = bits (insn, 8, 11);
4828 unsigned int rn = bits (insn, 16, 19);
4829
4830 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
4831 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
4832 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
4833 && (coproc & 0xe) != 0xa)
4834 /* stc/stc2. */
4835 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4836 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
4837 && (coproc & 0xe) != 0xa)
4838 /* ldc/ldc2 imm/lit. */
4839 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4840 else if ((op1 & 0x3e) == 0x00)
4841 return copy_undef (gdbarch, insn, dsc);
4842 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
4843 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
4844 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
4845 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
4846 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
4847 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
4848 else if ((op1 & 0x30) == 0x20 && !op)
4849 {
4850 if ((coproc & 0xe) == 0xa)
4851 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
4852 else
4853 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4854 }
4855 else if ((op1 & 0x30) == 0x20 && op)
4856 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
4857 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
4858 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
4859 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
4860 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
4861 else if ((op1 & 0x30) == 0x30)
4862 return copy_svc (gdbarch, insn, to, regs, dsc);
4863 else
4864 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
4865 }
4866
4867 void
4868 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
4869 CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
4870 struct displaced_step_closure *dsc)
4871 {
4872 int err = 0;
4873
4874 if (!displaced_in_arm_mode (regs))
4875 error (_("Displaced stepping is only supported in ARM mode"));
4876
4877 /* Most displaced instructions use a 1-instruction scratch space, so set this
4878 here and override below if/when necessary. */
4879 dsc->numinsns = 1;
4880 dsc->insn_addr = from;
4881 dsc->scratch_base = to;
4882 dsc->cleanup = NULL;
4883 dsc->wrote_to_pc = 0;
4884
4885 if ((insn & 0xf0000000) == 0xf0000000)
4886 err = decode_unconditional (gdbarch, insn, regs, dsc);
4887 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
4888 {
4889 case 0x0: case 0x1: case 0x2: case 0x3:
4890 err = decode_dp_misc (gdbarch, insn, regs, dsc);
4891 break;
4892
4893 case 0x4: case 0x5: case 0x6:
4894 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
4895 break;
4896
4897 case 0x7:
4898 err = decode_media (gdbarch, insn, dsc);
4899 break;
4900
4901 case 0x8: case 0x9: case 0xa: case 0xb:
4902 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
4903 break;
4904
4905 case 0xc: case 0xd: case 0xe: case 0xf:
4906 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
4907 break;
4908 }
4909
4910 if (err)
4911 internal_error (__FILE__, __LINE__,
4912 _("arm_process_displaced_insn: Instruction decode error"));
4913 }
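
/* Illustrative sketch (not part of the original GDB code): the dispatch
   key used in the switch above.  Bits 25-27 of the instruction land in
   bits 1-3 of the key and bit 4 lands in bit 0, so a media instruction
   (bits 27-25 = 011, bit 4 = 1) yields key 0x7, matching the decode_media
   case.  The helper name is hypothetical.  */

static unsigned int ATTRIBUTE_UNUSED
example_displaced_dispatch_key (uint32_t insn)
{
  return ((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24);
}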
4914
4915 /* Actually set up the scratch space for a displaced instruction. */
4916
4917 void
4918 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
4919 CORE_ADDR to, struct displaced_step_closure *dsc)
4920 {
4921 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4922 unsigned int i;
4923 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4924
4925 /* Poke modified instruction(s). */
4926 for (i = 0; i < dsc->numinsns; i++)
4927 {
4928 if (debug_displaced)
4929 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
4930 "%.8lx\n", (unsigned long) dsc->modinsn[i],
4931 (unsigned long) to + i * 4);
4932 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
4933 dsc->modinsn[i]);
4934 }
4935
4936 /* Put breakpoint afterwards. */
4937 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
4938 tdep->arm_breakpoint_size);
4939
4940 if (debug_displaced)
4941 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
4942 paddress (gdbarch, from), paddress (gdbarch, to));
4943 }
4944
4945 /* Entry point for copying an instruction into scratch space for displaced
4946 stepping. */
4947
4948 struct displaced_step_closure *
4949 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
4950 CORE_ADDR from, CORE_ADDR to,
4951 struct regcache *regs)
4952 {
4953 struct displaced_step_closure *dsc
4954 = xmalloc (sizeof (struct displaced_step_closure));
4955 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4956 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
4957
4958 if (debug_displaced)
4959 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
4960 "at %.8lx\n", (unsigned long) insn,
4961 (unsigned long) from);
4962
4963 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
4964 arm_displaced_init_closure (gdbarch, from, to, dsc);
4965
4966 return dsc;
4967 }
4968
4969 /* Entry point for cleaning things up after a displaced instruction has been
4970 single-stepped. */
4971
4972 void
4973 arm_displaced_step_fixup (struct gdbarch *gdbarch,
4974 struct displaced_step_closure *dsc,
4975 CORE_ADDR from, CORE_ADDR to,
4976 struct regcache *regs)
4977 {
4978 if (dsc->cleanup)
4979 dsc->cleanup (gdbarch, regs, dsc);
4980
4981 if (!dsc->wrote_to_pc)
4982 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
4983 }
4984
4985 #include "bfd-in2.h"
4986 #include "libcoff.h"
4987
4988 static int
4989 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
4990 {
4991 if (arm_pc_is_thumb (memaddr))
4992 {
4993 static asymbol *asym;
4994 static combined_entry_type ce;
4995 static struct coff_symbol_struct csym;
4996 static struct bfd fake_bfd;
4997 static bfd_target fake_target;
4998
4999 if (csym.native == NULL)
5000 {
5001 /* Create a fake symbol vector containing a Thumb symbol.
5002 This is solely so that the code in print_insn_little_arm()
5003 and print_insn_big_arm() in opcodes/arm-dis.c will detect
5004 the presence of a Thumb symbol and switch to decoding
5005 Thumb instructions. */
5006
5007 fake_target.flavour = bfd_target_coff_flavour;
5008 fake_bfd.xvec = &fake_target;
5009 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
5010 csym.native = &ce;
5011 csym.symbol.the_bfd = &fake_bfd;
5012 csym.symbol.name = "fake";
5013 asym = (asymbol *) & csym;
5014 }
5015
5016 memaddr = UNMAKE_THUMB_ADDR (memaddr);
5017 info->symbols = &asym;
5018 }
5019 else
5020 info->symbols = NULL;
5021
5022 if (info->endian == BFD_ENDIAN_BIG)
5023 return print_insn_big_arm (memaddr, info);
5024 else
5025 return print_insn_little_arm (memaddr, info);
5026 }
5027
5028 /* The following define instruction sequences that will cause ARM
5029 CPUs to take an undefined instruction trap. These are used to
5030 signal a breakpoint to GDB.
5031
5032 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
5033 modes. A different instruction is required for each mode. The ARM
5034 CPUs can also be big or little endian. Thus four different
5035 instructions are needed to support all cases.
5036
5037 Note: ARMv4 defines several new instructions that will take the
5038 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
5039 not in fact add the new instructions. The new undefined
5040 instructions in ARMv4 are all instructions that had no defined
5041 behaviour in earlier chips. There is no guarantee that they will
5042 raise an exception, but may be treated as NOP's. In practice, it
5043 may only safe to rely on instructions matching:
5044
5045 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
5046 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
5047 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
5048
5049 Even this may only be true if the condition predicate is true. The
5050 following use a condition predicate of ALWAYS so it is always TRUE.
5051
5052 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
5053 and NetBSD all use a software interrupt rather than an undefined
5054 instruction to force a trap. This can be handled by the
5055 ABI-specific code during establishment of the gdbarch vector. */
5056
5057 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
5058 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
5059 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
5060 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
5061
5062 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
5063 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
5064 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
5065 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
5066
5067 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
5068 the program counter value to determine whether a 16-bit or 32-bit
5069 breakpoint should be used. It returns a pointer to a string of
5070 bytes that encode a breakpoint instruction, stores the length of
5071 the string to *lenptr, and adjusts the program counter (if
5072 necessary) to point to the actual memory location where the
5073 breakpoint should be inserted. */
5074
5075 static const unsigned char *
5076 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
5077 {
5078 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5079 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5080
5081 if (arm_pc_is_thumb (*pcptr))
5082 {
5083 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
5084
5085 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
5086 check whether we are replacing a 32-bit instruction. */
5087 if (tdep->thumb2_breakpoint != NULL)
5088 {
5089 gdb_byte buf[2];
5090 if (target_read_memory (*pcptr, buf, 2) == 0)
5091 {
5092 unsigned short inst1;
5093 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
5094 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
5095 {
5096 *lenptr = tdep->thumb2_breakpoint_size;
5097 return tdep->thumb2_breakpoint;
5098 }
5099 }
5100 }
5101
5102 *lenptr = tdep->thumb_breakpoint_size;
5103 return tdep->thumb_breakpoint;
5104 }
5105 else
5106 {
5107 *lenptr = tdep->arm_breakpoint_size;
5108 return tdep->arm_breakpoint;
5109 }
5110 }
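
/* Illustrative sketch (not part of the original GDB code): the test used
   above to spot a 32-bit Thumb-2 instruction.  A halfword begins a 32-bit
   encoding exactly when its top five bits are 0b11101, 0b11110 or 0b11111;
   anything else (including 0b11100, the 16-bit unconditional branch) is a
   16-bit instruction.  The helper name is hypothetical.  */

static int ATTRIBUTE_UNUSED
example_thumb_insn_is_32bit (unsigned short inst1)
{
  return (inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0;
}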
5111
5112 static void
5113 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
5114 int *kindptr)
5115 {
5116 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5117
5118 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
5119
5120 if (arm_pc_is_thumb (*pcptr) && *kindptr == 4)
5121 /* Use the documented magic value for a 32-bit Thumb-2 breakpoint, so
5122 that it is not confused with a 32-bit ARM breakpoint. */
5123 *kindptr = 3;
5124 }
5125
5126 /* Extract from the register cache REGS a function return value
5127 of type TYPE, and copy that, in virtual format, into
5128 VALBUF. */
5129
5130 static void
5131 arm_extract_return_value (struct type *type, struct regcache *regs,
5132 gdb_byte *valbuf)
5133 {
5134 struct gdbarch *gdbarch = get_regcache_arch (regs);
5135 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5136
5137 if (TYPE_CODE_FLT == TYPE_CODE (type))
5138 {
5139 switch (gdbarch_tdep (gdbarch)->fp_model)
5140 {
5141 case ARM_FLOAT_FPA:
5142 {
5143 /* The value is in register F0 in internal format. We need to
5144 extract the raw value and then convert it to the desired
5145 internal type. */
5146 bfd_byte tmpbuf[FP_REGISTER_SIZE];
5147
5148 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
5149 convert_from_extended (floatformat_from_type (type), tmpbuf,
5150 valbuf, gdbarch_byte_order (gdbarch));
5151 }
5152 break;
5153
5154 case ARM_FLOAT_SOFT_FPA:
5155 case ARM_FLOAT_SOFT_VFP:
5156 /* ARM_FLOAT_VFP can arise if this is a variadic function, which
5157 therefore does not use the VFP ABI code. */
5158 case ARM_FLOAT_VFP:
5159 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
5160 if (TYPE_LENGTH (type) > 4)
5161 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
5162 valbuf + INT_REGISTER_SIZE);
5163 break;
5164
5165 default:
5166 internal_error
5167 (__FILE__, __LINE__,
5168 _("arm_extract_return_value: Floating point model not supported"));
5169 break;
5170 }
5171 }
5172 else if (TYPE_CODE (type) == TYPE_CODE_INT
5173 || TYPE_CODE (type) == TYPE_CODE_CHAR
5174 || TYPE_CODE (type) == TYPE_CODE_BOOL
5175 || TYPE_CODE (type) == TYPE_CODE_PTR
5176 || TYPE_CODE (type) == TYPE_CODE_REF
5177 || TYPE_CODE (type) == TYPE_CODE_ENUM)
5178 {
5179 /* If the type is a plain integer, then the access is
5180 straightforward. Otherwise we have to play around a bit more. */
5181 int len = TYPE_LENGTH (type);
5182 int regno = ARM_A1_REGNUM;
5183 ULONGEST tmp;
5184
5185 while (len > 0)
5186 {
5187 /* By using store_unsigned_integer we avoid having to do
5188 anything special for small big-endian values. */
5189 regcache_cooked_read_unsigned (regs, regno++, &tmp);
5190 store_unsigned_integer (valbuf,
5191 (len > INT_REGISTER_SIZE
5192 ? INT_REGISTER_SIZE : len),
5193 byte_order, tmp);
5194 len -= INT_REGISTER_SIZE;
5195 valbuf += INT_REGISTER_SIZE;
5196 }
5197 }
5198 else
5199 {
5200 /* For a structure or union the behaviour is as if the value had
5201 been stored to word-aligned memory and then loaded into
5202 registers with 32-bit load instruction(s). */
5203 int len = TYPE_LENGTH (type);
5204 int regno = ARM_A1_REGNUM;
5205 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5206
5207 while (len > 0)
5208 {
5209 regcache_cooked_read (regs, regno++, tmpbuf);
5210 memcpy (valbuf, tmpbuf,
5211 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
5212 len -= INT_REGISTER_SIZE;
5213 valbuf += INT_REGISTER_SIZE;
5214 }
5215 }
5216 }
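
/* Illustrative sketch (not part of the original GDB code, and assuming a
   little-endian target): how a caller might reassemble an 8-byte integer
   returned by the loop above, with the low word in r0 and the high word
   in r1.  The helper name is hypothetical.  */

static ULONGEST ATTRIBUTE_UNUSED
example_reassemble_int64_le (uint32_t r0, uint32_t r1)
{
  return ((ULONGEST) r1 << 32) | r0;
}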
5217
5218
5219 /* Will a function return an aggregate type in memory or in a
5220 register? Return 0 if an aggregate type can be returned in a
5221 register, 1 if it must be returned in memory. */
5222
5223 static int
5224 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
5225 {
5226 int nRc;
5227 enum type_code code;
5228
5229 CHECK_TYPEDEF (type);
5230
5231 /* In the ARM ABI, "integer" like aggregate types are returned in
5232 registers. For an aggregate type to be integer like, its size
5233 must be less than or equal to INT_REGISTER_SIZE and the
5234 offset of each addressable subfield must be zero. Note that bit
5235 fields are not addressable, and all addressable subfields of
5236 unions always start at offset zero.
5237
5238 This function is based on the behaviour of GCC 2.95.1.
5239 See: gcc/arm.c: arm_return_in_memory() for details.
5240
5241 Note: All versions of GCC before GCC 2.95.2 do not set up the
5242 parameters correctly for a function returning the following
5243 structure: struct { float f;}; This should be returned in memory,
5244 not a register. Richard Earnshaw sent me a patch, but I do not
5245 know of any way to detect if a function like the above has been
5246 compiled with the correct calling convention. */
5247
5248 /* All aggregate types that won't fit in a register must be returned
5249 in memory. */
5250 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
5251 {
5252 return 1;
5253 }
5254
5255 /* The AAPCS says all aggregates not larger than a word are returned
5256 in a register. */
5257 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
5258 return 0;
5259
5260 /* The only aggregate types that can be returned in a register are
5261 structs and unions. Arrays must be returned in memory. */
5262 code = TYPE_CODE (type);
5263 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
5264 {
5265 return 1;
5266 }
5267
5268 /* Assume all other aggregate types can be returned in a register.
5269 Run a check for structures, unions and arrays. */
5270 nRc = 0;
5271
5272 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
5273 {
5274 int i;
5275 /* Need to check if this struct/union is "integer" like. For
5276 this to be true, its size must be less than or equal to
5277 INT_REGISTER_SIZE and the offset of each addressable
5278 subfield must be zero. Note that bit fields are not
5279 addressable, and unions always start at offset zero. If any
5280 of the subfields is a floating point type, the struct/union
5281 cannot be an integer type. */
5282
5283 /* For each field in the object, check:
5284 1) Is it FP? --> yes, nRc = 1;
5285 2) Is it addressable (bitpos != 0) and
5286 not packed (bitsize == 0)?
5287 --> yes, nRc = 1
5288 */
5289
5290 for (i = 0; i < TYPE_NFIELDS (type); i++)
5291 {
5292 enum type_code field_type_code;
5293 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, i)));
5294
5295 /* Is it a floating point type field? */
5296 if (field_type_code == TYPE_CODE_FLT)
5297 {
5298 nRc = 1;
5299 break;
5300 }
5301
5302 /* If bitpos != 0, then we have to care about it. */
5303 if (TYPE_FIELD_BITPOS (type, i) != 0)
5304 {
5305 /* Bitfields are not addressable. If the field bitsize is
5306 zero, then the field is not packed. Hence it cannot be
5307 a bitfield or any other packed type. */
5308 if (TYPE_FIELD_BITSIZE (type, i) == 0)
5309 {
5310 nRc = 1;
5311 break;
5312 }
5313 }
5314 }
5315 }
5316
5317 return nRc;
5318 }
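/* Editor's note (illustrative, not part of GDB): two hypothetical C types
   run through the APCS rules implemented above.

     struct { int i; };     size 4, no FP fields, only offset-zero
                            subfields               -> returned in r0
     struct { float f; };   contains a floating point field
                                                    -> returned in memory

   A minimal standalone restatement of that decision, assuming a 4-byte
   integer register; the helper below is a sketch, not GDB code.  */
#if 0
static int
example_apcs_return_in_memory (unsigned int size, int has_float_field,
			       int has_addressable_nonzero_offset)
{
  if (size > 4)
    return 1;		/* Does not fit in a single register.  */
  if (has_float_field || has_addressable_nonzero_offset)
    return 1;		/* Not "integer like".  */
  return 0;		/* Small and integer like: returned in r0.  */
}
#endif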
5319
5320 /* Write into appropriate registers a function return value of type
5321 TYPE, given in virtual format. */
5322
5323 static void
5324 arm_store_return_value (struct type *type, struct regcache *regs,
5325 const gdb_byte *valbuf)
5326 {
5327 struct gdbarch *gdbarch = get_regcache_arch (regs);
5328 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5329
5330 if (TYPE_CODE (type) == TYPE_CODE_FLT)
5331 {
5332 char buf[MAX_REGISTER_SIZE];
5333
5334 switch (gdbarch_tdep (gdbarch)->fp_model)
5335 {
5336 case ARM_FLOAT_FPA:
5337
5338 convert_to_extended (floatformat_from_type (type), buf, valbuf,
5339 gdbarch_byte_order (gdbarch));
5340 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
5341 break;
5342
5343 case ARM_FLOAT_SOFT_FPA:
5344 case ARM_FLOAT_SOFT_VFP:
5345 /* ARM_FLOAT_VFP can arise if this is a variadic function, in
5346 which case the VFP ABI return path is not used. */
5347 case ARM_FLOAT_VFP:
5348 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
5349 if (TYPE_LENGTH (type) > 4)
5350 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
5351 valbuf + INT_REGISTER_SIZE);
5352 break;
5353
5354 default:
5355 internal_error
5356 (__FILE__, __LINE__,
5357 _("arm_store_return_value: Floating point model not supported"));
5358 break;
5359 }
5360 }
5361 else if (TYPE_CODE (type) == TYPE_CODE_INT
5362 || TYPE_CODE (type) == TYPE_CODE_CHAR
5363 || TYPE_CODE (type) == TYPE_CODE_BOOL
5364 || TYPE_CODE (type) == TYPE_CODE_PTR
5365 || TYPE_CODE (type) == TYPE_CODE_REF
5366 || TYPE_CODE (type) == TYPE_CODE_ENUM)
5367 {
5368 if (TYPE_LENGTH (type) <= 4)
5369 {
5370 /* Values of one word or less are zero/sign-extended and
5371 returned in r0. */
5372 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5373 LONGEST val = unpack_long (type, valbuf);
5374
5375 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
5376 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
5377 }
5378 else
5379 {
5380 /* Integral values greater than one word are stored in consecutive
5381 registers starting with r0. This will always be a multiple of
5382 the register size. */
5383 int len = TYPE_LENGTH (type);
5384 int regno = ARM_A1_REGNUM;
5385
5386 while (len > 0)
5387 {
5388 regcache_cooked_write (regs, regno++, valbuf);
5389 len -= INT_REGISTER_SIZE;
5390 valbuf += INT_REGISTER_SIZE;
5391 }
5392 }
5393 }
5394 else
5395 {
5396 /* For a structure or union the behaviour is as if the value had
5397 been stored to word-aligned memory and then loaded into
5398 registers with 32-bit load instruction(s). */
5399 int len = TYPE_LENGTH (type);
5400 int regno = ARM_A1_REGNUM;
5401 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5402
5403 while (len > 0)
5404 {
5405 memcpy (tmpbuf, valbuf,
5406 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
5407 regcache_cooked_write (regs, regno++, tmpbuf);
5408 len -= INT_REGISTER_SIZE;
5409 valbuf += INT_REGISTER_SIZE;
5410 }
5411 }
5412 }
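/* Editor's note (illustrative, not part of GDB): an 8-byte integral return
   value is stored by the loop above with bytes 0-3 of VALBUF going to r0
   and bytes 4-7 to r1.  For a 6-byte structure the second word is built in
   the temporary buffer, so r1 receives bytes 4-5 of the value and two
   trailing bytes whose contents are unspecified, mirroring a word-sized
   load from word-aligned memory.  */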
5413
5414
5415 /* Handle function return values. */
5416
5417 static enum return_value_convention
5418 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
5419 struct type *valtype, struct regcache *regcache,
5420 gdb_byte *readbuf, const gdb_byte *writebuf)
5421 {
5422 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5423 enum arm_vfp_cprc_base_type vfp_base_type;
5424 int vfp_base_count;
5425
5426 if (arm_vfp_abi_for_function (gdbarch, func_type)
5427 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
5428 {
5429 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
5430 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
5431 int i;
5432 for (i = 0; i < vfp_base_count; i++)
5433 {
5434 if (reg_char == 'q')
5435 {
5436 if (writebuf)
5437 arm_neon_quad_write (gdbarch, regcache, i,
5438 writebuf + i * unit_length);
5439
5440 if (readbuf)
5441 arm_neon_quad_read (gdbarch, regcache, i,
5442 readbuf + i * unit_length);
5443 }
5444 else
5445 {
5446 char name_buf[4];
5447 int regnum;
5448
5449 sprintf (name_buf, "%c%d", reg_char, i);
5450 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5451 strlen (name_buf));
5452 if (writebuf)
5453 regcache_cooked_write (regcache, regnum,
5454 writebuf + i * unit_length);
5455 if (readbuf)
5456 regcache_cooked_read (regcache, regnum,
5457 readbuf + i * unit_length);
5458 }
5459 }
5460 return RETURN_VALUE_REGISTER_CONVENTION;
5461 }
5462
5463 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
5464 || TYPE_CODE (valtype) == TYPE_CODE_UNION
5465 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
5466 {
5467 if (tdep->struct_return == pcc_struct_return
5468 || arm_return_in_memory (gdbarch, valtype))
5469 return RETURN_VALUE_STRUCT_CONVENTION;
5470 }
5471
5472 if (writebuf)
5473 arm_store_return_value (valtype, regcache, writebuf);
5474
5475 if (readbuf)
5476 arm_extract_return_value (valtype, regcache, readbuf);
5477
5478 return RETURN_VALUE_REGISTER_CONVENTION;
5479 }
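/* Editor's note (illustrative, not part of GDB): under the VFP ABI a
   homogeneous aggregate such as struct { float x, y; } is a VFP return
   candidate with base type float and count 2, so its value travels in s0
   and s1; struct { double a, b; } uses d0 and d1 instead.  Aggregates
   that are not VFP candidates fall back to arm_return_in_memory and the
   struct-return convention handled above.  */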
5480
5481
5482 static int
5483 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
5484 {
5485 struct gdbarch *gdbarch = get_frame_arch (frame);
5486 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5487 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5488 CORE_ADDR jb_addr;
5489 char buf[INT_REGISTER_SIZE];
5490
5491 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
5492
5493 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
5494 INT_REGISTER_SIZE))
5495 return 0;
5496
5497 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
5498 return 1;
5499 }
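/* Editor's note (illustrative, not part of GDB): the saved PC is read
   from the jmp_buf slot at JB_ADDR + jb_pc * jb_elt_size, where JB_ADDR
   comes from r0 (the jmp_buf argument to longjmp).  With hypothetical
   OS-ABI values jb_pc == 21 and jb_elt_size == 4, the 4-byte PC would be
   fetched from JB_ADDR + 84; the real offsets are supplied by the OS ABI
   initialisation code.  */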
5500
5501 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
5502 return the target PC. Otherwise return 0. */
5503
5504 CORE_ADDR
5505 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
5506 {
5507 char *name;
5508 int namelen;
5509 CORE_ADDR start_addr;
5510
5511 /* Find the starting address and name of the function containing the PC. */
5512 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
5513 return 0;
5514
5515 /* If PC is in a Thumb call or return stub, return the address of the
5516 target PC, which is in a register. The thunk functions are called
5517 _call_via_xx, where xx is the register name. The possible names
5518 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
5519 functions, named __ARM_call_via_r[0-7]. */
5520 if (strncmp (name, "_call_via_", 10) == 0
5521 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
5522 {
5523 /* Use the name suffix to determine which register contains the
5524 target PC. */
5525 static char *table[15] =
5526 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
5527 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
5528 };
5529 int regno;
5530 int offset = strlen (name) - 2;
5531
5532 for (regno = 0; regno <= 14; regno++)
5533 if (strcmp (&name[offset], table[regno]) == 0)
5534 return get_frame_register_unsigned (frame, regno);
5535 }
5536
5537 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
5538 non-interworking calls to foo. We could decode the stubs
5539 to find the target but it's easier to use the symbol table. */
5540 namelen = strlen (name);
5541 if (name[0] == '_' && name[1] == '_'
5542 && ((namelen > 2 + strlen ("_from_thumb")
5543 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
5544 strlen ("_from_thumb")) == 0)
5545 || (namelen > 2 + strlen ("_from_arm")
5546 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
5547 strlen ("_from_arm")) == 0)))
5548 {
5549 char *target_name;
5550 int target_len = namelen - 2;
5551 struct minimal_symbol *minsym;
5552 struct objfile *objfile;
5553 struct obj_section *sec;
5554
5555 if (name[namelen - 1] == 'b')
5556 target_len -= strlen ("_from_thumb");
5557 else
5558 target_len -= strlen ("_from_arm");
5559
5560 target_name = alloca (target_len + 1);
5561 memcpy (target_name, name + 2, target_len);
5562 target_name[target_len] = '\0';
5563
5564 sec = find_pc_section (pc);
5565 objfile = (sec == NULL) ? NULL : sec->objfile;
5566 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
5567 if (minsym != NULL)
5568 return SYMBOL_VALUE_ADDRESS (minsym);
5569 else
5570 return 0;
5571 }
5572
5573 return 0; /* not a stub */
5574 }
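/* Editor's note (illustrative, not part of GDB): "_call_via_r7" ends in
   "r7", which matches table[7] above, so the target PC is read from r7;
   "__foo_from_thumb" drops the leading "__" and the "_from_thumb" suffix,
   leaving "foo" to be looked up as a minimal symbol.  A standalone sketch
   of that suffix stripping, with no bounds checking and a hypothetical
   helper name:  */
#if 0
static void
example_strip_interworking_stub (const char *name, char *target_name)
{
  /* "__foo_from_thumb" -> "foo";  "__bar_from_arm" -> "bar".  */
  size_t namelen = strlen (name);
  size_t target_len = namelen - 2;

  if (name[namelen - 1] == 'b')		/* "..._from_thumb" */
    target_len -= strlen ("_from_thumb");
  else					/* "..._from_arm" */
    target_len -= strlen ("_from_arm");

  memcpy (target_name, name + 2, target_len);
  target_name[target_len] = '\0';
}
#endif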
5575
5576 static void
5577 set_arm_command (char *args, int from_tty)
5578 {
5579 printf_unfiltered (_("\
5580 \"set arm\" must be followed by an apporpriate subcommand.\n"));
5581 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
5582 }
5583
5584 static void
5585 show_arm_command (char *args, int from_tty)
5586 {
5587 cmd_show_list (showarmcmdlist, from_tty, "");
5588 }
5589
5590 static void
5591 arm_update_current_architecture (void)
5592 {
5593 struct gdbarch_info info;
5594
5595 /* If the current architecture is not ARM, we have nothing to do. */
5596 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
5597 return;
5598
5599 /* Update the architecture. */
5600 gdbarch_info_init (&info);
5601
5602 if (!gdbarch_update_p (info))
5603 internal_error (__FILE__, __LINE__, _("could not update architecture"));
5604 }
5605
5606 static void
5607 set_fp_model_sfunc (char *args, int from_tty,
5608 struct cmd_list_element *c)
5609 {
5610 enum arm_float_model fp_model;
5611
5612 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
5613 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
5614 {
5615 arm_fp_model = fp_model;
5616 break;
5617 }
5618
5619 if (fp_model == ARM_FLOAT_LAST)
5620 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
5621 current_fp_model);
5622
5623 arm_update_current_architecture ();
5624 }
5625
5626 static void
5627 show_fp_model (struct ui_file *file, int from_tty,
5628 struct cmd_list_element *c, const char *value)
5629 {
5630 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5631
5632 if (arm_fp_model == ARM_FLOAT_AUTO
5633 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
5634 fprintf_filtered (file, _("\
5635 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
5636 fp_model_strings[tdep->fp_model]);
5637 else
5638 fprintf_filtered (file, _("\
5639 The current ARM floating point model is \"%s\".\n"),
5640 fp_model_strings[arm_fp_model]);
5641 }
5642
5643 static void
5644 arm_set_abi (char *args, int from_tty,
5645 struct cmd_list_element *c)
5646 {
5647 enum arm_abi_kind arm_abi;
5648
5649 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
5650 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
5651 {
5652 arm_abi_global = arm_abi;
5653 break;
5654 }
5655
5656 if (arm_abi == ARM_ABI_LAST)
5657 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
5658 arm_abi_string);
5659
5660 arm_update_current_architecture ();
5661 }
5662
5663 static void
5664 arm_show_abi (struct ui_file *file, int from_tty,
5665 struct cmd_list_element *c, const char *value)
5666 {
5667 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5668
5669 if (arm_abi_global == ARM_ABI_AUTO
5670 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
5671 fprintf_filtered (file, _("\
5672 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
5673 arm_abi_strings[tdep->arm_abi]);
5674 else
5675 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
5676 arm_abi_string);
5677 }
5678
5679 static void
5680 arm_show_fallback_mode (struct ui_file *file, int from_tty,
5681 struct cmd_list_element *c, const char *value)
5682 {
5683 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5684
5685 fprintf_filtered (file, _("\
5686 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
5687 arm_fallback_mode_string);
5688 }
5689
5690 static void
5691 arm_show_force_mode (struct ui_file *file, int from_tty,
5692 struct cmd_list_element *c, const char *value)
5693 {
5694 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5695
5696 fprintf_filtered (file, _("\
5697 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
5698 arm_force_mode_string);
5699 }
5700
5701 /* If the user changes the register disassembly style used for info
5702 register and other commands, we have to also switch the style used
5703 in opcodes for disassembly output. This function is run in the "set
5704 arm disassembly" command, and does that. */
5705
5706 static void
5707 set_disassembly_style_sfunc (char *args, int from_tty,
5708 struct cmd_list_element *c)
5709 {
5710 set_disassembly_style ();
5711 }
5712 \f
5713 /* Return the ARM register name corresponding to register I. */
5714 static const char *
5715 arm_register_name (struct gdbarch *gdbarch, int i)
5716 {
5717 const int num_regs = gdbarch_num_regs (gdbarch);
5718
5719 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
5720 && i >= num_regs && i < num_regs + 32)
5721 {
5722 static const char *const vfp_pseudo_names[] = {
5723 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
5724 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
5725 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
5726 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
5727 };
5728
5729 return vfp_pseudo_names[i - num_regs];
5730 }
5731
5732 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
5733 && i >= num_regs + 32 && i < num_regs + 32 + 16)
5734 {
5735 static const char *const neon_pseudo_names[] = {
5736 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
5737 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
5738 };
5739
5740 return neon_pseudo_names[i - num_regs - 32];
5741 }
5742
5743 if (i >= ARRAY_SIZE (arm_register_names))
5744 /* These registers are only supported on targets which supply
5745 an XML description. */
5746 return "";
5747
5748 return arm_register_names[i];
5749 }
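/* Editor's note (illustrative, not part of GDB): if gdbarch_num_regs
   returns N, then pseudo register N is named "s0", N + 31 is "s31", and,
   when NEON pseudos are available, N + 32 through N + 47 are "q0" through
   "q15".  Numbers beyond the static name table that are not covered by a
   target description yield the empty string.  */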
5750
5751 static void
5752 set_disassembly_style (void)
5753 {
5754 int current;
5755
5756 /* Find the style that the user wants. */
5757 for (current = 0; current < num_disassembly_options; current++)
5758 if (disassembly_style == valid_disassembly_styles[current])
5759 break;
5760 gdb_assert (current < num_disassembly_options);
5761
5762 /* Synchronize the disassembler. */
5763 set_arm_regname_option (current);
5764 }
5765
5766 /* Test whether the coff symbol specific value corresponds to a Thumb
5767 function. */
5768
5769 static int
5770 coff_sym_is_thumb (int val)
5771 {
5772 return (val == C_THUMBEXT
5773 || val == C_THUMBSTAT
5774 || val == C_THUMBEXTFUNC
5775 || val == C_THUMBSTATFUNC
5776 || val == C_THUMBLABEL);
5777 }
5778
5779 /* arm_coff_make_msymbol_special()
5780 arm_elf_make_msymbol_special()
5781
5782 These functions test whether the COFF or ELF symbol corresponds to
5783 an address in thumb code, and set a "special" bit in a minimal
5784 symbol to indicate that it does. */
5785
5786 static void
5787 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
5788 {
5789 /* Thumb symbols are of type STT_LOPROC, (synonymous with
5790 STT_ARM_TFUNC). */
5791 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
5792 == STT_LOPROC)
5793 MSYMBOL_SET_SPECIAL (msym);
5794 }
5795
5796 static void
5797 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
5798 {
5799 if (coff_sym_is_thumb (val))
5800 MSYMBOL_SET_SPECIAL (msym);
5801 }
5802
5803 static void
5804 arm_objfile_data_free (struct objfile *objfile, void *arg)
5805 {
5806 struct arm_per_objfile *data = arg;
5807 unsigned int i;
5808
5809 for (i = 0; i < objfile->obfd->section_count; i++)
5810 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
5811 }
5812
5813 static void
5814 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
5815 asymbol *sym)
5816 {
5817 const char *name = bfd_asymbol_name (sym);
5818 struct arm_per_objfile *data;
5819 VEC(arm_mapping_symbol_s) **map_p;
5820 struct arm_mapping_symbol new_map_sym;
5821
5822 gdb_assert (name[0] == '$');
5823 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
5824 return;
5825
5826 data = objfile_data (objfile, arm_objfile_data_key);
5827 if (data == NULL)
5828 {
5829 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
5830 struct arm_per_objfile);
5831 set_objfile_data (objfile, arm_objfile_data_key, data);
5832 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
5833 objfile->obfd->section_count,
5834 VEC(arm_mapping_symbol_s) *);
5835 }
5836 map_p = &data->section_maps[bfd_get_section (sym)->index];
5837
5838 new_map_sym.value = sym->value;
5839 new_map_sym.type = name[1];
5840
5841 /* Assume that most mapping symbols appear in order of increasing
5842 value. If they were randomly distributed, it would be faster to
5843 always push here and then sort at first use. */
5844 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
5845 {
5846 struct arm_mapping_symbol *prev_map_sym;
5847
5848 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
5849 if (prev_map_sym->value >= sym->value)
5850 {
5851 unsigned int idx;
5852 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
5853 arm_compare_mapping_symbols);
5854 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
5855 return;
5856 }
5857 }
5858
5859 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
5860 }
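/* Editor's note (illustrative, not part of GDB): ARM ELF mapping symbols
   mark where instruction sets change within a section.  For example,
   hypothetical symbols "$a" at offset 0x0, "$t" at 0x40 and "$d" at 0x80
   would mark the start of ARM code, Thumb code and literal-pool data
   respectively; keeping each section's vector sorted by value lets later
   lookups binary-search for the symbol covering a given address.  */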
5861
5862 static void
5863 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
5864 {
5865 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
5866
5867 /* If necessary, set the T bit. */
5868 if (arm_apcs_32)
5869 {
5870 ULONGEST val;
5871 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
5872 if (arm_pc_is_thumb (pc))
5873 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM, val | CPSR_T);
5874 else
5875 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
5876 val & ~(ULONGEST) CPSR_T);
5877 }
5878 }
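/* Editor's note (illustrative, not part of GDB): CPSR_T is the Thumb
   state bit (bit 5, mask 0x20).  If CPSR currently reads 0x600001d3 and
   the PC being written is a Thumb address, the value written back is
   0x600001f3; writing an ARM address would clear the bit again, giving
   0x600001d3.  */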
5879
5880 /* Read the contents of a NEON quad register, by reading from two
5881 double registers. This is used to implement the quad pseudo
5882 registers, and for argument passing in case the quad registers are
5883 missing; vectors are passed in quad registers when using the VFP
5884 ABI, even if a NEON unit is not present. REGNUM is the index of
5885 the quad register, in [0, 15]. */
5886
5887 static void
5888 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
5889 int regnum, gdb_byte *buf)
5890 {
5891 char name_buf[4];
5892 gdb_byte reg_buf[8];
5893 int offset, double_regnum;
5894
5895 sprintf (name_buf, "d%d", regnum << 1);
5896 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5897 strlen (name_buf));
5898
5899 /* d0 is always the least significant half of q0. */
5900 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
5901 offset = 8;
5902 else
5903 offset = 0;
5904
5905 regcache_raw_read (regcache, double_regnum, reg_buf);
5906 memcpy (buf + offset, reg_buf, 8);
5907
5908 offset = 8 - offset;
5909 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
5910 memcpy (buf + offset, reg_buf, 8);
5911 }
5912
5913 static void
5914 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
5915 int regnum, gdb_byte *buf)
5916 {
5917 const int num_regs = gdbarch_num_regs (gdbarch);
5918 char name_buf[4];
5919 gdb_byte reg_buf[8];
5920 int offset, double_regnum;
5921
5922 gdb_assert (regnum >= num_regs);
5923 regnum -= num_regs;
5924
5925 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
5926 /* Quad-precision register. */
5927 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
5928 else
5929 {
5930 /* Single-precision register. */
5931 gdb_assert (regnum < 32);
5932
5933 /* s0 is always the least significant half of d0. */
5934 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
5935 offset = (regnum & 1) ? 0 : 4;
5936 else
5937 offset = (regnum & 1) ? 4 : 0;
5938
5939 sprintf (name_buf, "d%d", regnum >> 1);
5940 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5941 strlen (name_buf));
5942
5943 regcache_raw_read (regcache, double_regnum, reg_buf);
5944 memcpy (buf, reg_buf + offset, 4);
5945 }
5946 }
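/* Editor's note (illustrative, not part of GDB): on a little-endian
   target the pseudo register s5 (an odd-numbered single) maps to bytes
   4-7 of d2, since 5 >> 1 == 2; on a big-endian target it maps to bytes
   0-3 of the same double register.  Likewise the quad pseudo q1 is
   assembled from d2 and d3, with d2 always supplying the least
   significant half.  */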
5947
5948 /* Store the contents of BUF to a NEON quad register, by writing to
5949 two double registers. This is used to implement the quad pseudo
5950 registers, and for argument passing in case the quad registers are
5951 missing; vectors are passed in quad registers when using the VFP
5952 ABI, even if a NEON unit is not present. REGNUM is the index
5953 of the quad register, in [0, 15]. */
5954
5955 static void
5956 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
5957 int regnum, const gdb_byte *buf)
5958 {
5959 char name_buf[4];
5960 gdb_byte reg_buf[8];
5961 int offset, double_regnum;
5962
5963 sprintf (name_buf, "d%d", regnum << 1);
5964 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5965 strlen (name_buf));
5966
5967 /* d0 is always the least significant half of q0. */
5968 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
5969 offset = 8;
5970 else
5971 offset = 0;
5972
5973 regcache_raw_write (regcache, double_regnum, buf + offset);
5974 offset = 8 - offset;
5975 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
5976 }
5977
5978 static void
5979 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
5980 int regnum, const gdb_byte *buf)
5981 {
5982 const int num_regs = gdbarch_num_regs (gdbarch);
5983 char name_buf[4];
5984 gdb_byte reg_buf[8];
5985 int offset, double_regnum;
5986
5987 gdb_assert (regnum >= num_regs);
5988 regnum -= num_regs;
5989
5990 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
5991 /* Quad-precision register. */
5992 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
5993 else
5994 {
5995 /* Single-precision register. */
5996 gdb_assert (regnum < 32);
5997
5998 /* s0 is always the least significant half of d0. */
5999 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6000 offset = (regnum & 1) ? 0 : 4;
6001 else
6002 offset = (regnum & 1) ? 4 : 0;
6003
6004 sprintf (name_buf, "d%d", regnum >> 1);
6005 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6006 strlen (name_buf));
6007
6008 regcache_raw_read (regcache, double_regnum, reg_buf);
6009 memcpy (reg_buf + offset, buf, 4);
6010 regcache_raw_write (regcache, double_regnum, reg_buf);
6011 }
6012 }
6013
6014 static struct value *
6015 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
6016 {
6017 const int *reg_p = baton;
6018 return value_of_register (*reg_p, frame);
6019 }
6020 \f
6021 static enum gdb_osabi
6022 arm_elf_osabi_sniffer (bfd *abfd)
6023 {
6024 unsigned int elfosabi;
6025 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
6026
6027 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
6028
6029 if (elfosabi == ELFOSABI_ARM)
6030 /* GNU tools use this value. Check note sections in this case,
6031 as well. */
6032 bfd_map_over_sections (abfd,
6033 generic_elf_osabi_sniff_abi_tag_sections,
6034 &osabi);
6035
6036 /* Anything else will be handled by the generic ELF sniffer. */
6037 return osabi;
6038 }
6039
6040 \f
6041 /* Initialize the current architecture based on INFO. If possible,
6042 re-use an architecture from ARCHES, which is a list of
6043 architectures already created during this debugging session.
6044
6045 Called e.g. at program startup, when reading a core file, and when
6046 reading a binary file. */
6047
6048 static struct gdbarch *
6049 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
6050 {
6051 struct gdbarch_tdep *tdep;
6052 struct gdbarch *gdbarch;
6053 struct gdbarch_list *best_arch;
6054 enum arm_abi_kind arm_abi = arm_abi_global;
6055 enum arm_float_model fp_model = arm_fp_model;
6056 struct tdesc_arch_data *tdesc_data = NULL;
6057 int i;
6058 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
6059 int have_neon = 0;
6060 int have_fpa_registers = 1;
6061
6062 /* Check any target description for validity. */
6063 if (tdesc_has_registers (info.target_desc))
6064 {
6065 /* For most registers we require GDB's default names; but also allow
6066 the numeric names for sp / lr / pc, as a convenience. */
6067 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
6068 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
6069 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
6070
6071 const struct tdesc_feature *feature;
6072 int valid_p;
6073
6074 feature = tdesc_find_feature (info.target_desc,
6075 "org.gnu.gdb.arm.core");
6076 if (feature == NULL)
6077 return NULL;
6078
6079 tdesc_data = tdesc_data_alloc ();
6080
6081 valid_p = 1;
6082 for (i = 0; i < ARM_SP_REGNUM; i++)
6083 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
6084 arm_register_names[i]);
6085 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6086 ARM_SP_REGNUM,
6087 arm_sp_names);
6088 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6089 ARM_LR_REGNUM,
6090 arm_lr_names);
6091 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6092 ARM_PC_REGNUM,
6093 arm_pc_names);
6094 valid_p &= tdesc_numbered_register (feature, tdesc_data,
6095 ARM_PS_REGNUM, "cpsr");
6096
6097 if (!valid_p)
6098 {
6099 tdesc_data_cleanup (tdesc_data);
6100 return NULL;
6101 }
6102
6103 feature = tdesc_find_feature (info.target_desc,
6104 "org.gnu.gdb.arm.fpa");
6105 if (feature != NULL)
6106 {
6107 valid_p = 1;
6108 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
6109 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
6110 arm_register_names[i]);
6111 if (!valid_p)
6112 {
6113 tdesc_data_cleanup (tdesc_data);
6114 return NULL;
6115 }
6116 }
6117 else
6118 have_fpa_registers = 0;
6119
6120 feature = tdesc_find_feature (info.target_desc,
6121 "org.gnu.gdb.xscale.iwmmxt");
6122 if (feature != NULL)
6123 {
6124 static const char *const iwmmxt_names[] = {
6125 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
6126 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
6127 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
6128 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
6129 };
6130
6131 valid_p = 1;
6132 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
6133 valid_p
6134 &= tdesc_numbered_register (feature, tdesc_data, i,
6135 iwmmxt_names[i - ARM_WR0_REGNUM]);
6136
6137 /* Check for the control registers, but do not fail if they
6138 are missing. */
6139 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
6140 tdesc_numbered_register (feature, tdesc_data, i,
6141 iwmmxt_names[i - ARM_WR0_REGNUM]);
6142
6143 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
6144 valid_p
6145 &= tdesc_numbered_register (feature, tdesc_data, i,
6146 iwmmxt_names[i - ARM_WR0_REGNUM]);
6147
6148 if (!valid_p)
6149 {
6150 tdesc_data_cleanup (tdesc_data);
6151 return NULL;
6152 }
6153 }
6154
6155 /* If we have a VFP unit, check whether the single precision registers
6156 are present. If not, then we will synthesize them as pseudo
6157 registers. */
6158 feature = tdesc_find_feature (info.target_desc,
6159 "org.gnu.gdb.arm.vfp");
6160 if (feature != NULL)
6161 {
6162 static const char *const vfp_double_names[] = {
6163 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
6164 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
6165 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
6166 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
6167 };
6168
6169 /* Require the double precision registers. There must be either
6170 16 or 32. */
6171 valid_p = 1;
6172 for (i = 0; i < 32; i++)
6173 {
6174 valid_p &= tdesc_numbered_register (feature, tdesc_data,
6175 ARM_D0_REGNUM + i,
6176 vfp_double_names[i]);
6177 if (!valid_p)
6178 break;
6179 }
6180
6181 if (!valid_p && i != 16)
6182 {
6183 tdesc_data_cleanup (tdesc_data);
6184 return NULL;
6185 }
6186
6187 if (tdesc_unnumbered_register (feature, "s0") == 0)
6188 have_vfp_pseudos = 1;
6189
6190 have_vfp_registers = 1;
6191
6192 /* If we have VFP, also check for NEON. The architecture allows
6193 NEON without VFP (integer vector operations only), but GDB
6194 does not support that. */
6195 feature = tdesc_find_feature (info.target_desc,
6196 "org.gnu.gdb.arm.neon");
6197 if (feature != NULL)
6198 {
6199 /* NEON requires 32 double-precision registers. */
6200 if (i != 32)
6201 {
6202 tdesc_data_cleanup (tdesc_data);
6203 return NULL;
6204 }
6205
6206 /* If there are quad registers defined by the stub, use
6207 their type; otherwise (normally) provide them with
6208 the default type. */
6209 if (tdesc_unnumbered_register (feature, "q0") == 0)
6210 have_neon_pseudos = 1;
6211
6212 have_neon = 1;
6213 }
6214 }
6215 }
6216
6217 /* If we have an object to base this architecture on, try to determine
6218 its ABI. */
6219
6220 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
6221 {
6222 int ei_osabi, e_flags;
6223
6224 switch (bfd_get_flavour (info.abfd))
6225 {
6226 case bfd_target_aout_flavour:
6227 /* Assume it's an old APCS-style ABI. */
6228 arm_abi = ARM_ABI_APCS;
6229 break;
6230
6231 case bfd_target_coff_flavour:
6232 /* Assume it's an old APCS-style ABI. */
6233 /* XXX WinCE? */
6234 arm_abi = ARM_ABI_APCS;
6235 break;
6236
6237 case bfd_target_elf_flavour:
6238 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
6239 e_flags = elf_elfheader (info.abfd)->e_flags;
6240
6241 if (ei_osabi == ELFOSABI_ARM)
6242 {
6243 /* GNU tools used to use this value, but do not for EABI
6244 objects. There's nowhere to tag an EABI version
6245 anyway, so assume APCS. */
6246 arm_abi = ARM_ABI_APCS;
6247 }
6248 else if (ei_osabi == ELFOSABI_NONE)
6249 {
6250 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
6251
6252 switch (eabi_ver)
6253 {
6254 case EF_ARM_EABI_UNKNOWN:
6255 /* Assume GNU tools. */
6256 arm_abi = ARM_ABI_APCS;
6257 break;
6258
6259 case EF_ARM_EABI_VER4:
6260 case EF_ARM_EABI_VER5:
6261 arm_abi = ARM_ABI_AAPCS;
6262 /* EABI binaries default to VFP float ordering.
6263 They may also contain build attributes that can
6264 be used to identify if the VFP argument-passing
6265 ABI is in use. */
6266 if (fp_model == ARM_FLOAT_AUTO)
6267 {
6268 #ifdef HAVE_ELF
6269 switch (bfd_elf_get_obj_attr_int (info.abfd,
6270 OBJ_ATTR_PROC,
6271 Tag_ABI_VFP_args))
6272 {
6273 case 0:
6274 /* "The user intended FP parameter/result
6275 passing to conform to AAPCS, base
6276 variant". */
6277 fp_model = ARM_FLOAT_SOFT_VFP;
6278 break;
6279 case 1:
6280 /* "The user intended FP parameter/result
6281 passing to conform to AAPCS, VFP
6282 variant". */
6283 fp_model = ARM_FLOAT_VFP;
6284 break;
6285 case 2:
6286 /* "The user intended FP parameter/result
6287 passing to conform to tool chain-specific
6288 conventions" - we don't know any such
6289 conventions, so leave it as "auto". */
6290 break;
6291 default:
6292 /* Attribute value not mentioned in the
6293 October 2008 ABI, so leave it as
6294 "auto". */
6295 break;
6296 }
6297 #else
6298 fp_model = ARM_FLOAT_SOFT_VFP;
6299 #endif
6300 }
6301 break;
6302
6303 default:
6304 /* Leave it as "auto". */
6305 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
6306 break;
6307 }
6308 }
6309
6310 if (fp_model == ARM_FLOAT_AUTO)
6311 {
6312 int e_flags = elf_elfheader (info.abfd)->e_flags;
6313
6314 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
6315 {
6316 case 0:
6317 /* Leave it as "auto". Strictly speaking this case
6318 means FPA, but almost nobody uses that now, and
6319 many toolchains fail to set the appropriate bits
6320 for the floating-point model they use. */
6321 break;
6322 case EF_ARM_SOFT_FLOAT:
6323 fp_model = ARM_FLOAT_SOFT_FPA;
6324 break;
6325 case EF_ARM_VFP_FLOAT:
6326 fp_model = ARM_FLOAT_VFP;
6327 break;
6328 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
6329 fp_model = ARM_FLOAT_SOFT_VFP;
6330 break;
6331 }
6332 }
6333
6334 if (e_flags & EF_ARM_BE8)
6335 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
6336
6337 break;
6338
6339 default:
6340 /* Leave it as "auto". */
6341 break;
6342 }
6343 }
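/* Editor's note (illustrative, not part of GDB): for example, an ELF
   object with EI_OSABI == ELFOSABI_NONE and EF_ARM_EABI_VER5 set in
   e_flags selects ARM_ABI_AAPCS above; if its Tag_ABI_VFP_args build
   attribute is 1 the float model becomes ARM_FLOAT_VFP, and if it is 0
   or absent the model defaults to ARM_FLOAT_SOFT_VFP.  A legacy pre-EABI
   object carrying EF_ARM_SOFT_FLOAT instead ends up with
   ARM_FLOAT_SOFT_FPA.  */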
6344
6345 /* If there is already a candidate, use it. */
6346 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
6347 best_arch != NULL;
6348 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
6349 {
6350 if (arm_abi != ARM_ABI_AUTO
6351 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
6352 continue;
6353
6354 if (fp_model != ARM_FLOAT_AUTO
6355 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
6356 continue;
6357
6358 /* There are various other properties in tdep that we do not
6359 need to check here: those derived from a target description,
6360 since gdbarches with a different target description are
6361 automatically disqualified. */
6362
6363 /* Found a match. */
6364 break;
6365 }
6366
6367 if (best_arch != NULL)
6368 {
6369 if (tdesc_data != NULL)
6370 tdesc_data_cleanup (tdesc_data);
6371 return best_arch->gdbarch;
6372 }
6373
6374 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
6375 gdbarch = gdbarch_alloc (&info, tdep);
6376
6377 /* Record additional information about the architecture we are defining.
6378 These are gdbarch discriminators, like the OSABI. */
6379 tdep->arm_abi = arm_abi;
6380 tdep->fp_model = fp_model;
6381 tdep->have_fpa_registers = have_fpa_registers;
6382 tdep->have_vfp_registers = have_vfp_registers;
6383 tdep->have_vfp_pseudos = have_vfp_pseudos;
6384 tdep->have_neon_pseudos = have_neon_pseudos;
6385 tdep->have_neon = have_neon;
6386
6387 /* Breakpoints. */
6388 switch (info.byte_order_for_code)
6389 {
6390 case BFD_ENDIAN_BIG:
6391 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
6392 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
6393 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
6394 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
6395
6396 break;
6397
6398 case BFD_ENDIAN_LITTLE:
6399 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
6400 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
6401 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
6402 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
6403
6404 break;
6405
6406 default:
6407 internal_error (__FILE__, __LINE__,
6408 _("arm_gdbarch_init: bad byte order for float format"));
6409 }
6410
6411 /* On ARM targets char defaults to unsigned. */
6412 set_gdbarch_char_signed (gdbarch, 0);
6413
6414 /* Note: for displaced stepping, this includes the breakpoint, and one word
6415 of additional scratch space. This setting isn't used for anything besides
6416 displaced stepping at present. */
6417 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
6418
6419 /* This should be low enough for everything. */
6420 tdep->lowest_pc = 0x20;
6421 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
6422
6423 /* The default, for both APCS and AAPCS, is to return small
6424 structures in registers. */
6425 tdep->struct_return = reg_struct_return;
6426
6427 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
6428 set_gdbarch_frame_align (gdbarch, arm_frame_align);
6429
6430 set_gdbarch_write_pc (gdbarch, arm_write_pc);
6431
6432 /* Frame handling. */
6433 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
6434 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
6435 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
6436
6437 frame_base_set_default (gdbarch, &arm_normal_base);
6438
6439 /* Address manipulation. */
6440 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
6441 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
6442
6443 /* Advance PC across function entry code. */
6444 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
6445
6446 /* Skip trampolines. */
6447 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
6448
6449 /* The stack grows downward. */
6450 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
6451
6452 /* Breakpoint manipulation. */
6453 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
6454 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
6455 arm_remote_breakpoint_from_pc);
6456
6457 /* Information about registers, etc. */
6458 set_gdbarch_deprecated_fp_regnum (gdbarch, ARM_FP_REGNUM); /* ??? */
6459 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
6460 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
6461 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
6462 set_gdbarch_register_type (gdbarch, arm_register_type);
6463
6464 /* This "info float" is FPA-specific. Use the generic version if we
6465 do not have FPA. */
6466 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
6467 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
6468
6469 /* Internal <-> external register number maps. */
6470 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
6471 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
6472
6473 set_gdbarch_register_name (gdbarch, arm_register_name);
6474
6475 /* Returning results. */
6476 set_gdbarch_return_value (gdbarch, arm_return_value);
6477
6478 /* Disassembly. */
6479 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
6480
6481 /* Minsymbol frobbing. */
6482 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
6483 set_gdbarch_coff_make_msymbol_special (gdbarch,
6484 arm_coff_make_msymbol_special);
6485 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
6486
6487 /* Thumb-2 IT block support. */
6488 set_gdbarch_adjust_breakpoint_address (gdbarch,
6489 arm_adjust_breakpoint_address);
6490
6491 /* Virtual tables. */
6492 set_gdbarch_vbit_in_delta (gdbarch, 1);
6493
6494 /* Hook in the ABI-specific overrides, if they have been registered. */
6495 gdbarch_init_osabi (info, gdbarch);
6496
6497 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
6498
6499 /* Add some default predicates. */
6500 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
6501 dwarf2_append_unwinders (gdbarch);
6502 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
6503
6504 /* Now that we have tuned the configuration, set a few final things,
6505 based on what the OS ABI has told us. */
6506
6507 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
6508 binaries are always marked. */
6509 if (tdep->arm_abi == ARM_ABI_AUTO)
6510 tdep->arm_abi = ARM_ABI_APCS;
6511
6512 /* We used to default to FPA for generic ARM, but almost nobody
6513 uses that now, and we now provide a way for the user to force
6514 the model. So default to the most useful variant. */
6515 if (tdep->fp_model == ARM_FLOAT_AUTO)
6516 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
6517
6518 if (tdep->jb_pc >= 0)
6519 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
6520
6521 /* Floating point sizes and format. */
6522 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
6523 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
6524 {
6525 set_gdbarch_double_format
6526 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
6527 set_gdbarch_long_double_format
6528 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
6529 }
6530 else
6531 {
6532 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
6533 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
6534 }
6535
6536 if (have_vfp_pseudos)
6537 {
6538 /* NOTE: These are the only pseudo registers used by
6539 the ARM target at the moment. If more are added, a
6540 little more care in numbering will be needed. */
6541
6542 int num_pseudos = 32;
6543 if (have_neon_pseudos)
6544 num_pseudos += 16;
6545 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
6546 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
6547 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
6548 }
6549
6550 if (tdesc_data)
6551 {
6552 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
6553
6554 tdesc_use_registers (gdbarch, info.target_desc, tdesc_data);
6555
6556 /* Override tdesc_register_type to adjust the types of VFP
6557 registers for NEON. */
6558 set_gdbarch_register_type (gdbarch, arm_register_type);
6559 }
6560
6561 /* Add standard register aliases. We add aliases even for those
6562 names which are used by the current architecture - it's simpler,
6563 and does no harm, since nothing ever lists user registers. */
6564 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
6565 user_reg_add (gdbarch, arm_register_aliases[i].name,
6566 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
6567
6568 return gdbarch;
6569 }
6570
6571 static void
6572 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
6573 {
6574 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6575
6576 if (tdep == NULL)
6577 return;
6578
6579 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
6580 (unsigned long) tdep->lowest_pc);
6581 }
6582
6583 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
6584
6585 void
6586 _initialize_arm_tdep (void)
6587 {
6588 struct ui_file *stb;
6589 long length;
6590 struct cmd_list_element *new_set, *new_show;
6591 const char *setname;
6592 const char *setdesc;
6593 const char *const *regnames;
6594 int numregs, i, j;
6595 static char *helptext;
6596 char regdesc[1024], *rdptr = regdesc;
6597 size_t rest = sizeof (regdesc);
6598
6599 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
6600
6601 arm_objfile_data_key
6602 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
6603
6604 /* Register an ELF OS ABI sniffer for ARM binaries. */
6605 gdbarch_register_osabi_sniffer (bfd_arch_arm,
6606 bfd_target_elf_flavour,
6607 arm_elf_osabi_sniffer);
6608
6609 /* Get the number of possible sets of register names defined in opcodes. */
6610 num_disassembly_options = get_arm_regname_num_options ();
6611
6612 /* Add root prefix command for all "set arm"/"show arm" commands. */
6613 add_prefix_cmd ("arm", no_class, set_arm_command,
6614 _("Various ARM-specific commands."),
6615 &setarmcmdlist, "set arm ", 0, &setlist);
6616
6617 add_prefix_cmd ("arm", no_class, show_arm_command,
6618 _("Various ARM-specific commands."),
6619 &showarmcmdlist, "show arm ", 0, &showlist);
6620
6621 /* Sync the opcode insn printer with our register viewer. */
6622 parse_arm_disassembler_option ("reg-names-std");
6623
6624 /* Initialize the array that will be passed to
6625 add_setshow_enum_cmd(). */
6626 valid_disassembly_styles
6627 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
6628 for (i = 0; i < num_disassembly_options; i++)
6629 {
6630 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
6631 valid_disassembly_styles[i] = setname;
6632 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
6633 rdptr += length;
6634 rest -= length;
6635 /* When we find the default names, tell the disassembler to use
6636 them. */
6637 if (!strcmp (setname, "std"))
6638 {
6639 disassembly_style = setname;
6640 set_arm_regname_option (i);
6641 }
6642 }
6643 /* Mark the end of valid options. */
6644 valid_disassembly_styles[num_disassembly_options] = NULL;
6645
6646 /* Create the help text. */
6647 stb = mem_fileopen ();
6648 fprintf_unfiltered (stb, "%s%s%s",
6649 _("The valid values are:\n"),
6650 regdesc,
6651 _("The default is \"std\"."));
6652 helptext = ui_file_xstrdup (stb, NULL);
6653 ui_file_delete (stb);
6654
6655 add_setshow_enum_cmd("disassembler", no_class,
6656 valid_disassembly_styles, &disassembly_style,
6657 _("Set the disassembly style."),
6658 _("Show the disassembly style."),
6659 helptext,
6660 set_disassembly_style_sfunc,
6661 NULL, /* FIXME: i18n: The disassembly style is \"%s\". */
6662 &setarmcmdlist, &showarmcmdlist);
6663
6664 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
6665 _("Set usage of ARM 32-bit mode."),
6666 _("Show usage of ARM 32-bit mode."),
6667 _("When off, a 26-bit PC will be used."),
6668 NULL,
6669 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
6670 &setarmcmdlist, &showarmcmdlist);
6671
6672 /* Add a command to allow the user to force the FPU model. */
6673 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
6674 _("Set the floating point type."),
6675 _("Show the floating point type."),
6676 _("auto - Determine the FP typefrom the OS-ABI.\n\
6677 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
6678 fpa - FPA co-processor (GCC compiled).\n\
6679 softvfp - Software FP with pure-endian doubles.\n\
6680 vfp - VFP co-processor."),
6681 set_fp_model_sfunc, show_fp_model,
6682 &setarmcmdlist, &showarmcmdlist);
6683
6684 /* Add a command to allow the user to force the ABI. */
6685 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
6686 _("Set the ABI."),
6687 _("Show the ABI."),
6688 NULL, arm_set_abi, arm_show_abi,
6689 &setarmcmdlist, &showarmcmdlist);
6690
6691 /* Add two commands to allow the user to force the assumed
6692 execution mode. */
6693 add_setshow_enum_cmd ("fallback-mode", class_support,
6694 arm_mode_strings, &arm_fallback_mode_string,
6695 _("Set the mode assumed when symbols are unavailable."),
6696 _("Show the mode assumed when symbols are unavailable."),
6697 NULL, NULL, arm_show_fallback_mode,
6698 &setarmcmdlist, &showarmcmdlist);
6699 add_setshow_enum_cmd ("force-mode", class_support,
6700 arm_mode_strings, &arm_force_mode_string,
6701 _("Set the mode assumed even when symbols are available."),
6702 _("Show the mode assumed even when symbols are available."),
6703 NULL, NULL, arm_show_force_mode,
6704 &setarmcmdlist, &showarmcmdlist);
6705
6706 /* Debugging flag. */
6707 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
6708 _("Set ARM debugging."),
6709 _("Show ARM debugging."),
6710 _("When on, arm-specific debugging is enabled."),
6711 NULL,
6712 NULL, /* FIXME: i18n: "ARM debugging is %s." */
6713 &setdebuglist, &showdebuglist);
6714 }
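/* Editor's note -- example use of the commands registered above (an
   illustration, not captured output; the enum values shown assume the
   usual auto/APCS/AAPCS ABI and auto/arm/thumb mode choices):

     (gdb) set arm fpu vfp
     (gdb) set arm abi AAPCS
     (gdb) set arm disassembler std
     (gdb) set arm fallback-mode thumb
     (gdb) show arm fpu
     (gdb) set debug arm on
*/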