1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper () */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
45
46 #include "arm-tdep.h"
47 #include "gdb/sim-arm.h"
48
49 #include "elf-bfd.h"
50 #include "coff/internal.h"
51 #include "elf/arm.h"
52
53 #include "gdb_assert.h"
54 #include "vec.h"
55
56 static int arm_debug;
57
58 /* Macros for setting and testing a bit in a minimal symbol that marks
59 it as Thumb function. The MSB of the minimal symbol's "info" field
60 is used for this purpose.
61
62 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
63 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
64
65 #define MSYMBOL_SET_SPECIAL(msym) \
66 MSYMBOL_TARGET_FLAG_1 (msym) = 1
67
68 #define MSYMBOL_IS_SPECIAL(msym) \
69 MSYMBOL_TARGET_FLAG_1 (msym)
70
71 /* Per-objfile data used for mapping symbols. */
72 static const struct objfile_data *arm_objfile_data_key;
73
74 struct arm_mapping_symbol
75 {
76 bfd_vma value;
77 char type;
78 };
79 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
80 DEF_VEC_O(arm_mapping_symbol_s);
81
82 struct arm_per_objfile
83 {
84 VEC(arm_mapping_symbol_s) **section_maps;
85 };
86
87 /* The list of available "set arm ..." and "show arm ..." commands. */
88 static struct cmd_list_element *setarmcmdlist = NULL;
89 static struct cmd_list_element *showarmcmdlist = NULL;
90
91 /* The type of floating-point to use. Keep this in sync with enum
92 arm_float_model, and the help string in _initialize_arm_tdep. */
93 static const char *fp_model_strings[] =
94 {
95 "auto",
96 "softfpa",
97 "fpa",
98 "softvfp",
99 "vfp",
100 NULL
101 };
102
103 /* A variable that can be configured by the user. */
104 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
105 static const char *current_fp_model = "auto";
106
107 /* The ABI to use. Keep this in sync with arm_abi_kind. */
108 static const char *arm_abi_strings[] =
109 {
110 "auto",
111 "APCS",
112 "AAPCS",
113 NULL
114 };
115
116 /* A variable that can be configured by the user. */
117 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
118 static const char *arm_abi_string = "auto";
119
120 /* The execution mode to assume. */
121 static const char *arm_mode_strings[] =
122 {
123 "auto",
124 "arm",
125 "thumb"
126 };
127
128 static const char *arm_fallback_mode_string = "auto";
129 static const char *arm_force_mode_string = "auto";
130
131 /* Number of different reg name sets (options). */
132 static int num_disassembly_options;
133
134 /* The standard register names, and all the valid aliases for them. */
135 static const struct
136 {
137 const char *name;
138 int regnum;
139 } arm_register_aliases[] = {
140 /* Basic register numbers. */
141 { "r0", 0 },
142 { "r1", 1 },
143 { "r2", 2 },
144 { "r3", 3 },
145 { "r4", 4 },
146 { "r5", 5 },
147 { "r6", 6 },
148 { "r7", 7 },
149 { "r8", 8 },
150 { "r9", 9 },
151 { "r10", 10 },
152 { "r11", 11 },
153 { "r12", 12 },
154 { "r13", 13 },
155 { "r14", 14 },
156 { "r15", 15 },
157 /* Synonyms (argument and variable registers). */
158 { "a1", 0 },
159 { "a2", 1 },
160 { "a3", 2 },
161 { "a4", 3 },
162 { "v1", 4 },
163 { "v2", 5 },
164 { "v3", 6 },
165 { "v4", 7 },
166 { "v5", 8 },
167 { "v6", 9 },
168 { "v7", 10 },
169 { "v8", 11 },
170 /* Other platform-specific names for r9. */
171 { "sb", 9 },
172 { "tr", 9 },
173 /* Special names. */
174 { "ip", 12 },
175 { "sp", 13 },
176 { "lr", 14 },
177 { "pc", 15 },
178 /* Names used by GCC (not listed in the ARM EABI). */
179 { "sl", 10 },
180 { "fp", 11 },
181 /* A special name from the older ATPCS. */
182 { "wr", 7 },
183 };
184
185 static const char *const arm_register_names[] =
186 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
187 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
188 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
189 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
190 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
191 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
192 "fps", "cpsr" }; /* 24 25 */
193
194 /* Valid register name styles. */
195 static const char **valid_disassembly_styles;
196
197 /* Disassembly style to use. Default to "std" register names. */
198 static const char *disassembly_style;
199
200 /* This is used to keep the bfd arch_info in sync with the disassembly
201 style. */
202 static void set_disassembly_style_sfunc (char *, int,
203 struct cmd_list_element *);
204 static void set_disassembly_style (void);
205
206 static void convert_from_extended (const struct floatformat *, const void *,
207 void *, int);
208 static void convert_to_extended (const struct floatformat *, void *,
209 const void *, int);
210
211 static void arm_neon_quad_read (struct gdbarch *gdbarch,
212 struct regcache *regcache,
213 int regnum, gdb_byte *buf);
214 static void arm_neon_quad_write (struct gdbarch *gdbarch,
215 struct regcache *regcache,
216 int regnum, const gdb_byte *buf);
217
218 struct arm_prologue_cache
219 {
220 /* The stack pointer at the time this frame was created; i.e. the
221 caller's stack pointer when this function was called. It is used
222 to identify this frame. */
223 CORE_ADDR prev_sp;
224
225 /* The frame base for this frame is just prev_sp - frame size.
226 FRAMESIZE is the distance from the frame pointer to the
227 initial stack pointer. */
228
229 int framesize;
230
231 /* The register used to hold the frame pointer for this frame. */
232 int framereg;
233
234 /* Saved register offsets. */
235 struct trad_frame_saved_reg *saved_regs;
236 };
237
238 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
239 CORE_ADDR prologue_start,
240 CORE_ADDR prologue_end,
241 struct arm_prologue_cache *cache);
242
243 /* Architecture version for displaced stepping. This affects the behaviour of
244 certain instructions, and really should not be hard-wired. */
245
246 #define DISPLACED_STEPPING_ARCH_VERSION 5
247
248 /* Addresses for calling Thumb functions have the bit 0 set.
249 Here are some macros to test, set, or clear bit 0 of addresses. */
250 #define IS_THUMB_ADDR(addr) ((addr) & 1)
251 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
252 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
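/* For instance, MAKE_THUMB_ADDR (0x8000) yields 0x8001, which
   IS_THUMB_ADDR recognizes as a Thumb address and which
   UNMAKE_THUMB_ADDR maps back to 0x8000.  */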
253
254 /* Set to true if the 32-bit mode is in use. */
255
256 int arm_apcs_32 = 1;
257
258 /* Determine if FRAME is executing in Thumb mode. */
259
260 static int
261 arm_frame_is_thumb (struct frame_info *frame)
262 {
263 CORE_ADDR cpsr;
264
265 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
266 directly (from a signal frame or dummy frame) or by interpreting
267 the saved LR (from a prologue or DWARF frame). So consult it and
268 trust the unwinders. */
269 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
270
271 return (cpsr & CPSR_T) != 0;
272 }
273
274 /* Callback for VEC_lower_bound. */
275
276 static inline int
277 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
278 const struct arm_mapping_symbol *rhs)
279 {
280 return lhs->value < rhs->value;
281 }
282
283 /* Search for the mapping symbol covering MEMADDR. If one is found,
284 return its type. Otherwise, return 0. If START is non-NULL,
285 set *START to the location of the mapping symbol. */
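/* The mapping symbols consulted here are the ARM ELF symbols $a, $t
   and $d that assemblers emit to mark ARM code, Thumb code and data;
   the stored type is the letter after the '$', so a return value of
   't' indicates Thumb code at MEMADDR.  */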
286
287 static char
288 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
289 {
290 struct obj_section *sec;
291
292 /* If there are mapping symbols, consult them. */
293 sec = find_pc_section (memaddr);
294 if (sec != NULL)
295 {
296 struct arm_per_objfile *data;
297 VEC(arm_mapping_symbol_s) *map;
298 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
299 0 };
300 unsigned int idx;
301
302 data = objfile_data (sec->objfile, arm_objfile_data_key);
303 if (data != NULL)
304 {
305 map = data->section_maps[sec->the_bfd_section->index];
306 if (!VEC_empty (arm_mapping_symbol_s, map))
307 {
308 struct arm_mapping_symbol *map_sym;
309
310 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
311 arm_compare_mapping_symbols);
312
313 /* VEC_lower_bound finds the earliest ordered insertion
314 point. If the following symbol starts at this exact
315 address, we use that; otherwise, the preceding
316 mapping symbol covers this address. */
317 if (idx < VEC_length (arm_mapping_symbol_s, map))
318 {
319 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
320 if (map_sym->value == map_key.value)
321 {
322 if (start)
323 *start = map_sym->value + obj_section_addr (sec);
324 return map_sym->type;
325 }
326 }
327
328 if (idx > 0)
329 {
330 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
331 if (start)
332 *start = map_sym->value + obj_section_addr (sec);
333 return map_sym->type;
334 }
335 }
336 }
337 }
338
339 return 0;
340 }
341
342 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
343 CORE_ADDR pc, int insert_bkpt);
344
345 /* Determine if the program counter specified in MEMADDR is in a Thumb
346 function. This function should be called for addresses unrelated to
347 any executing frame; otherwise, prefer arm_frame_is_thumb. */
348
349 static int
350 arm_pc_is_thumb (CORE_ADDR memaddr)
351 {
352 struct obj_section *sec;
353 struct minimal_symbol *sym;
354 char type;
355
356 /* If bit 0 of the address is set, assume this is a Thumb address. */
357 if (IS_THUMB_ADDR (memaddr))
358 return 1;
359
360 /* If the user wants to override the symbol table, let them. */
361 if (strcmp (arm_force_mode_string, "arm") == 0)
362 return 0;
363 if (strcmp (arm_force_mode_string, "thumb") == 0)
364 return 1;
365
366 /* If there are mapping symbols, consult them. */
367 type = arm_find_mapping_symbol (memaddr, NULL);
368 if (type)
369 return type == 't';
370
371 /* Thumb functions have a "special" bit set in minimal symbols. */
372 sym = lookup_minimal_symbol_by_pc (memaddr);
373 if (sym)
374 return (MSYMBOL_IS_SPECIAL (sym));
375
376 /* If the user wants to override the fallback mode, let them. */
377 if (strcmp (arm_fallback_mode_string, "arm") == 0)
378 return 0;
379 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
380 return 1;
381
382 /* If we couldn't find any symbol, but we're talking to a running
383 target, then trust the current value of $cpsr. This lets
384 "display/i $pc" always show the correct mode (though if there is
385 a symbol table we will not reach here, so it still may not be
386 displayed in the mode it will be executed).
387
388 As a further heuristic, if we detect that we are single-stepping, we
389 check which execution state (ARM or Thumb) executing the current
390 instruction will leave us in.  */
391 if (target_has_registers)
392 {
393 struct frame_info *current_frame = get_current_frame ();
394 CORE_ADDR current_pc = get_frame_pc (current_frame);
395 int is_thumb = arm_frame_is_thumb (current_frame);
396 CORE_ADDR next_pc;
397 if (memaddr == current_pc)
398 return is_thumb;
399 else
400 {
401 struct gdbarch *gdbarch = get_frame_arch (current_frame);
402 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
403 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
404 return IS_THUMB_ADDR (next_pc);
405 else
406 return is_thumb;
407 }
408 }
409
410 /* Otherwise we're out of luck; we assume ARM. */
411 return 0;
412 }
413
414 /* Remove useless bits from addresses in a running program. */
415 static CORE_ADDR
416 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
417 {
418 if (arm_apcs_32)
419 return UNMAKE_THUMB_ADDR (val);
420 else
421 return (val & 0x03fffffc);
422 }
423
424 /* When reading symbols, we need to zap the low bit of the address,
425 which may be set to 1 for Thumb functions. */
426 static CORE_ADDR
427 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
428 {
429 return val & ~1;
430 }
431
432 /* Return 1 if PC is the start of a compiler helper function which
433 can be safely ignored during prologue skipping. */
434 static int
435 skip_prologue_function (CORE_ADDR pc)
436 {
437 struct minimal_symbol *msym;
438 const char *name;
439
440 msym = lookup_minimal_symbol_by_pc (pc);
441 if (msym == NULL || SYMBOL_VALUE_ADDRESS (msym) != pc)
442 return 0;
443
444 name = SYMBOL_LINKAGE_NAME (msym);
445 if (name == NULL)
446 return 0;
447
448 /* The GNU linker's Thumb call stub to foo is named
449 __foo_from_thumb. */
450 if (strstr (name, "_from_thumb") != NULL)
451 name += 2;
452
453 /* On soft-float targets, __truncdfsf2 is called to convert promoted
454 arguments to their argument types in non-prototyped
455 functions. */
456 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
457 return 1;
458 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
459 return 1;
460
461 return 0;
462 }
463
464 /* Support routines for instruction parsing. */
465 #define submask(x) ((1L << ((x) + 1)) - 1)
466 #define bit(obj,st) (((obj) >> (st)) & 1)
467 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
468 #define sbits(obj,st,fn) \
469 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
470 #define BranchDest(addr,instr) \
471 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
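/* As a rough illustration: for a BL instruction at address 0x8000
   whose low 24 bits are 0x000001, sbits (instr, 0, 23) is 1 and
   BranchDest yields 0x8000 + 8 + (1 << 2) = 0x800c; a field of
   0xffffff sign-extends to -1 and yields 0x8004.  */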
472
473 /* Analyze a Thumb prologue, looking for a recognizable stack frame
474 and frame pointer. Scan until we encounter a store that could
475 clobber the stack frame unexpectedly, or an unknown instruction.
476 Return the last address which is definitely safe to skip for an
477 initial breakpoint. */
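/* For illustration, a typical unoptimized GCC Thumb prologue using r7
   as the frame pointer might look like:

       push  {r7, lr}       ; 0xb580
       sub   sp, #16        ; 0xb084
       add   r7, sp, #0     ; 0xaf00

   Scanning it below records lr at offset -4 and r7 at offset -8 from
   the entry SP, and leaves r7 as the frame register with a frame size
   of 24 bytes.  */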
478
479 static CORE_ADDR
480 thumb_analyze_prologue (struct gdbarch *gdbarch,
481 CORE_ADDR start, CORE_ADDR limit,
482 struct arm_prologue_cache *cache)
483 {
484 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
485 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
486 int i;
487 pv_t regs[16];
488 struct pv_area *stack;
489 struct cleanup *back_to;
490 CORE_ADDR offset;
491
492 for (i = 0; i < 16; i++)
493 regs[i] = pv_register (i, 0);
494 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
495 back_to = make_cleanup_free_pv_area (stack);
496
497 while (start < limit)
498 {
499 unsigned short insn;
500
501 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
502
503 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
504 {
505 int regno;
506 int mask;
507
508 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
509 break;
510
511 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
512 whether to save LR (R14). */
513 mask = (insn & 0xff) | ((insn & 0x100) << 6);
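/* For example, 0xb5f0 (push {r4-r7, lr}) has bits 4-7 and bit 8
   set, giving a mask of 0x40f0: r4-r7 plus r14 (LR).  */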
514
515 /* Calculate offsets of saved R0-R7 and LR. */
516 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
517 if (mask & (1 << regno))
518 {
519 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
520 -4);
521 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
522 }
523 }
524 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
525 sub sp, #simm */
526 {
527 offset = (insn & 0x7f) << 2; /* get scaled offset */
528 if (insn & 0x80) /* Check for SUB. */
529 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
530 -offset);
531 else
532 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
533 offset);
534 }
535 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
536 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
537 (insn & 0xff) << 2);
538 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
539 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
540 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
541 bits (insn, 6, 8));
542 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
543 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
544 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
545 bits (insn, 0, 7));
546 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
547 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
548 && pv_is_constant (regs[bits (insn, 3, 5)]))
549 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
550 regs[bits (insn, 6, 8)]);
551 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
552 && pv_is_constant (regs[bits (insn, 3, 6)]))
553 {
554 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
555 int rm = bits (insn, 3, 6);
556 regs[rd] = pv_add (regs[rd], regs[rm]);
557 }
558 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
559 {
560 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
561 int src_reg = (insn & 0x78) >> 3;
562 regs[dst_reg] = regs[src_reg];
563 }
564 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
565 {
566 /* Handle stores to the stack. Normally pushes are used,
567 but with GCC -mtpcs-frame, there may be other stores
568 in the prologue to create the frame. */
569 int regno = (insn >> 8) & 0x7;
570 pv_t addr;
571
572 offset = (insn & 0xff) << 2;
573 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
574
575 if (pv_area_store_would_trash (stack, addr))
576 break;
577
578 pv_area_store (stack, addr, 4, regs[regno]);
579 }
580 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
581 {
582 int rd = bits (insn, 0, 2);
583 int rn = bits (insn, 3, 5);
584 pv_t addr;
585
586 offset = bits (insn, 6, 10) << 2;
587 addr = pv_add_constant (regs[rn], offset);
588
589 if (pv_area_store_would_trash (stack, addr))
590 break;
591
592 pv_area_store (stack, addr, 4, regs[rd]);
593 }
594 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
595 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
596 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
597 /* Ignore stores of argument registers to the stack. */
598 ;
599 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
600 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
601 /* Ignore block loads from the stack, potentially copying
602 parameters from memory. */
603 ;
604 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
605 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
606 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
607 /* Similarly ignore single loads from the stack. */
608 ;
609 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
610 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
611 /* Skip register copies, i.e. saves to another register
612 instead of the stack. */
613 ;
614 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
615 /* Recognize constant loads; even with small stacks these are necessary
616 on Thumb. */
617 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
618 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
619 {
620 /* Constant pool loads, for the same reason. */
621 unsigned int constant;
622 CORE_ADDR loc;
623
624 loc = start + 4 + bits (insn, 0, 7) * 4;
625 constant = read_memory_unsigned_integer (loc, 4, byte_order);
626 regs[bits (insn, 8, 10)] = pv_constant (constant);
627 }
628 else if ((insn & 0xe000) == 0xe000 && cache == NULL)
629 {
630 /* Only recognize 32-bit instructions for prologue skipping. */
631 unsigned short inst2;
632
633 inst2 = read_memory_unsigned_integer (start + 2, 2,
634 byte_order_for_code);
635
636 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
637 {
638 /* BL, BLX. Allow some special function calls when
639 skipping the prologue; GCC generates these before
640 storing arguments to the stack. */
641 CORE_ADDR nextpc;
642 int j1, j2, imm1, imm2;
643
644 imm1 = sbits (insn, 0, 10);
645 imm2 = bits (inst2, 0, 10);
646 j1 = bit (inst2, 13);
647 j2 = bit (inst2, 11);
648
649 offset = ((imm1 << 12) + (imm2 << 1));
650 offset ^= ((!j2) << 22) | ((!j1) << 23);
651
652 nextpc = start + 4 + offset;
653 /* For BLX make sure to clear the low bits. */
654 if (bit (inst2, 12) == 0)
655 nextpc = nextpc & 0xfffffffc;
656
657 if (!skip_prologue_function (nextpc))
658 break;
659 }
660 else if ((insn & 0xfe50) == 0xe800 /* stm{db,ia} Rn[!], { registers } */
661 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
662 ;
663 else if ((insn & 0xfe50) == 0xe840 /* strd Rt, Rt2, [Rn, #imm] */
664 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
665 ;
666 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!], { registers } */
667 && (inst2 & 0x8000) == 0x0000
668 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
669 ;
670 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
671 && (inst2 & 0x8000) == 0x0000)
672 /* Since we only recognize this for prologue skipping, do not bother
673 to compute the constant. */
674 regs[bits (inst2, 8, 11)] = regs[bits (insn, 0, 3)];
675 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm12 */
676 && (inst2 & 0x8000) == 0x0000)
677 /* Since we only recognize this for prologue skipping, do not bother
678 to compute the constant. */
679 regs[bits (inst2, 8, 11)] = regs[bits (insn, 0, 3)];
680 else if ((insn & 0xfbf0) == 0xf2a0 /* sub.w Rd, Rn, #imm8 */
681 && (inst2 & 0x8000) == 0x0000)
682 /* Since we only recognize this for prologue skipping, do not bother
683 to compute the constant. */
684 regs[bits (inst2, 8, 11)] = regs[bits (insn, 0, 3)];
685 else if ((insn & 0xff50) == 0xf850 /* ldr.w Rd, [Rn, #imm]{!} */
686 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
687 ;
688 else if ((insn & 0xff50) == 0xe950 /* ldrd Rt, Rt2, [Rn, #imm]{!} */
689 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
690 ;
691 else if ((insn & 0xff50) == 0xf800 /* strb.w or strh.w */
692 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
693 ;
694 else
695 {
696 /* We don't know what this instruction is. We're finished
697 scanning. NOTE: Recognizing more safe-to-ignore
698 instructions here will improve support for optimized
699 code. */
700 break;
701 }
702
703 start += 2;
704 }
705 else
706 {
707 /* We don't know what this instruction is. We're finished
708 scanning. NOTE: Recognizing more safe-to-ignore
709 instructions here will improve support for optimized
710 code. */
711 break;
712 }
713
714 start += 2;
715 }
716
717 if (arm_debug)
718 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
719 paddress (gdbarch, start));
720
721 if (cache == NULL)
722 {
723 do_cleanups (back_to);
724 return start;
725 }
726
727 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
728 {
729 /* Frame pointer is fp. Frame size is constant. */
730 cache->framereg = ARM_FP_REGNUM;
731 cache->framesize = -regs[ARM_FP_REGNUM].k;
732 }
733 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
734 {
735 /* Frame pointer is r7. Frame size is constant. */
736 cache->framereg = THUMB_FP_REGNUM;
737 cache->framesize = -regs[THUMB_FP_REGNUM].k;
738 }
739 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
740 {
741 /* Try the stack pointer... this is a bit desperate. */
742 cache->framereg = ARM_SP_REGNUM;
743 cache->framesize = -regs[ARM_SP_REGNUM].k;
744 }
745 else
746 {
747 /* We're just out of luck. We don't know where the frame is. */
748 cache->framereg = -1;
749 cache->framesize = 0;
750 }
751
752 for (i = 0; i < 16; i++)
753 if (pv_area_find_reg (stack, gdbarch, i, &offset))
754 cache->saved_regs[i].addr = offset;
755
756 do_cleanups (back_to);
757 return start;
758 }
759
760 /* Advance the PC across any function entry prologue instructions to
761 reach some "real" code.
762
763 The APCS (ARM Procedure Call Standard) defines the following
764 prologue:
765
766 mov ip, sp
767 [stmfd sp!, {a1,a2,a3,a4}]
768 stmfd sp!, {...,fp,ip,lr,pc}
769 [stfe f7, [sp, #-12]!]
770 [stfe f6, [sp, #-12]!]
771 [stfe f5, [sp, #-12]!]
772 [stfe f4, [sp, #-12]!]
773 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
774
775 static CORE_ADDR
776 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
777 {
778 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
779 unsigned long inst;
780 CORE_ADDR skip_pc;
781 CORE_ADDR func_addr, limit_pc;
782 struct symtab_and_line sal;
783
784 /* See if we can determine the end of the prologue via the symbol table.
785 If so, then return either PC, or the PC after the prologue, whichever
786 is greater. */
787 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
788 {
789 CORE_ADDR post_prologue_pc
790 = skip_prologue_using_sal (gdbarch, func_addr);
791 struct symtab *s = find_pc_symtab (func_addr);
792
793 /* GCC always emits a line note before the prologue and another
794 one after, even if the two are at the same address or on the
795 same line. Take advantage of this so that we do not need to
796 know every instruction that might appear in the prologue. We
797 will have producer information for most binaries; if it is
798 missing (e.g. for -gstabs), assume the GNU tools. */
799 if (post_prologue_pc
800 && (s == NULL
801 || s->producer == NULL
802 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
803 return post_prologue_pc;
804
805 if (post_prologue_pc != 0)
806 {
807 CORE_ADDR analyzed_limit;
808
809 /* For non-GCC compilers, make sure the entire line is an
810 acceptable prologue; GDB will round this function's
811 return value up to the end of the following line so we
812 can not skip just part of a line (and we do not want to).
813
814 RealView does not treat the prologue specially, but does
815 associate prologue code with the opening brace; so this
816 lets us skip the first line if we think it is the opening
817 brace. */
818 if (arm_pc_is_thumb (func_addr))
819 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
820 post_prologue_pc, NULL);
821 else
822 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
823 post_prologue_pc, NULL);
824
825 if (analyzed_limit != post_prologue_pc)
826 return func_addr;
827
828 return post_prologue_pc;
829 }
830 }
831
832 /* Can't determine prologue from the symbol table, need to examine
833 instructions. */
834
835 /* Find an upper limit on the function prologue using the debug
836 information. If the debug information could not be used to provide
837 that bound, then use an arbitrary large number as the upper bound. */
838 /* Like arm_scan_prologue, stop no later than pc + 64. */
839 limit_pc = skip_prologue_using_sal (gdbarch, pc);
840 if (limit_pc == 0)
841 limit_pc = pc + 64; /* Magic. */
842
843
844 /* Check if this is Thumb code. */
845 if (arm_pc_is_thumb (pc))
846 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
847
848 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
849 {
850 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
851
852 /* "mov ip, sp" is no longer a required part of the prologue. */
853 if (inst == 0xe1a0c00d) /* mov ip, sp */
854 continue;
855
856 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
857 continue;
858
859 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
860 continue;
861
862 /* Some prologues begin with "str lr, [sp, #-4]!". */
863 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
864 continue;
865
866 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
867 continue;
868
869 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
870 continue;
871
872 /* Any insns after this point may float into the code, if it makes
873 for better instruction scheduling, so we skip them only if we
874 find them, but still consider the function to be frame-ful. */
875
876 /* We may have either one sfmfd instruction here, or several stfe
877 insns, depending on the version of floating point code we
878 support. */
879 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
880 continue;
881
882 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
883 continue;
884
885 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
886 continue;
887
888 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
889 continue;
890
891 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
892 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
893 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
894 continue;
895
896 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
897 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
898 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
899 continue;
900
901 /* Un-recognized instruction; stop scanning. */
902 break;
903 }
904
905 return skip_pc; /* End of prologue */
906 }
907
908 /* *INDENT-OFF* */
909 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
910 This function decodes a Thumb function prologue to determine:
911 1) the size of the stack frame
912 2) which registers are saved on it
913 3) the offsets of saved regs
914 4) the offset from the stack pointer to the frame pointer
915
916 A typical Thumb function prologue would create this stack frame
917 (offsets relative to FP)
918 old SP -> 24 stack parameters
919 20 LR
920 16 R7
921 R7 -> 0 local variables (16 bytes)
922 SP -> -12 additional stack space (12 bytes)
923 The frame size would thus be 36 bytes, and the frame offset would be
924 12 bytes. The frame register is R7.
925
926 The comments for thumb_analyze_prologue() describe the algorithm we use
927 to detect the end of the prologue. */
928 /* *INDENT-ON* */
929
930 static void
931 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
932 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
933 {
934 CORE_ADDR prologue_start;
935 CORE_ADDR prologue_end;
936 CORE_ADDR current_pc;
937
938 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
939 &prologue_end))
940 {
941 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
942
943 if (sal.line == 0) /* no line info, use current PC */
944 prologue_end = prev_pc;
945 else if (sal.end < prologue_end) /* next line begins after fn end */
946 prologue_end = sal.end; /* (probably means no prologue) */
947 }
948 else
949 /* We're in the boondocks: we have no idea where the start of the
950 function is. */
951 return;
952
953 prologue_end = min (prologue_end, prev_pc);
954
955 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
956 }
957
958 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
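/* For example, 0xe12fff1e (bx lr) returns 1, while 0xe92dd800
   (stmfd sp!, {fp, ip, lr, pc}) returns 0, since a store multiple
   cannot write the PC.  */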
959
960 static int
961 arm_instruction_changes_pc (uint32_t this_instr)
962 {
963 if (bits (this_instr, 28, 31) == INST_NV)
964 /* Unconditional instructions. */
965 switch (bits (this_instr, 24, 27))
966 {
967 case 0xa:
968 case 0xb:
969 /* Branch with Link and change to Thumb. */
970 return 1;
971 case 0xc:
972 case 0xd:
973 case 0xe:
974 /* Coprocessor register transfer. */
975 if (bits (this_instr, 12, 15) == 15)
976 error (_("Invalid update to pc in instruction"));
977 return 0;
978 default:
979 return 0;
980 }
981 else
982 switch (bits (this_instr, 25, 27))
983 {
984 case 0x0:
985 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
986 {
987 /* Multiplies and extra load/stores. */
988 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
989 /* Neither multiplies nor extension load/stores are allowed
990 to modify PC. */
991 return 0;
992
993 /* Otherwise, miscellaneous instructions. */
994
995 /* BX <reg>, BXJ <reg>, BLX <reg> */
996 if (bits (this_instr, 4, 27) == 0x12fff1
997 || bits (this_instr, 4, 27) == 0x12fff2
998 || bits (this_instr, 4, 27) == 0x12fff3)
999 return 1;
1000
1001 /* Other miscellaneous instructions are unpredictable if they
1002 modify PC. */
1003 return 0;
1004 }
1005 /* Data processing instruction. Fall through. */
1006
1007 case 0x1:
1008 if (bits (this_instr, 12, 15) == 15)
1009 return 1;
1010 else
1011 return 0;
1012
1013 case 0x2:
1014 case 0x3:
1015 /* Media instructions and architecturally undefined instructions. */
1016 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1017 return 0;
1018
1019 /* Stores. */
1020 if (bit (this_instr, 20) == 0)
1021 return 0;
1022
1023 /* Loads. */
1024 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1025 return 1;
1026 else
1027 return 0;
1028
1029 case 0x4:
1030 /* Load/store multiple. */
1031 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1032 return 1;
1033 else
1034 return 0;
1035
1036 case 0x5:
1037 /* Branch and branch with link. */
1038 return 1;
1039
1040 case 0x6:
1041 case 0x7:
1042 /* Coprocessor transfers or SWIs can not affect PC. */
1043 return 0;
1044
1045 default:
1046 internal_error (__FILE__, __LINE__, "bad value in switch");
1047 }
1048 }
1049
1050 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1051 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1052 fill it in. Return the first address not recognized as a prologue
1053 instruction.
1054
1055 We recognize all the instructions typically found in ARM prologues,
1056 plus harmless instructions which can be skipped (either for analysis
1057 purposes, or a more restrictive set that can be skipped when finding
1058 the end of the prologue). */
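/* For illustration, an unoptimized AAPCS-style GCC prologue commonly
   looks something like:

       stmfd sp!, {r11, lr}     ; 0xe92d4800
       add   r11, sp, #4        ; 0xe28db004
       sub   sp, sp, #16        ; 0xe24dd010

   The scan below would record lr at offset -4 and r11 at offset -8
   from the entry SP, with r11 as the frame register and a frame size
   of 4.  */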
1059
1060 static CORE_ADDR
1061 arm_analyze_prologue (struct gdbarch *gdbarch,
1062 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1063 struct arm_prologue_cache *cache)
1064 {
1065 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1066 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1067 int regno;
1068 CORE_ADDR offset, current_pc;
1069 pv_t regs[ARM_FPS_REGNUM];
1070 struct pv_area *stack;
1071 struct cleanup *back_to;
1072 int framereg, framesize;
1073 CORE_ADDR unrecognized_pc = 0;
1074
1075 /* Search the prologue looking for instructions that set up the
1076 frame pointer, adjust the stack pointer, and save registers.
1077
1078 Be careful, however, and if it doesn't look like a prologue,
1079 don't try to scan it. If, for instance, a frameless function
1080 begins with stmfd sp!, then we will tell ourselves there is
1081 a frame, which will confuse stack traceback, as well as "finish"
1082 and other operations that rely on a knowledge of the stack
1083 traceback. */
1084
1085 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1086 regs[regno] = pv_register (regno, 0);
1087 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1088 back_to = make_cleanup_free_pv_area (stack);
1089
1090 for (current_pc = prologue_start;
1091 current_pc < prologue_end;
1092 current_pc += 4)
1093 {
1094 unsigned int insn
1095 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1096
1097 if (insn == 0xe1a0c00d) /* mov ip, sp */
1098 {
1099 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1100 continue;
1101 }
1102 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1103 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1104 {
1105 unsigned imm = insn & 0xff; /* immediate value */
1106 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1107 int rd = bits (insn, 12, 15);
1108 imm = (imm >> rot) | (imm << (32 - rot));
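/* The immediate is the usual ARM "modified immediate": an 8-bit
   value rotated right by twice the 4-bit rotate field.  E.g. an
   imm8 of 0x02 with a rotate field of 9 gives rot = 18 and a
   value of 0x02 ror 18 = 0x8000.  */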
1109 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1110 continue;
1111 }
1112 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1113 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1114 {
1115 unsigned imm = insn & 0xff; /* immediate value */
1116 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1117 int rd = bits (insn, 12, 15);
1118 imm = (imm >> rot) | (imm << (32 - rot));
1119 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1120 continue;
1121 }
1122 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd, [sp, #-4]! */
1123 {
1124 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1125 break;
1126 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1127 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1128 regs[bits (insn, 12, 15)]);
1129 continue;
1130 }
1131 else if ((insn & 0xffff0000) == 0xe92d0000)
1132 /* stmfd sp!, {..., fp, ip, lr, pc}
1133 or
1134 stmfd sp!, {a1, a2, a3, a4} */
1135 {
1136 int mask = insn & 0xffff;
1137
1138 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1139 break;
1140
1141 /* Calculate offsets of saved registers. */
1142 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1143 if (mask & (1 << regno))
1144 {
1145 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1146 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1147 }
1148 }
1149 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1150 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1151 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1152 {
1153 /* No need to add this to saved_regs -- it's just an arg reg. */
1154 continue;
1155 }
1156 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1157 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1158 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1159 {
1160 /* No need to add this to saved_regs -- it's just an arg reg. */
1161 continue;
1162 }
1163 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn, { registers } */
1164 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1165 {
1166 /* No need to add this to saved_regs -- it's just arg regs. */
1167 continue;
1168 }
1169 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1170 {
1171 unsigned imm = insn & 0xff; /* immediate value */
1172 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1173 imm = (imm >> rot) | (imm << (32 - rot));
1174 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1175 }
1176 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1177 {
1178 unsigned imm = insn & 0xff; /* immediate value */
1179 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1180 imm = (imm >> rot) | (imm << (32 - rot));
1181 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1182 }
1183 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
1184 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1185 {
1186 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1187 break;
1188
1189 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1190 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1191 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1192 }
1193 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
1194 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1195 {
1196 int n_saved_fp_regs;
1197 unsigned int fp_start_reg, fp_bound_reg;
1198
1199 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1200 break;
1201
1202 if ((insn & 0x800) == 0x800) /* N0 is set */
1203 {
1204 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1205 n_saved_fp_regs = 3;
1206 else
1207 n_saved_fp_regs = 1;
1208 }
1209 else
1210 {
1211 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1212 n_saved_fp_regs = 2;
1213 else
1214 n_saved_fp_regs = 4;
1215 }
1216
1217 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1218 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1219 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1220 {
1221 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1222 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1223 regs[fp_start_reg]);
1224 }
1225 }
1226 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1227 {
1228 /* Allow some special function calls when skipping the
1229 prologue; GCC generates these before storing arguments to
1230 the stack. */
1231 CORE_ADDR dest = BranchDest (current_pc, insn);
1232
1233 if (skip_prologue_function (dest))
1234 continue;
1235 else
1236 break;
1237 }
1238 else if ((insn & 0xf0000000) != 0xe0000000)
1239 break; /* Condition not true, exit early */
1240 else if (arm_instruction_changes_pc (insn))
1241 /* Don't scan past anything that might change control flow. */
1242 break;
1243 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1244 {
1245 /* Ignore block loads from the stack, potentially copying
1246 parameters from memory. */
1247 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1248 continue;
1249 else
1250 break;
1251 }
1252 else if ((insn & 0xfc500000) == 0xe4100000)
1253 {
1254 /* Similarly ignore single loads from the stack. */
1255 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1256 continue;
1257 else
1258 break;
1259 }
1260 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1261 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1262 register instead of the stack. */
1263 continue;
1264 else
1265 {
1266 /* The optimizer might shove anything into the prologue,
1267 so we just skip what we don't recognize. */
1268 unrecognized_pc = current_pc;
1269 continue;
1270 }
1271 }
1272
1273 if (unrecognized_pc == 0)
1274 unrecognized_pc = current_pc;
1275
1276 /* The frame size is just the distance from the frame register
1277 to the original stack pointer. */
1278 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1279 {
1280 /* Frame pointer is fp. */
1281 framereg = ARM_FP_REGNUM;
1282 framesize = -regs[ARM_FP_REGNUM].k;
1283 }
1284 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1285 {
1286 /* Try the stack pointer... this is a bit desperate. */
1287 framereg = ARM_SP_REGNUM;
1288 framesize = -regs[ARM_SP_REGNUM].k;
1289 }
1290 else
1291 {
1292 /* We're just out of luck. We don't know where the frame is. */
1293 framereg = -1;
1294 framesize = 0;
1295 }
1296
1297 if (cache)
1298 {
1299 cache->framereg = framereg;
1300 cache->framesize = framesize;
1301
1302 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1303 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1304 cache->saved_regs[regno].addr = offset;
1305 }
1306
1307 if (arm_debug)
1308 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1309 paddress (gdbarch, unrecognized_pc));
1310
1311 do_cleanups (back_to);
1312 return unrecognized_pc;
1313 }
1314
1315 static void
1316 arm_scan_prologue (struct frame_info *this_frame,
1317 struct arm_prologue_cache *cache)
1318 {
1319 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1320 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1321 int regno;
1322 CORE_ADDR prologue_start, prologue_end, current_pc;
1323 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1324 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1325 pv_t regs[ARM_FPS_REGNUM];
1326 struct pv_area *stack;
1327 struct cleanup *back_to;
1328 CORE_ADDR offset;
1329
1330 /* Assume there is no frame until proven otherwise. */
1331 cache->framereg = ARM_SP_REGNUM;
1332 cache->framesize = 0;
1333
1334 /* Check for Thumb prologue. */
1335 if (arm_frame_is_thumb (this_frame))
1336 {
1337 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1338 return;
1339 }
1340
1341 /* Find the function prologue. If we can't find the function in
1342 the symbol table, peek in the stack frame to find the PC. */
1343 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1344 &prologue_end))
1345 {
1346 /* One way to find the end of the prologue (which works well
1347 for unoptimized code) is to do the following:
1348
1349 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1350
1351 if (sal.line == 0)
1352 prologue_end = prev_pc;
1353 else if (sal.end < prologue_end)
1354 prologue_end = sal.end;
1355
1356 This mechanism is very accurate so long as the optimizer
1357 doesn't move any instructions from the function body into the
1358 prologue. If this happens, sal.end will be the last
1359 instruction in the first hunk of prologue code just before
1360 the first instruction that the scheduler has moved from
1361 the body to the prologue.
1362
1363 In order to make sure that we scan all of the prologue
1364 instructions, we use a slightly less accurate mechanism which
1365 may scan more than necessary. To help compensate for this
1366 lack of accuracy, the prologue scanning loop below contains
1367 several clauses which'll cause the loop to terminate early if
1368 an implausible prologue instruction is encountered.
1369
1370 The expression
1371
1372 prologue_start + 64
1373
1374 is a suitable endpoint since it accounts for the largest
1375 possible prologue plus up to five instructions inserted by
1376 the scheduler. */
1377
1378 if (prologue_end > prologue_start + 64)
1379 {
1380 prologue_end = prologue_start + 64; /* See above. */
1381 }
1382 }
1383 else
1384 {
1385 /* We have no symbol information. Our only option is to assume this
1386 function has a standard stack frame and the normal frame register.
1387 Then, we can find the value of our frame pointer on entrance to
1388 the callee (or at the present moment if this is the innermost frame).
1389 The value stored there should be the address of the stmfd + 8. */
1390 CORE_ADDR frame_loc;
1391 LONGEST return_value;
1392
1393 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1394 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1395 return;
1396 else
1397 {
1398 prologue_start = gdbarch_addr_bits_remove
1399 (gdbarch, return_value) - 8;
1400 prologue_end = prologue_start + 64; /* See above. */
1401 }
1402 }
1403
1404 if (prev_pc < prologue_end)
1405 prologue_end = prev_pc;
1406
1407 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1408 }
1409
1410 static struct arm_prologue_cache *
1411 arm_make_prologue_cache (struct frame_info *this_frame)
1412 {
1413 int reg;
1414 struct arm_prologue_cache *cache;
1415 CORE_ADDR unwound_fp;
1416
1417 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1418 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1419
1420 arm_scan_prologue (this_frame, cache);
1421
1422 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1423 if (unwound_fp == 0)
1424 return cache;
1425
1426 cache->prev_sp = unwound_fp + cache->framesize;
1427
1428 /* Calculate actual addresses of saved registers using offsets
1429 determined by arm_scan_prologue. */
1430 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1431 if (trad_frame_addr_p (cache->saved_regs, reg))
1432 cache->saved_regs[reg].addr += cache->prev_sp;
1433
1434 return cache;
1435 }
1436
1437 /* Our frame ID for a normal frame is the current function's starting PC
1438 and the caller's SP when we were called. */
1439
1440 static void
1441 arm_prologue_this_id (struct frame_info *this_frame,
1442 void **this_cache,
1443 struct frame_id *this_id)
1444 {
1445 struct arm_prologue_cache *cache;
1446 struct frame_id id;
1447 CORE_ADDR pc, func;
1448
1449 if (*this_cache == NULL)
1450 *this_cache = arm_make_prologue_cache (this_frame);
1451 cache = *this_cache;
1452
1453 /* This is meant to halt the backtrace at "_start". */
1454 pc = get_frame_pc (this_frame);
1455 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1456 return;
1457
1458 /* If we've hit a wall, stop. */
1459 if (cache->prev_sp == 0)
1460 return;
1461
1462 func = get_frame_func (this_frame);
1463 id = frame_id_build (cache->prev_sp, func);
1464 *this_id = id;
1465 }
1466
1467 static struct value *
1468 arm_prologue_prev_register (struct frame_info *this_frame,
1469 void **this_cache,
1470 int prev_regnum)
1471 {
1472 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1473 struct arm_prologue_cache *cache;
1474
1475 if (*this_cache == NULL)
1476 *this_cache = arm_make_prologue_cache (this_frame);
1477 cache = *this_cache;
1478
1479 /* If we are asked to unwind the PC, then we need to return the LR
1480 instead. The prologue may save PC, but it will point into this
1481 frame's prologue, not the next frame's resume location. Also
1482 strip the saved T bit. A valid LR may have the low bit set, but
1483 a valid PC never does. */
1484 if (prev_regnum == ARM_PC_REGNUM)
1485 {
1486 CORE_ADDR lr;
1487
1488 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1489 return frame_unwind_got_constant (this_frame, prev_regnum,
1490 arm_addr_bits_remove (gdbarch, lr));
1491 }
1492
1493 /* SP is generally not saved to the stack, but this frame is
1494 identified by the next frame's stack pointer at the time of the call.
1495 The value was already reconstructed into PREV_SP. */
1496 if (prev_regnum == ARM_SP_REGNUM)
1497 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1498
1499 /* The CPSR may have been changed by the call instruction and by the
1500 called function. The only bit we can reconstruct is the T bit,
1501 by checking the low bit of LR as of the call. This is a reliable
1502 indicator of Thumb-ness except for some ARM v4T pre-interworking
1503 Thumb code, which could get away with a clear low bit as long as
1504 the called function did not use bx. Guess that all other
1505 bits are unchanged; the condition flags are presumably lost,
1506 but the processor status is likely valid. */
1507 if (prev_regnum == ARM_PS_REGNUM)
1508 {
1509 CORE_ADDR lr, cpsr;
1510
1511 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1512 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1513 if (IS_THUMB_ADDR (lr))
1514 cpsr |= CPSR_T;
1515 else
1516 cpsr &= ~CPSR_T;
1517 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1518 }
1519
1520 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1521 prev_regnum);
1522 }
1523
1524 struct frame_unwind arm_prologue_unwind = {
1525 NORMAL_FRAME,
1526 arm_prologue_this_id,
1527 arm_prologue_prev_register,
1528 NULL,
1529 default_frame_sniffer
1530 };
1531
1532 static struct arm_prologue_cache *
1533 arm_make_stub_cache (struct frame_info *this_frame)
1534 {
1535 struct arm_prologue_cache *cache;
1536
1537 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1538 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1539
1540 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
1541
1542 return cache;
1543 }
1544
1545 /* Our frame ID for a stub frame is the current SP and LR. */
1546
1547 static void
1548 arm_stub_this_id (struct frame_info *this_frame,
1549 void **this_cache,
1550 struct frame_id *this_id)
1551 {
1552 struct arm_prologue_cache *cache;
1553
1554 if (*this_cache == NULL)
1555 *this_cache = arm_make_stub_cache (this_frame);
1556 cache = *this_cache;
1557
1558 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1559 }
1560
1561 static int
1562 arm_stub_unwind_sniffer (const struct frame_unwind *self,
1563 struct frame_info *this_frame,
1564 void **this_prologue_cache)
1565 {
1566 CORE_ADDR addr_in_block;
1567 char dummy[4];
1568
1569 addr_in_block = get_frame_address_in_block (this_frame);
1570 if (in_plt_section (addr_in_block, NULL)
1571 /* We also use the stub unwinder if the target memory is unreadable
1572 to avoid having the prologue unwinder try to read it. */
1573 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1574 return 1;
1575
1576 return 0;
1577 }
1578
1579 struct frame_unwind arm_stub_unwind = {
1580 NORMAL_FRAME,
1581 arm_stub_this_id,
1582 arm_prologue_prev_register,
1583 NULL,
1584 arm_stub_unwind_sniffer
1585 };
1586
1587 static CORE_ADDR
1588 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1589 {
1590 struct arm_prologue_cache *cache;
1591
1592 if (*this_cache == NULL)
1593 *this_cache = arm_make_prologue_cache (this_frame);
1594 cache = *this_cache;
1595
1596 return cache->prev_sp - cache->framesize;
1597 }
1598
1599 struct frame_base arm_normal_base = {
1600 &arm_prologue_unwind,
1601 arm_normal_frame_base,
1602 arm_normal_frame_base,
1603 arm_normal_frame_base
1604 };
1605
1606 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1607 dummy frame. The frame ID's base needs to match the TOS value
1608 saved by save_dummy_frame_tos() and returned from
1609 arm_push_dummy_call, and the PC needs to match the dummy frame's
1610 breakpoint. */
1611
1612 static struct frame_id
1613 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1614 {
1615 return frame_id_build (get_frame_register_unsigned (this_frame, ARM_SP_REGNUM),
1616 get_frame_pc (this_frame));
1617 }
1618
1619 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1620 be used to construct the previous frame's ID, after looking up the
1621 containing function). */
1622
1623 static CORE_ADDR
1624 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1625 {
1626 CORE_ADDR pc;
1627 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
1628 return arm_addr_bits_remove (gdbarch, pc);
1629 }
1630
1631 static CORE_ADDR
1632 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1633 {
1634 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
1635 }
1636
1637 static struct value *
1638 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
1639 int regnum)
1640 {
1641 struct gdbarch * gdbarch = get_frame_arch (this_frame);
1642 CORE_ADDR lr, cpsr;
1643
1644 switch (regnum)
1645 {
1646 case ARM_PC_REGNUM:
1647 /* The PC is normally copied from the return column, which
1648 describes saves of LR. However, that version may have an
1649 extra bit set to indicate Thumb state. The bit is not
1650 part of the PC. */
1651 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1652 return frame_unwind_got_constant (this_frame, regnum,
1653 arm_addr_bits_remove (gdbarch, lr));
1654
1655 case ARM_PS_REGNUM:
1656 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
1657 cpsr = get_frame_register_unsigned (this_frame, regnum);
1658 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1659 if (IS_THUMB_ADDR (lr))
1660 cpsr |= CPSR_T;
1661 else
1662 cpsr &= ~CPSR_T;
1663 return frame_unwind_got_constant (this_frame, regnum, cpsr);
1664
1665 default:
1666 internal_error (__FILE__, __LINE__,
1667 _("Unexpected register %d"), regnum);
1668 }
1669 }
1670
1671 static void
1672 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1673 struct dwarf2_frame_state_reg *reg,
1674 struct frame_info *this_frame)
1675 {
1676 switch (regnum)
1677 {
1678 case ARM_PC_REGNUM:
1679 case ARM_PS_REGNUM:
1680 reg->how = DWARF2_FRAME_REG_FN;
1681 reg->loc.fn = arm_dwarf2_prev_register;
1682 break;
1683 case ARM_SP_REGNUM:
1684 reg->how = DWARF2_FRAME_REG_CFA;
1685 break;
1686 }
1687 }
1688
1689 /* When arguments must be pushed onto the stack, they go on in reverse
1690 order. The code below implements a FILO (stack) to do this. */
1691
1692 struct stack_item
1693 {
1694 int len;
1695 struct stack_item *prev;
1696 void *data;
1697 };
1698
1699 static struct stack_item *
1700 push_stack_item (struct stack_item *prev, const void *contents, int len)
1701 {
1702 struct stack_item *si;
1703 si = xmalloc (sizeof (struct stack_item));
1704 si->data = xmalloc (len);
1705 si->len = len;
1706 si->prev = prev;
1707 memcpy (si->data, contents, len);
1708 return si;
1709 }
1710
1711 static struct stack_item *
1712 pop_stack_item (struct stack_item *si)
1713 {
1714 struct stack_item *dead = si;
1715 si = si->prev;
1716 xfree (dead->data);
1717 xfree (dead);
1718 return si;
1719 }
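/* A rough usage sketch: argument bytes are pushed with

     si = push_stack_item (si, contents, len);

   and later written out to the stack in last-pushed-first order with

     while (si)
       {
         sp -= si->len;
         write_memory (sp, si->data, si->len);
         si = pop_stack_item (si);
       }

   which is essentially what arm_push_dummy_call does for arguments
   that do not fit in registers.  */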
1720
1721
1722 /* Return the alignment (in bytes) of the given type. */
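/* For example, a struct { char c; int i; } yields 4 (the largest
   alignment among its fields), while an array takes the alignment of
   its element type.  */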
1723
1724 static int
1725 arm_type_align (struct type *t)
1726 {
1727 int n;
1728 int align;
1729 int falign;
1730
1731 t = check_typedef (t);
1732 switch (TYPE_CODE (t))
1733 {
1734 default:
1735 /* Should never happen. */
1736 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1737 return 4;
1738
1739 case TYPE_CODE_PTR:
1740 case TYPE_CODE_ENUM:
1741 case TYPE_CODE_INT:
1742 case TYPE_CODE_FLT:
1743 case TYPE_CODE_SET:
1744 case TYPE_CODE_RANGE:
1745 case TYPE_CODE_BITSTRING:
1746 case TYPE_CODE_REF:
1747 case TYPE_CODE_CHAR:
1748 case TYPE_CODE_BOOL:
1749 return TYPE_LENGTH (t);
1750
1751 case TYPE_CODE_ARRAY:
1752 case TYPE_CODE_COMPLEX:
1753 /* TODO: What about vector types? */
1754 return arm_type_align (TYPE_TARGET_TYPE (t));
1755
1756 case TYPE_CODE_STRUCT:
1757 case TYPE_CODE_UNION:
1758 align = 1;
1759 for (n = 0; n < TYPE_NFIELDS (t); n++)
1760 {
1761 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
1762 if (falign > align)
1763 align = falign;
1764 }
1765 return align;
1766 }
1767 }
1768
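/* Illustrative example (a sketch, not from the original source): the
   rule implemented below is "scalars are aligned to their size, and an
   aggregate to the largest alignment of its members".  Assuming a
   4-byte int and an 8-byte double on the target,

       struct s { char c; int i; double d; };

   gives max (1, 4, 8) == 8, so arm_type_align would return 8 here.  */
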
1769 /* Possible base types for a candidate for passing and returning in
1770 VFP registers. */
1771
1772 enum arm_vfp_cprc_base_type
1773 {
1774 VFP_CPRC_UNKNOWN,
1775 VFP_CPRC_SINGLE,
1776 VFP_CPRC_DOUBLE,
1777 VFP_CPRC_VEC64,
1778 VFP_CPRC_VEC128
1779 };
1780
1781 /* The length of one element of base type B. */
1782
1783 static unsigned
1784 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
1785 {
1786 switch (b)
1787 {
1788 case VFP_CPRC_SINGLE:
1789 return 4;
1790 case VFP_CPRC_DOUBLE:
1791 return 8;
1792 case VFP_CPRC_VEC64:
1793 return 8;
1794 case VFP_CPRC_VEC128:
1795 return 16;
1796 default:
1797 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
1798 (int) b);
1799 }
1800 }
1801
1802 /* The character ('s', 'd' or 'q') for the type of VFP register used
1803 for passing base type B. */
1804
1805 static int
1806 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
1807 {
1808 switch (b)
1809 {
1810 case VFP_CPRC_SINGLE:
1811 return 's';
1812 case VFP_CPRC_DOUBLE:
1813 return 'd';
1814 case VFP_CPRC_VEC64:
1815 return 'd';
1816 case VFP_CPRC_VEC128:
1817 return 'q';
1818 default:
1819 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
1820 (int) b);
1821 }
1822 }
1823
1824 /* Determine whether T may be part of a candidate for passing and
1825 returning in VFP registers, ignoring the limit on the total number
1826 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
1827 classification of the first valid component found; if it is not
1828 VFP_CPRC_UNKNOWN, all components must have the same classification
1829 as *BASE_TYPE. If it is found that T contains a type not permitted
1830 for passing and returning in VFP registers, a type differently
1831 classified from *BASE_TYPE, or two types differently classified
1832 from each other, return -1, otherwise return the total number of
1833 base-type elements found (possibly 0 in an empty structure or
1834 array). Vectors and complex types are not currently supported,
1835 matching the generic AAPCS support. */
1836
1837 static int
1838 arm_vfp_cprc_sub_candidate (struct type *t,
1839 enum arm_vfp_cprc_base_type *base_type)
1840 {
1841 t = check_typedef (t);
1842 switch (TYPE_CODE (t))
1843 {
1844 case TYPE_CODE_FLT:
1845 switch (TYPE_LENGTH (t))
1846 {
1847 case 4:
1848 if (*base_type == VFP_CPRC_UNKNOWN)
1849 *base_type = VFP_CPRC_SINGLE;
1850 else if (*base_type != VFP_CPRC_SINGLE)
1851 return -1;
1852 return 1;
1853
1854 case 8:
1855 if (*base_type == VFP_CPRC_UNKNOWN)
1856 *base_type = VFP_CPRC_DOUBLE;
1857 else if (*base_type != VFP_CPRC_DOUBLE)
1858 return -1;
1859 return 1;
1860
1861 default:
1862 return -1;
1863 }
1864 break;
1865
1866 case TYPE_CODE_ARRAY:
1867 {
1868 int count;
1869 unsigned unitlen;
1870 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
1871 if (count == -1)
1872 return -1;
1873 if (TYPE_LENGTH (t) == 0)
1874 {
1875 gdb_assert (count == 0);
1876 return 0;
1877 }
1878 else if (count == 0)
1879 return -1;
1880 unitlen = arm_vfp_cprc_unit_length (*base_type);
1881 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
1882 return TYPE_LENGTH (t) / unitlen;
1883 }
1884 break;
1885
1886 case TYPE_CODE_STRUCT:
1887 {
1888 int count = 0;
1889 unsigned unitlen;
1890 int i;
1891 for (i = 0; i < TYPE_NFIELDS (t); i++)
1892 {
1893 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
1894 base_type);
1895 if (sub_count == -1)
1896 return -1;
1897 count += sub_count;
1898 }
1899 if (TYPE_LENGTH (t) == 0)
1900 {
1901 gdb_assert (count == 0);
1902 return 0;
1903 }
1904 else if (count == 0)
1905 return -1;
1906 unitlen = arm_vfp_cprc_unit_length (*base_type);
1907 if (TYPE_LENGTH (t) != unitlen * count)
1908 return -1;
1909 return count;
1910 }
1911
1912 case TYPE_CODE_UNION:
1913 {
1914 int count = 0;
1915 unsigned unitlen;
1916 int i;
1917 for (i = 0; i < TYPE_NFIELDS (t); i++)
1918 {
1919 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
1920 base_type);
1921 if (sub_count == -1)
1922 return -1;
1923 count = (count > sub_count ? count : sub_count);
1924 }
1925 if (TYPE_LENGTH (t) == 0)
1926 {
1927 gdb_assert (count == 0);
1928 return 0;
1929 }
1930 else if (count == 0)
1931 return -1;
1932 unitlen = arm_vfp_cprc_unit_length (*base_type);
1933 if (TYPE_LENGTH (t) != unitlen * count)
1934 return -1;
1935 return count;
1936 }
1937
1938 default:
1939 break;
1940 }
1941
1942 return -1;
1943 }
1944
1945 /* Determine whether T is a VFP co-processor register candidate (CPRC)
1946 if passed to or returned from a non-variadic function with the VFP
1947 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
1948 *BASE_TYPE to the base type for T and *COUNT to the number of
1949 elements of that base type before returning. */
1950
1951 static int
1952 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
1953 int *count)
1954 {
1955 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
1956 int c = arm_vfp_cprc_sub_candidate (t, &b);
1957 if (c <= 0 || c > 4)
1958 return 0;
1959 *base_type = b;
1960 *count = c;
1961 return 1;
1962 }
1963
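/* Illustrative examples (not from the original source): per the rules
   above, a homogeneous aggregate of at most four elements of one
   floating-point base type qualifies as a CPRC, e.g.

       struct rect { double x0, y0, x1, y1; };   -- base type double, count 4
       float v2[2];                              -- base type single, count 2

   while mixed or oversized aggregates do not:

       struct mixed { float f; double d; };      -- members classify differently
       double big[5];                            -- more than four elements  */
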
1964 /* Return 1 if the VFP ABI should be used for passing arguments to and
1965 returning values from a function of type FUNC_TYPE, 0
1966 otherwise. */
1967
1968 static int
1969 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
1970 {
1971 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1972 /* Variadic functions always use the base ABI. Assume that functions
1973 without debug info are not variadic. */
1974 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
1975 return 0;
1976 /* The VFP ABI is only supported as a variant of AAPCS. */
1977 if (tdep->arm_abi != ARM_ABI_AAPCS)
1978 return 0;
1979 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
1980 }
1981
1982 /* We currently only support passing parameters in integer registers, which
1983 conforms with GCC's default model, and VFP argument passing following
1984 the VFP variant of AAPCS. Several other variants exist and
1985 we should probably support some of them based on the selected ABI. */
1986
1987 static CORE_ADDR
1988 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1989 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
1990 struct value **args, CORE_ADDR sp, int struct_return,
1991 CORE_ADDR struct_addr)
1992 {
1993 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1994 int argnum;
1995 int argreg;
1996 int nstack;
1997 struct stack_item *si = NULL;
1998 int use_vfp_abi;
1999 struct type *ftype;
2000 unsigned vfp_regs_free = (1 << 16) - 1;
2001
2002 /* Determine the type of this function and whether the VFP ABI
2003 applies. */
2004 ftype = check_typedef (value_type (function));
2005 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
2006 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
2007 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
2008
2009 /* Set the return address. For the ARM, the return breakpoint is
2010 always at BP_ADDR. */
2011 if (arm_pc_is_thumb (bp_addr))
2012 bp_addr |= 1;
2013 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
2014
2015 /* Walk through the list of args and determine how large a temporary
2016 stack is required. Need to take care here as structs may be
2017 passed on the stack, and we have to push them. */
2018 nstack = 0;
2019
2020 argreg = ARM_A1_REGNUM;
2021 nstack = 0;
2022
2023 /* The struct_return pointer occupies the first parameter
2024 passing register. */
2025 if (struct_return)
2026 {
2027 if (arm_debug)
2028 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
2029 gdbarch_register_name (gdbarch, argreg),
2030 paddress (gdbarch, struct_addr));
2031 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
2032 argreg++;
2033 }
2034
2035 for (argnum = 0; argnum < nargs; argnum++)
2036 {
2037 int len;
2038 struct type *arg_type;
2039 struct type *target_type;
2040 enum type_code typecode;
2041 const bfd_byte *val;
2042 int align;
2043 enum arm_vfp_cprc_base_type vfp_base_type;
2044 int vfp_base_count;
2045 int may_use_core_reg = 1;
2046
2047 arg_type = check_typedef (value_type (args[argnum]));
2048 len = TYPE_LENGTH (arg_type);
2049 target_type = TYPE_TARGET_TYPE (arg_type);
2050 typecode = TYPE_CODE (arg_type);
2051 val = value_contents (args[argnum]);
2052
2053 align = arm_type_align (arg_type);
2054 /* Round alignment up to a whole number of words. */
2055 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
2056 /* Different ABIs have different maximum alignments. */
2057 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
2058 {
2059 /* The APCS ABI only requires word alignment. */
2060 align = INT_REGISTER_SIZE;
2061 }
2062 else
2063 {
2064 /* The AAPCS requires at most doubleword alignment. */
2065 if (align > INT_REGISTER_SIZE * 2)
2066 align = INT_REGISTER_SIZE * 2;
2067 }
2068
2069 if (use_vfp_abi
2070 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
2071 &vfp_base_count))
2072 {
2073 int regno;
2074 int unit_length;
2075 int shift;
2076 unsigned mask;
2077
2078 /* Because this is a CPRC it cannot go in a core register or
2079 cause a core register to be skipped for alignment.
2080 Either it goes in VFP registers and the rest of this loop
2081 iteration is skipped for this argument, or it goes on the
2082 stack (and the stack alignment code is correct for this
2083 case). */
2084 may_use_core_reg = 0;
2085
2086 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
2087 shift = unit_length / 4;
2088 mask = (1 << (shift * vfp_base_count)) - 1;
2089 for (regno = 0; regno < 16; regno += shift)
2090 if (((vfp_regs_free >> regno) & mask) == mask)
2091 break;
2092
2093 if (regno < 16)
2094 {
2095 int reg_char;
2096 int reg_scaled;
2097 int i;
2098
2099 vfp_regs_free &= ~(mask << regno);
2100 reg_scaled = regno / shift;
2101 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
2102 for (i = 0; i < vfp_base_count; i++)
2103 {
2104 char name_buf[4];
2105 int regnum;
2106 if (reg_char == 'q')
2107 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
2108 val + i * unit_length);
2109 else
2110 {
2111 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
2112 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
2113 strlen (name_buf));
2114 regcache_cooked_write (regcache, regnum,
2115 val + i * unit_length);
2116 }
2117 }
2118 continue;
2119 }
2120 else
2121 {
2122 /* This CPRC could not go in VFP registers, so all VFP
2123 registers are now marked as used. */
2124 vfp_regs_free = 0;
2125 }
2126 }
2127
2128 /* Push stack padding for doubleword alignment. */
2129 if (nstack & (align - 1))
2130 {
2131 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2132 nstack += INT_REGISTER_SIZE;
2133 }
2134
2135 /* Doubleword aligned quantities must go in even register pairs. */
2136 if (may_use_core_reg
2137 && argreg <= ARM_LAST_ARG_REGNUM
2138 && align > INT_REGISTER_SIZE
2139 && argreg & 1)
2140 argreg++;
2141
2142 /* If the argument is a pointer to a function, and it is a
2143 Thumb function, create a LOCAL copy of the value and set
2144 the THUMB bit in it. */
2145 if (TYPE_CODE_PTR == typecode
2146 && target_type != NULL
2147 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
2148 {
2149 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
2150 if (arm_pc_is_thumb (regval))
2151 {
2152 bfd_byte *copy = alloca (len);
2153 store_unsigned_integer (copy, len, byte_order,
2154 MAKE_THUMB_ADDR (regval));
2155 val = copy;
2156 }
2157 }
2158
2159 /* Copy the argument to general registers or the stack in
2160 register-sized pieces. Large arguments are split between
2161 registers and stack. */
2162 while (len > 0)
2163 {
2164 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
2165
2166 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
2167 {
2168 /* The argument is being passed in a general purpose
2169 register. */
2170 CORE_ADDR regval
2171 = extract_unsigned_integer (val, partial_len, byte_order);
2172 if (byte_order == BFD_ENDIAN_BIG)
2173 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
2174 if (arm_debug)
2175 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
2176 argnum,
2177 gdbarch_register_name
2178 (gdbarch, argreg),
2179 phex (regval, INT_REGISTER_SIZE));
2180 regcache_cooked_write_unsigned (regcache, argreg, regval);
2181 argreg++;
2182 }
2183 else
2184 {
2185 /* Push the arguments onto the stack. */
2186 if (arm_debug)
2187 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
2188 argnum, nstack);
2189 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2190 nstack += INT_REGISTER_SIZE;
2191 }
2192
2193 len -= partial_len;
2194 val += partial_len;
2195 }
2196 }
2197 /* If we have an odd number of words to push, then decrement the stack
2198 by one word now, so that the first stack argument will be doubleword aligned. */
2199 if (nstack & 4)
2200 sp -= 4;
2201
2202 while (si)
2203 {
2204 sp -= si->len;
2205 write_memory (sp, si->data, si->len);
2206 si = pop_stack_item (si);
2207 }
2208
2209 /* Finally, update the SP register. */
2210 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
2211
2212 return sp;
2213 }
2214
2215
2216 /* Always align the frame to an 8-byte boundary. This is required on
2217 some platforms and harmless on the rest. */
2218
2219 static CORE_ADDR
2220 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2221 {
2222 /* Align the stack to eight bytes. */
2223 return sp & ~ (CORE_ADDR) 7;
2224 }
2225
2226 static void
2227 print_fpu_flags (int flags)
2228 {
2229 if (flags & (1 << 0))
2230 fputs ("IVO ", stdout);
2231 if (flags & (1 << 1))
2232 fputs ("DVZ ", stdout);
2233 if (flags & (1 << 2))
2234 fputs ("OFL ", stdout);
2235 if (flags & (1 << 3))
2236 fputs ("UFL ", stdout);
2237 if (flags & (1 << 4))
2238 fputs ("INX ", stdout);
2239 putchar ('\n');
2240 }
2241
2242 /* Print interesting information about the floating point processor
2243 (if present) or emulator. */
2244 static void
2245 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
2246 struct frame_info *frame, const char *args)
2247 {
2248 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
2249 int type;
2250
2251 type = (status >> 24) & 127;
2252 if (status & (1 << 31))
2253 printf (_("Hardware FPU type %d\n"), type);
2254 else
2255 printf (_("Software FPU type %d\n"), type);
2256 /* i18n: [floating point unit] mask */
2257 fputs (_("mask: "), stdout);
2258 print_fpu_flags (status >> 16);
2259 /* i18n: [floating point unit] flags */
2260 fputs (_("flags: "), stdout);
2261 print_fpu_flags (status);
2262 }
2263
2264 /* Construct the ARM extended floating point type. */
2265 static struct type *
2266 arm_ext_type (struct gdbarch *gdbarch)
2267 {
2268 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2269
2270 if (!tdep->arm_ext_type)
2271 tdep->arm_ext_type
2272 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
2273 floatformats_arm_ext);
2274
2275 return tdep->arm_ext_type;
2276 }
2277
2278 static struct type *
2279 arm_neon_double_type (struct gdbarch *gdbarch)
2280 {
2281 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2282
2283 if (tdep->neon_double_type == NULL)
2284 {
2285 struct type *t, *elem;
2286
2287 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
2288 TYPE_CODE_UNION);
2289 elem = builtin_type (gdbarch)->builtin_uint8;
2290 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
2291 elem = builtin_type (gdbarch)->builtin_uint16;
2292 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
2293 elem = builtin_type (gdbarch)->builtin_uint32;
2294 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
2295 elem = builtin_type (gdbarch)->builtin_uint64;
2296 append_composite_type_field (t, "u64", elem);
2297 elem = builtin_type (gdbarch)->builtin_float;
2298 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
2299 elem = builtin_type (gdbarch)->builtin_double;
2300 append_composite_type_field (t, "f64", elem);
2301
2302 TYPE_VECTOR (t) = 1;
2303 TYPE_NAME (t) = "neon_d";
2304 tdep->neon_double_type = t;
2305 }
2306
2307 return tdep->neon_double_type;
2308 }
2309
2310 /* FIXME: The vector types are not correctly ordered on big-endian
2311 targets. Just as s0 is the low bits of d0, d0[0] is also the low
2312 bits of d0 - regardless of what unit size is being held in d0. So
2313 the offset of the first uint8 in d0 is 7, but the offset of the
2314 first float is 4. This code works as-is for little-endian
2315 targets. */
2316
2317 static struct type *
2318 arm_neon_quad_type (struct gdbarch *gdbarch)
2319 {
2320 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2321
2322 if (tdep->neon_quad_type == NULL)
2323 {
2324 struct type *t, *elem;
2325
2326 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
2327 TYPE_CODE_UNION);
2328 elem = builtin_type (gdbarch)->builtin_uint8;
2329 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
2330 elem = builtin_type (gdbarch)->builtin_uint16;
2331 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
2332 elem = builtin_type (gdbarch)->builtin_uint32;
2333 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
2334 elem = builtin_type (gdbarch)->builtin_uint64;
2335 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
2336 elem = builtin_type (gdbarch)->builtin_float;
2337 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
2338 elem = builtin_type (gdbarch)->builtin_double;
2339 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
2340
2341 TYPE_VECTOR (t) = 1;
2342 TYPE_NAME (t) = "neon_q";
2343 tdep->neon_quad_type = t;
2344 }
2345
2346 return tdep->neon_quad_type;
2347 }
2348
2349 /* Return the GDB type object for the "standard" data type of data in
2350 register N. */
2351
2352 static struct type *
2353 arm_register_type (struct gdbarch *gdbarch, int regnum)
2354 {
2355 int num_regs = gdbarch_num_regs (gdbarch);
2356
2357 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
2358 && regnum >= num_regs && regnum < num_regs + 32)
2359 return builtin_type (gdbarch)->builtin_float;
2360
2361 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
2362 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
2363 return arm_neon_quad_type (gdbarch);
2364
2365 /* If the target description has register information, we are only
2366 in this function so that we can override the types of
2367 double-precision registers for NEON. */
2368 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
2369 {
2370 struct type *t = tdesc_register_type (gdbarch, regnum);
2371
2372 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
2373 && TYPE_CODE (t) == TYPE_CODE_FLT
2374 && gdbarch_tdep (gdbarch)->have_neon)
2375 return arm_neon_double_type (gdbarch);
2376 else
2377 return t;
2378 }
2379
2380 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
2381 {
2382 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
2383 return builtin_type (gdbarch)->builtin_void;
2384
2385 return arm_ext_type (gdbarch);
2386 }
2387 else if (regnum == ARM_SP_REGNUM)
2388 return builtin_type (gdbarch)->builtin_data_ptr;
2389 else if (regnum == ARM_PC_REGNUM)
2390 return builtin_type (gdbarch)->builtin_func_ptr;
2391 else if (regnum >= ARRAY_SIZE (arm_register_names))
2392 /* These registers are only supported on targets which supply
2393 an XML description. */
2394 return builtin_type (gdbarch)->builtin_int0;
2395 else
2396 return builtin_type (gdbarch)->builtin_uint32;
2397 }
2398
2399 /* Map a DWARF register REGNUM onto the appropriate GDB register
2400 number. */
2401
2402 static int
2403 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2404 {
2405 /* Core integer regs. */
2406 if (reg >= 0 && reg <= 15)
2407 return reg;
2408
2409 /* Legacy FPA encoding. These were once used in a way which
2410 overlapped with VFP register numbering, so their use is
2411 discouraged, but GDB doesn't support the ARM toolchain
2412 which used them for VFP. */
2413 if (reg >= 16 && reg <= 23)
2414 return ARM_F0_REGNUM + reg - 16;
2415
2416 /* New assignments for the FPA registers. */
2417 if (reg >= 96 && reg <= 103)
2418 return ARM_F0_REGNUM + reg - 96;
2419
2420 /* WMMX register assignments. */
2421 if (reg >= 104 && reg <= 111)
2422 return ARM_WCGR0_REGNUM + reg - 104;
2423
2424 if (reg >= 112 && reg <= 127)
2425 return ARM_WR0_REGNUM + reg - 112;
2426
2427 if (reg >= 192 && reg <= 199)
2428 return ARM_WC0_REGNUM + reg - 192;
2429
2430 /* VFP v2 registers. A double precision value is actually
2431 in d1 rather than s2, but the ABI only defines numbering
2432 for the single precision registers. This will "just work"
2433 in GDB for little endian targets (we'll read eight bytes,
2434 starting in s0 and then progressing to s1), but will be
2435 reversed on big endian targets with VFP. This won't
2436 be a problem for the new Neon quad registers; you're supposed
2437 to use DW_OP_piece for those. */
2438 if (reg >= 64 && reg <= 95)
2439 {
2440 char name_buf[4];
2441
2442 sprintf (name_buf, "s%d", reg - 64);
2443 return user_reg_map_name_to_regnum (gdbarch, name_buf,
2444 strlen (name_buf));
2445 }
2446
2447 /* VFP v3 / Neon registers. This range is also used for VFP v2
2448 registers, except that it now describes d0 instead of s0. */
2449 if (reg >= 256 && reg <= 287)
2450 {
2451 char name_buf[4];
2452
2453 sprintf (name_buf, "d%d", reg - 256);
2454 return user_reg_map_name_to_regnum (gdbarch, name_buf,
2455 strlen (name_buf));
2456 }
2457
2458 return -1;
2459 }
2460
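/* Illustrative examples (not from the original source): with the
   mapping above, DWARF register 13 is simply r13/sp, 96 maps to the FPA
   register f0, 64 resolves through the user-register table to "s0", and
   256 resolves to "d0"; any number outside these ranges yields -1, i.e.
   no corresponding GDB register.  */
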
2461 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
2462 static int
2463 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
2464 {
2465 int reg = regnum;
2466 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
2467
2468 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
2469 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
2470
2471 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
2472 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
2473
2474 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
2475 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
2476
2477 if (reg < NUM_GREGS)
2478 return SIM_ARM_R0_REGNUM + reg;
2479 reg -= NUM_GREGS;
2480
2481 if (reg < NUM_FREGS)
2482 return SIM_ARM_FP0_REGNUM + reg;
2483 reg -= NUM_FREGS;
2484
2485 if (reg < NUM_SREGS)
2486 return SIM_ARM_FPS_REGNUM + reg;
2487 reg -= NUM_SREGS;
2488
2489 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
2490 }
2491
2492 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
2493 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
2494 It is thought that this is the floating-point register format on
2495 little-endian systems. */
2496
2497 static void
2498 convert_from_extended (const struct floatformat *fmt, const void *ptr,
2499 void *dbl, int endianess)
2500 {
2501 DOUBLEST d;
2502
2503 if (endianess == BFD_ENDIAN_BIG)
2504 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
2505 else
2506 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
2507 ptr, &d);
2508 floatformat_from_doublest (fmt, &d, dbl);
2509 }
2510
2511 static void
2512 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
2513 int endianess)
2514 {
2515 DOUBLEST d;
2516
2517 floatformat_to_doublest (fmt, ptr, &d);
2518 if (endianess == BFD_ENDIAN_BIG)
2519 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
2520 else
2521 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
2522 &d, dbl);
2523 }
2524
2525 static int
2526 condition_true (unsigned long cond, unsigned long status_reg)
2527 {
2528 if (cond == INST_AL || cond == INST_NV)
2529 return 1;
2530
2531 switch (cond)
2532 {
2533 case INST_EQ:
2534 return ((status_reg & FLAG_Z) != 0);
2535 case INST_NE:
2536 return ((status_reg & FLAG_Z) == 0);
2537 case INST_CS:
2538 return ((status_reg & FLAG_C) != 0);
2539 case INST_CC:
2540 return ((status_reg & FLAG_C) == 0);
2541 case INST_MI:
2542 return ((status_reg & FLAG_N) != 0);
2543 case INST_PL:
2544 return ((status_reg & FLAG_N) == 0);
2545 case INST_VS:
2546 return ((status_reg & FLAG_V) != 0);
2547 case INST_VC:
2548 return ((status_reg & FLAG_V) == 0);
2549 case INST_HI:
2550 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
2551 case INST_LS:
2552 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
2553 case INST_GE:
2554 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
2555 case INST_LT:
2556 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
2557 case INST_GT:
2558 return (((status_reg & FLAG_Z) == 0)
2559 && (((status_reg & FLAG_N) == 0)
2560 == ((status_reg & FLAG_V) == 0)));
2561 case INST_LE:
2562 return (((status_reg & FLAG_Z) != 0)
2563 || (((status_reg & FLAG_N) == 0)
2564 != ((status_reg & FLAG_V) == 0)));
2565 }
2566 return 1;
2567 }
2568
2569 static unsigned long
2570 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
2571 unsigned long pc_val, unsigned long status_reg)
2572 {
2573 unsigned long res, shift;
2574 int rm = bits (inst, 0, 3);
2575 unsigned long shifttype = bits (inst, 5, 6);
2576
2577 if (bit (inst, 4))
2578 {
2579 int rs = bits (inst, 8, 11);
2580 shift = (rs == 15 ? pc_val + 8
2581 : get_frame_register_unsigned (frame, rs)) & 0xFF;
2582 }
2583 else
2584 shift = bits (inst, 7, 11);
2585
2586 res = (rm == 15
2587 ? (pc_val + (bit (inst, 4) ? 12 : 8))
2588 : get_frame_register_unsigned (frame, rm));
2589
2590 switch (shifttype)
2591 {
2592 case 0: /* LSL */
2593 res = shift >= 32 ? 0 : res << shift;
2594 break;
2595
2596 case 1: /* LSR */
2597 res = shift >= 32 ? 0 : res >> shift;
2598 break;
2599
2600 case 2: /* ASR */
2601 if (shift >= 32)
2602 shift = 31;
2603 res = ((res & 0x80000000L)
2604 ? ~((~res) >> shift) : res >> shift);
2605 break;
2606
2607 case 3: /* ROR/RRX */
2608 shift &= 31;
2609 if (shift == 0)
2610 res = (res >> 1) | (carry ? 0x80000000L : 0);
2611 else
2612 res = (res >> shift) | (res << (32 - shift));
2613 break;
2614 }
2615
2616 return res & 0xffffffff;
2617 }
2618
2619 /* Return number of 1-bits in VAL. */
2620
2621 static int
2622 bitcount (unsigned long val)
2623 {
2624 int nbits;
2625 for (nbits = 0; val != 0; nbits++)
2626 val &= val - 1; /* delete rightmost 1-bit in val */
2627 return nbits;
2628 }
2629
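/* Illustrative note: the loop above is the classic "clear the lowest
   set bit" trick, e.g. bitcount (0x2d) visits 0x2d -> 0x2c -> 0x28 ->
   0x20 -> 0x00 and returns 4.  */
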
2630 /* Return the size in bytes of the complete Thumb instruction whose
2631 first halfword is INST1. */
2632
2633 static int
2634 thumb_insn_size (unsigned short inst1)
2635 {
2636 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
2637 return 4;
2638 else
2639 return 2;
2640 }
2641
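/* Illustrative examples (not from the original source): a 32-bit
   Thumb-2 instruction is recognised by a first halfword whose top three
   bits are 0b111 and whose bits 12:11 are nonzero.  So 0xf7ff (the
   first half of a BL/BLX pair) gives 4, while 0xe7fe (a 16-bit
   unconditional branch) and 0xbf00 (the NOP/IT group) give 2.  */
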
2642 static int
2643 thumb_advance_itstate (unsigned int itstate)
2644 {
2645 /* Preserve IT[7:5], the first three bits of the condition. Shift
2646 the upcoming condition flags left by one bit. */
2647 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
2648
2649 /* If we have finished the IT block, clear the state. */
2650 if ((itstate & 0x0f) == 0)
2651 itstate = 0;
2652
2653 return itstate;
2654 }
2655
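/* Worked example (illustrative, not from the original source): for
   "itte eq" the reconstructed ITSTATE byte is 0x06 -- base condition EQ
   in the high bits, mask 0b0110 in the low bits.  The condition of the
   current instruction is ITSTATE >> 4, so the three instructions of the
   block see EQ, EQ, NE from the states 0x06, 0x0c and 0x18, and the
   advance after the last one clears the state to zero.  */
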
2656 /* Find the next PC after the current instruction executes. In some
2657 cases we can not statically determine the answer (see the IT state
2658 handling in this function); in that case, a breakpoint may be
2659 inserted in addition to the returned PC, which will be used to set
2660 another breakpoint by our caller. */
2661
2662 static CORE_ADDR
2663 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
2664 {
2665 struct gdbarch *gdbarch = get_frame_arch (frame);
2666 struct address_space *aspace = get_frame_address_space (frame);
2667 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2668 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2669 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
2670 unsigned short inst1;
2671 CORE_ADDR nextpc = pc + 2; /* default is next instruction */
2672 unsigned long offset;
2673 ULONGEST status, itstate;
2674
2675 nextpc = MAKE_THUMB_ADDR (nextpc);
2676 pc_val = MAKE_THUMB_ADDR (pc_val);
2677
2678 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2679
2680 /* Thumb-2 conditional execution support. There are eight bits in
2681 the CPSR which describe conditional execution state. Once
2682 reconstructed (they're in a funny order), the low five bits
2683 describe the low bit of the condition for each instruction and
2684 how many instructions remain. The high three bits describe the
2685 base condition. One of the low four bits will be set if an IT
2686 block is active. These bits read as zero on earlier
2687 processors. */
2688 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
2689 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
2690
2691 /* If-Then handling. On GNU/Linux, where this routine is used, we
2692 use an undefined instruction as a breakpoint. Unlike BKPT, IT
2693 can disable execution of the undefined instruction. So we might
2694 miss the breakpoint if we set it on a skipped conditional
2695 instruction. Because conditional instructions can change the
2696 flags, affecting the execution of further instructions, we may
2697 need to set two breakpoints. */
2698
2699 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
2700 {
2701 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
2702 {
2703 /* An IT instruction. Because this instruction does not
2704 modify the flags, we can accurately predict the next
2705 executed instruction. */
2706 itstate = inst1 & 0x00ff;
2707 pc += thumb_insn_size (inst1);
2708
2709 while (itstate != 0 && ! condition_true (itstate >> 4, status))
2710 {
2711 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2712 pc += thumb_insn_size (inst1);
2713 itstate = thumb_advance_itstate (itstate);
2714 }
2715
2716 return MAKE_THUMB_ADDR (pc);
2717 }
2718 else if (itstate != 0)
2719 {
2720 /* We are in a conditional block. Check the condition. */
2721 if (! condition_true (itstate >> 4, status))
2722 {
2723 /* Advance to the next executed instruction. */
2724 pc += thumb_insn_size (inst1);
2725 itstate = thumb_advance_itstate (itstate);
2726
2727 while (itstate != 0 && ! condition_true (itstate >> 4, status))
2728 {
2729 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2730 pc += thumb_insn_size (inst1);
2731 itstate = thumb_advance_itstate (itstate);
2732 }
2733
2734 return MAKE_THUMB_ADDR (pc);
2735 }
2736 else if ((itstate & 0x0f) == 0x08)
2737 {
2738 /* This is the last instruction of the conditional
2739 block, and it is executed. We can handle it normally
2740 because the following instruction is not conditional,
2741 and we must handle it normally because it is
2742 permitted to branch. Fall through. */
2743 }
2744 else
2745 {
2746 int cond_negated;
2747
2748 /* There are conditional instructions after this one.
2749 If this instruction modifies the flags, then we can
2750 not predict what the next executed instruction will
2751 be. Fortunately, this instruction is architecturally
2752 forbidden to branch; we know it will fall through.
2753 Start by skipping past it. */
2754 pc += thumb_insn_size (inst1);
2755 itstate = thumb_advance_itstate (itstate);
2756
2757 /* Set a breakpoint on the following instruction. */
2758 gdb_assert ((itstate & 0x0f) != 0);
2759 if (insert_bkpt)
2760 insert_single_step_breakpoint (gdbarch, aspace, pc);
2761 cond_negated = (itstate >> 4) & 1;
2762
2763 /* Skip all following instructions with the same
2764 condition. If there is a later instruction in the IT
2765 block with the opposite condition, set the other
2766 breakpoint there. If not, then set a breakpoint on
2767 the instruction after the IT block. */
2768 do
2769 {
2770 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2771 pc += thumb_insn_size (inst1);
2772 itstate = thumb_advance_itstate (itstate);
2773 }
2774 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
2775
2776 return MAKE_THUMB_ADDR (pc);
2777 }
2778 }
2779 }
2780 else if (itstate & 0x0f)
2781 {
2782 /* We are in a conditional block. Check the condition. */
2783 int cond = itstate >> 4;
2784
2785 if (! condition_true (cond, status))
2786 {
2787 /* Advance to the next instruction. All the 32-bit
2788 instructions share a common prefix. */
2789 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
2790 return MAKE_THUMB_ADDR (pc + 4);
2791 else
2792 return MAKE_THUMB_ADDR (pc + 2);
2793 }
2794
2795 /* Otherwise, handle the instruction normally. */
2796 }
2797
2798 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
2799 {
2800 CORE_ADDR sp;
2801
2802 /* Fetch the saved PC from the stack. It's stored above
2803 all of the other registers. */
2804 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
2805 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
2806 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
2807 }
2808 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
2809 {
2810 unsigned long cond = bits (inst1, 8, 11);
2811 if (cond != 0x0f && condition_true (cond, status)) /* 0x0f = SWI */
2812 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
2813 }
2814 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
2815 {
2816 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
2817 }
2818 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
2819 {
2820 unsigned short inst2;
2821 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
2822
2823 /* Default to the next instruction. */
2824 nextpc = pc + 4;
2825 nextpc = MAKE_THUMB_ADDR (nextpc);
2826
2827 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
2828 {
2829 /* Branches and miscellaneous control instructions. */
2830
2831 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
2832 {
2833 /* B, BL, BLX. */
2834 int j1, j2, imm1, imm2;
2835
2836 imm1 = sbits (inst1, 0, 10);
2837 imm2 = bits (inst2, 0, 10);
2838 j1 = bit (inst2, 13);
2839 j2 = bit (inst2, 11);
2840
2841 offset = ((imm1 << 12) + (imm2 << 1));
2842 offset ^= ((!j2) << 22) | ((!j1) << 23);
2843
2844 nextpc = pc_val + offset;
2845 /* For BLX make sure to clear the low bits. */
2846 if (bit (inst2, 12) == 0)
2847 nextpc = nextpc & 0xfffffffc;
2848 }
2849 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
2850 {
2851 /* SUBS PC, LR, #imm8. */
2852 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
2853 nextpc -= inst2 & 0x00ff;
2854 }
2855 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
2856 {
2857 /* Conditional branch. */
2858 if (condition_true (bits (inst1, 6, 9), status))
2859 {
2860 int sign, j1, j2, imm1, imm2;
2861
2862 sign = sbits (inst1, 10, 10);
2863 imm1 = bits (inst1, 0, 5);
2864 imm2 = bits (inst2, 0, 10);
2865 j1 = bit (inst2, 13);
2866 j2 = bit (inst2, 11);
2867
2868 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
2869 offset += (imm1 << 12) + (imm2 << 1);
2870
2871 nextpc = pc_val + offset;
2872 }
2873 }
2874 }
2875 else if ((inst1 & 0xfe50) == 0xe810)
2876 {
2877 /* Load multiple or RFE. */
2878 int rn, offset, load_pc = 1;
2879
2880 rn = bits (inst1, 0, 3);
2881 if (bit (inst1, 7) && !bit (inst1, 8))
2882 {
2883 /* LDMIA or POP */
2884 if (!bit (inst2, 15))
2885 load_pc = 0;
2886 offset = bitcount (inst2) * 4 - 4;
2887 }
2888 else if (!bit (inst1, 7) && bit (inst1, 8))
2889 {
2890 /* LDMDB */
2891 if (!bit (inst2, 15))
2892 load_pc = 0;
2893 offset = -4;
2894 }
2895 else if (bit (inst1, 7) && bit (inst1, 8))
2896 {
2897 /* RFEIA */
2898 offset = 0;
2899 }
2900 else if (!bit (inst1, 7) && !bit (inst1, 8))
2901 {
2902 /* RFEDB */
2903 offset = -8;
2904 }
2905 else
2906 load_pc = 0;
2907
2908 if (load_pc)
2909 {
2910 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
2911 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
2912 }
2913 }
2914 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
2915 {
2916 /* MOV PC or MOVS PC. */
2917 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2918 nextpc = MAKE_THUMB_ADDR (nextpc);
2919 }
2920 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
2921 {
2922 /* LDR PC. */
2923 CORE_ADDR base;
2924 int rn, load_pc = 1;
2925
2926 rn = bits (inst1, 0, 3);
2927 base = get_frame_register_unsigned (frame, rn);
2928 if (rn == 15)
2929 {
2930 base = (base + 4) & ~(CORE_ADDR) 0x3;
2931 if (bit (inst1, 7))
2932 base += bits (inst2, 0, 11);
2933 else
2934 base -= bits (inst2, 0, 11);
2935 }
2936 else if (bit (inst1, 7))
2937 base += bits (inst2, 0, 11);
2938 else if (bit (inst2, 11))
2939 {
2940 if (bit (inst2, 10))
2941 {
2942 if (bit (inst2, 9))
2943 base += bits (inst2, 0, 7);
2944 else
2945 base -= bits (inst2, 0, 7);
2946 }
2947 }
2948 else if ((inst2 & 0x0fc0) == 0x0000)
2949 {
2950 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
2951 base += get_frame_register_unsigned (frame, rm) << shift;
2952 }
2953 else
2954 /* Reserved. */
2955 load_pc = 0;
2956
2957 if (load_pc)
2958 nextpc = get_frame_memory_unsigned (frame, base, 4);
2959 }
2960 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
2961 {
2962 /* TBB. */
2963 CORE_ADDR tbl_reg, table, offset, length;
2964
2965 tbl_reg = bits (inst1, 0, 3);
2966 if (tbl_reg == 0x0f)
2967 table = pc + 4; /* Regcache copy of PC isn't right yet. */
2968 else
2969 table = get_frame_register_unsigned (frame, tbl_reg);
2970
2971 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2972 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
2973 nextpc = pc_val + length;
2974 }
2975 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
2976 {
2977 /* TBH. */
2978 CORE_ADDR tbl_reg, table, offset, length;
2979
2980 tbl_reg = bits (inst1, 0, 3);
2981 if (tbl_reg == 0x0f)
2982 table = pc + 4; /* Regcache copy of PC isn't right yet. */
2983 else
2984 table = get_frame_register_unsigned (frame, tbl_reg);
2985
2986 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2987 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
2988 nextpc = pc_val + length;
2989 }
2990 }
2991 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
2992 {
2993 if (bits (inst1, 3, 6) == 0x0f)
2994 nextpc = pc_val;
2995 else
2996 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
2997 }
2998 else if ((inst1 & 0xf500) == 0xb100)
2999 {
3000 /* CBNZ or CBZ. */
3001 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
3002 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
3003
3004 if (bit (inst1, 11) && reg != 0)
3005 nextpc = pc_val + imm;
3006 else if (!bit (inst1, 11) && reg == 0)
3007 nextpc = pc_val + imm;
3008 }
3009 return nextpc;
3010 }
3011
3012 /* Get the raw next address. PC is the current program counter, in
3013 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
3014 the alternative next instruction if there are two options.
3015
3016 The value returned has the execution state of the next instruction
3017 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
3018 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
3019 address.
3020 */
3021 static CORE_ADDR
3022 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3023 {
3024 struct gdbarch *gdbarch = get_frame_arch (frame);
3025 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3026 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3027 unsigned long pc_val;
3028 unsigned long this_instr;
3029 unsigned long status;
3030 CORE_ADDR nextpc;
3031
3032 if (arm_frame_is_thumb (frame))
3033 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
3034
3035 pc_val = (unsigned long) pc;
3036 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3037
3038 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3039 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
3040
3041 if (bits (this_instr, 28, 31) == INST_NV)
3042 switch (bits (this_instr, 24, 27))
3043 {
3044 case 0xa:
3045 case 0xb:
3046 {
3047 /* Branch with Link and change to Thumb. */
3048 nextpc = BranchDest (pc, this_instr);
3049 nextpc |= bit (this_instr, 24) << 1;
3050 nextpc = MAKE_THUMB_ADDR (nextpc);
3051 break;
3052 }
3053 case 0xc:
3054 case 0xd:
3055 case 0xe:
3056 /* Coprocessor register transfer. */
3057 if (bits (this_instr, 12, 15) == 15)
3058 error (_("Invalid update to pc in instruction"));
3059 break;
3060 }
3061 else if (condition_true (bits (this_instr, 28, 31), status))
3062 {
3063 switch (bits (this_instr, 24, 27))
3064 {
3065 case 0x0:
3066 case 0x1: /* data processing */
3067 case 0x2:
3068 case 0x3:
3069 {
3070 unsigned long operand1, operand2, result = 0;
3071 unsigned long rn;
3072 int c;
3073
3074 if (bits (this_instr, 12, 15) != 15)
3075 break;
3076
3077 if (bits (this_instr, 22, 25) == 0
3078 && bits (this_instr, 4, 7) == 9) /* multiply */
3079 error (_("Invalid update to pc in instruction"));
3080
3081 /* BX <reg>, BLX <reg> */
3082 if (bits (this_instr, 4, 27) == 0x12fff1
3083 || bits (this_instr, 4, 27) == 0x12fff3)
3084 {
3085 rn = bits (this_instr, 0, 3);
3086 nextpc = (rn == 15) ? pc_val + 8
3087 : get_frame_register_unsigned (frame, rn);
3088 return nextpc;
3089 }
3090
3091 /* Multiply into PC */
3092 c = (status & FLAG_C) ? 1 : 0;
3093 rn = bits (this_instr, 16, 19);
3094 operand1 = (rn == 15) ? pc_val + 8
3095 : get_frame_register_unsigned (frame, rn);
3096
3097 if (bit (this_instr, 25))
3098 {
3099 unsigned long immval = bits (this_instr, 0, 7);
3100 unsigned long rotate = 2 * bits (this_instr, 8, 11);
3101 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
3102 & 0xffffffff;
3103 }
3104 else /* operand 2 is a shifted register */
3105 operand2 = shifted_reg_val (frame, this_instr, c, pc_val, status);
3106
3107 switch (bits (this_instr, 21, 24))
3108 {
3109 case 0x0: /*and */
3110 result = operand1 & operand2;
3111 break;
3112
3113 case 0x1: /*eor */
3114 result = operand1 ^ operand2;
3115 break;
3116
3117 case 0x2: /*sub */
3118 result = operand1 - operand2;
3119 break;
3120
3121 case 0x3: /*rsb */
3122 result = operand2 - operand1;
3123 break;
3124
3125 case 0x4: /*add */
3126 result = operand1 + operand2;
3127 break;
3128
3129 case 0x5: /*adc */
3130 result = operand1 + operand2 + c;
3131 break;
3132
3133 case 0x6: /*sbc */
3134 result = operand1 - operand2 + c;
3135 break;
3136
3137 case 0x7: /*rsc */
3138 result = operand2 - operand1 + c;
3139 break;
3140
3141 case 0x8:
3142 case 0x9:
3143 case 0xa:
3144 case 0xb: /* tst, teq, cmp, cmn */
3145 result = (unsigned long) nextpc;
3146 break;
3147
3148 case 0xc: /*orr */
3149 result = operand1 | operand2;
3150 break;
3151
3152 case 0xd: /*mov */
3153 /* Always step into a function. */
3154 result = operand2;
3155 break;
3156
3157 case 0xe: /*bic */
3158 result = operand1 & ~operand2;
3159 break;
3160
3161 case 0xf: /*mvn */
3162 result = ~operand2;
3163 break;
3164 }
3165
3166 /* In 26-bit APCS the bottom two bits of the result are
3167 ignored, and we always end up in ARM state. */
3168 if (!arm_apcs_32)
3169 nextpc = arm_addr_bits_remove (gdbarch, result);
3170 else
3171 nextpc = result;
3172
3173 break;
3174 }
3175
3176 case 0x4:
3177 case 0x5: /* data transfer */
3178 case 0x6:
3179 case 0x7:
3180 if (bit (this_instr, 20))
3181 {
3182 /* load */
3183 if (bits (this_instr, 12, 15) == 15)
3184 {
3185 /* rd == pc */
3186 unsigned long rn;
3187 unsigned long base;
3188
3189 if (bit (this_instr, 22))
3190 error (_("Invalid update to pc in instruction"));
3191
3192 /* byte write to PC */
3193 rn = bits (this_instr, 16, 19);
3194 base = (rn == 15) ? pc_val + 8
3195 : get_frame_register_unsigned (frame, rn);
3196 if (bit (this_instr, 24))
3197 {
3198 /* pre-indexed */
3199 int c = (status & FLAG_C) ? 1 : 0;
3200 unsigned long offset =
3201 (bit (this_instr, 25)
3202 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
3203 : bits (this_instr, 0, 11));
3204
3205 if (bit (this_instr, 23))
3206 base += offset;
3207 else
3208 base -= offset;
3209 }
3210 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
3211 4, byte_order);
3212 }
3213 }
3214 break;
3215
3216 case 0x8:
3217 case 0x9: /* block transfer */
3218 if (bit (this_instr, 20))
3219 {
3220 /* LDM */
3221 if (bit (this_instr, 15))
3222 {
3223 /* loading pc */
3224 int offset = 0;
3225
3226 if (bit (this_instr, 23))
3227 {
3228 /* up */
3229 unsigned long reglist = bits (this_instr, 0, 14);
3230 offset = bitcount (reglist) * 4;
3231 if (bit (this_instr, 24)) /* pre */
3232 offset += 4;
3233 }
3234 else if (bit (this_instr, 24))
3235 offset = -4;
3236
3237 {
3238 unsigned long rn_val =
3239 get_frame_register_unsigned (frame,
3240 bits (this_instr, 16, 19));
3241 nextpc =
3242 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
3243 + offset),
3244 4, byte_order);
3245 }
3246 }
3247 }
3248 break;
3249
3250 case 0xb: /* branch & link */
3251 case 0xa: /* branch */
3252 {
3253 nextpc = BranchDest (pc, this_instr);
3254 break;
3255 }
3256
3257 case 0xc:
3258 case 0xd:
3259 case 0xe: /* coproc ops */
3260 case 0xf: /* SWI */
3261 break;
3262
3263 default:
3264 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
3265 return (pc);
3266 }
3267 }
3268
3269 return nextpc;
3270 }
3271
3272 CORE_ADDR
3273 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
3274 {
3275 struct gdbarch *gdbarch = get_frame_arch (frame);
3276 CORE_ADDR nextpc =
3277 gdbarch_addr_bits_remove (gdbarch,
3278 arm_get_next_pc_raw (frame, pc, TRUE));
3279 if (nextpc == pc)
3280 error (_("Infinite loop detected"));
3281 return nextpc;
3282 }
3283
3284 /* single_step() is called just before we want to resume the inferior,
3285 if we want to single-step it but there is no hardware or kernel
3286 single-step support. We find the target of the coming instruction
3287 and breakpoint it. */
3288
3289 int
3290 arm_software_single_step (struct frame_info *frame)
3291 {
3292 struct gdbarch *gdbarch = get_frame_arch (frame);
3293 struct address_space *aspace = get_frame_address_space (frame);
3294
3295 /* NOTE: This may insert the wrong breakpoint instruction when
3296 single-stepping over a mode-changing instruction, if the
3297 CPSR heuristics are used. */
3298
3299 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
3300 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
3301
3302 return 1;
3303 }
3304
3305 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
3306 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
3307 NULL if an error occurs. BUF is freed. */
3308
3309 static gdb_byte *
3310 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
3311 int old_len, int new_len)
3312 {
3313 gdb_byte *new_buf, *middle;
3314 int bytes_to_read = new_len - old_len;
3315
3316 new_buf = xmalloc (new_len);
3317 memcpy (new_buf + bytes_to_read, buf, old_len);
3318 xfree (buf);
3319 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
3320 {
3321 xfree (new_buf);
3322 return NULL;
3323 }
3324 return new_buf;
3325 }
3326
3327 /* An IT block is at most the 2-byte IT instruction followed by
3328 four 4-byte instructions. The furthest back we must search to
3329 find an IT block that affects the current instruction is thus
3330 2 + 3 * 4 == 14 bytes. */
3331 #define MAX_IT_BLOCK_PREFIX 14
3332
3333 /* Use a quick scan if there are more than this many bytes of
3334 code. */
3335 #define IT_SCAN_THRESHOLD 32
3336
3337 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
3338 A breakpoint in an IT block may not be hit, depending on the
3339 condition flags. */
3340 static CORE_ADDR
3341 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
3342 {
3343 gdb_byte *buf;
3344 char map_type;
3345 CORE_ADDR boundary, func_start;
3346 int buf_len, buf2_len;
3347 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
3348 int i, any, last_it, last_it_count;
3349
3350 /* If we are using BKPT breakpoints, none of this is necessary. */
3351 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
3352 return bpaddr;
3353
3354 /* ARM mode does not have this problem. */
3355 if (!arm_pc_is_thumb (bpaddr))
3356 return bpaddr;
3357
3358 /* We are setting a breakpoint in Thumb code that could potentially
3359 contain an IT block. The first step is to find how much Thumb
3360 code there is; we do not need to read outside of known Thumb
3361 sequences. */
3362 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
3363 if (map_type == 0)
3364 /* Thumb-2 code must have mapping symbols to have a chance. */
3365 return bpaddr;
3366
3367 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
3368
3369 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
3370 && func_start > boundary)
3371 boundary = func_start;
3372
3373 /* Search for a candidate IT instruction. We have to do some fancy
3374 footwork to distinguish a real IT instruction from the second
3375 half of a 32-bit instruction, but there is no need for that if
3376 there's no candidate. */
3377 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
3378 if (buf_len == 0)
3379 /* No room for an IT instruction. */
3380 return bpaddr;
3381
3382 buf = xmalloc (buf_len);
3383 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
3384 return bpaddr;
3385 any = 0;
3386 for (i = 0; i < buf_len; i += 2)
3387 {
3388 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3389 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3390 {
3391 any = 1;
3392 break;
3393 }
3394 }
3395 if (any == 0)
3396 {
3397 xfree (buf);
3398 return bpaddr;
3399 }
3400
3401 /* OK, the code bytes before this instruction contain at least one
3402 halfword which resembles an IT instruction. We know that it's
3403 Thumb code, but there are still two possibilities. Either the
3404 halfword really is an IT instruction, or it is the second half of
3405 a 32-bit Thumb instruction. The only way we can tell is to
3406 scan forwards from a known instruction boundary. */
3407 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
3408 {
3409 int definite;
3410
3411 /* There's a lot of code before this instruction. Start with an
3412 optimistic search; it's easy to recognize halfwords that can
3413 not be the start of a 32-bit instruction, and use that to
3414 lock on to the instruction boundaries. */
3415 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
3416 if (buf == NULL)
3417 return bpaddr;
3418 buf_len = IT_SCAN_THRESHOLD;
3419
3420 definite = 0;
3421 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
3422 {
3423 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3424 if (thumb_insn_size (inst1) == 2)
3425 {
3426 definite = 1;
3427 break;
3428 }
3429 }
3430
3431 /* At this point, if DEFINITE, BUF[I] is the first place we
3432 are sure that we know the instruction boundaries, and it is far
3433 enough from BPADDR that we could not miss an IT instruction
3434 affecting BPADDR. If ! DEFINITE, give up - start from a
3435 known boundary. */
3436 if (! definite)
3437 {
3438 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
3439 if (buf == NULL)
3440 return bpaddr;
3441 buf_len = bpaddr - boundary;
3442 i = 0;
3443 }
3444 }
3445 else
3446 {
3447 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
3448 if (buf == NULL)
3449 return bpaddr;
3450 buf_len = bpaddr - boundary;
3451 i = 0;
3452 }
3453
3454 /* Scan forwards. Find the last IT instruction before BPADDR. */
3455 last_it = -1;
3456 last_it_count = 0;
3457 while (i < buf_len)
3458 {
3459 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3460 last_it_count--;
3461 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3462 {
3463 last_it = i;
3464 if (inst1 & 0x0001)
3465 last_it_count = 4;
3466 else if (inst1 & 0x0002)
3467 last_it_count = 3;
3468 else if (inst1 & 0x0004)
3469 last_it_count = 2;
3470 else
3471 last_it_count = 1;
3472 }
3473 i += thumb_insn_size (inst1);
3474 }
3475
3476 xfree (buf);
3477
3478 if (last_it == -1)
3479 /* There wasn't really an IT instruction after all. */
3480 return bpaddr;
3481
3482 if (last_it_count < 1)
3483 /* It was too far away. */
3484 return bpaddr;
3485
3486 /* This really is a trouble spot. Move the breakpoint to the IT
3487 instruction. */
3488 return bpaddr - buf_len + last_it;
3489 }
3490
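/* Illustrative example (not from the original source): if a breakpoint
   is requested on either conditional instruction of

       ite   eq
       moveq r0, #1
       movne r0, #0

   the scan above finds the IT within MAX_IT_BLOCK_PREFIX bytes and
   moves the breakpoint back onto the IT instruction itself, which is
   executed unconditionally and therefore always hit.  */
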
3491 /* ARM displaced stepping support.
3492
3493 Generally ARM displaced stepping works as follows:
3494
3495 1. When an instruction is to be single-stepped, it is first decoded by
3496 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
3497 Depending on the type of instruction, it is then copied to a scratch
3498 location, possibly in a modified form. The copy_* set of functions
3499 performs such modification, as necessary. A breakpoint is placed after
3500 the modified instruction in the scratch space to return control to GDB.
3501 Note in particular that instructions which modify the PC will no longer
3502 do so after modification.
3503
3504 2. The instruction is single-stepped, by setting the PC to the scratch
3505 location address, and resuming. Control returns to GDB when the
3506 breakpoint is hit.
3507
3508 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
3509 function used for the current instruction. This function's job is to
3510 put the CPU/memory state back to what it would have been if the
3511 instruction had been executed unmodified in its original location. */
3512
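/* Illustration (a sketch, not from the original source): as a concrete
   instance of the scheme above, a PC-relative load such as
   "ldr r0, [pc, #8]" cannot simply be executed from the scratch area,
   because the PC it would observe there is the scratch address rather
   than the original one.  Broadly, the corresponding copy_* routine
   rewrites the instruction to address through a temporary register
   preloaded with the original pipeline-adjusted PC, and the matching
   cleanup_* routine restores that register afterwards.  */
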
3513 /* NOP instruction (mov r0, r0). */
3514 #define ARM_NOP 0xe1a00000
3515
3516 /* Helper for register reads for displaced stepping. In particular, this
3517 returns the PC as it would be seen by the instruction at its original
3518 location. */
3519
3520 ULONGEST
3521 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
3522 {
3523 ULONGEST ret;
3524
3525 if (regno == 15)
3526 {
3527 if (debug_displaced)
3528 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
3529 (unsigned long) from + 8);
3530 return (ULONGEST) from + 8; /* Pipeline offset. */
3531 }
3532 else
3533 {
3534 regcache_cooked_read_unsigned (regs, regno, &ret);
3535 if (debug_displaced)
3536 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
3537 regno, (unsigned long) ret);
3538 return ret;
3539 }
3540 }
3541
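/* Illustrative example: for an instruction whose original address FROM
   is 0x8000, displaced_read_reg (regs, 0x8000, 15) returns 0x8008 --
   the pipeline-adjusted PC the instruction would have seen in place --
   while every other register number is read directly from the
   regcache.  */
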
3542 static int
3543 displaced_in_arm_mode (struct regcache *regs)
3544 {
3545 ULONGEST ps;
3546
3547 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
3548
3549 return (ps & CPSR_T) == 0;
3550 }
3551
3552 /* Write to the PC as from a branch instruction. */
3553
3554 static void
3555 branch_write_pc (struct regcache *regs, ULONGEST val)
3556 {
3557 if (displaced_in_arm_mode (regs))
3558 /* Note: If bits 0/1 are set, this branch would be unpredictable for
3559 architecture versions < 6. */
3560 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x3);
3561 else
3562 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x1);
3563 }
3564
3565 /* Write to the PC as from a branch-exchange instruction. */
3566
3567 static void
3568 bx_write_pc (struct regcache *regs, ULONGEST val)
3569 {
3570 ULONGEST ps;
3571
3572 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
3573
3574 if ((val & 1) == 1)
3575 {
3576 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | CPSR_T);
3577 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
3578 }
3579 else if ((val & 2) == 0)
3580 {
3581 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM,
3582 ps & ~(ULONGEST) CPSR_T);
3583 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
3584 }
3585 else
3586 {
3587 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
3588 mode, align dest to 4 bytes). */
3589 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
3590 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM,
3591 ps & ~(ULONGEST) CPSR_T);
3592 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
3593 }
3594 }
3595
3596 /* Write to the PC as if from a load instruction. */
3597
3598 static void
3599 load_write_pc (struct regcache *regs, ULONGEST val)
3600 {
3601 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
3602 bx_write_pc (regs, val);
3603 else
3604 branch_write_pc (regs, val);
3605 }
3606
3607 /* Write to the PC as if from an ALU instruction. */
3608
3609 static void
3610 alu_write_pc (struct regcache *regs, ULONGEST val)
3611 {
3612 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
3613 bx_write_pc (regs, val);
3614 else
3615 branch_write_pc (regs, val);
3616 }
3617
3618 /* Helper for writing to registers for displaced stepping. Writing to the PC
3619 has varying effects depending on the instruction which does the write:
3620 this is controlled by the WRITE_PC argument. */
3621
3622 void
3623 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
3624 int regno, ULONGEST val, enum pc_write_style write_pc)
3625 {
3626 if (regno == 15)
3627 {
3628 if (debug_displaced)
3629 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
3630 (unsigned long) val);
3631 switch (write_pc)
3632 {
3633 case BRANCH_WRITE_PC:
3634 branch_write_pc (regs, val);
3635 break;
3636
3637 case BX_WRITE_PC:
3638 bx_write_pc (regs, val);
3639 break;
3640
3641 case LOAD_WRITE_PC:
3642 load_write_pc (regs, val);
3643 break;
3644
3645 case ALU_WRITE_PC:
3646 alu_write_pc (regs, val);
3647 break;
3648
3649 case CANNOT_WRITE_PC:
3650 warning (_("Instruction wrote to PC in an unexpected way when "
3651 "single-stepping"));
3652 break;
3653
3654 default:
3655 internal_error (__FILE__, __LINE__,
3656 _("Invalid argument to displaced_write_reg"));
3657 }
3658
3659 dsc->wrote_to_pc = 1;
3660 }
3661 else
3662 {
3663 if (debug_displaced)
3664 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
3665 regno, (unsigned long) val);
3666 regcache_cooked_write_unsigned (regs, regno, val);
3667 }
3668 }
3669
3670 /* This function is used to concisely determine if an instruction INSN
3671 references PC. Register fields of interest in INSN should have the
3672 corresponding fields of BITMASK set to 0b1111. The function returns 1
3673 if any of these fields in INSN reference the PC (also 0b1111, r15), else it
3674 returns 0. */
3675
3676 static int
3677 insn_references_pc (uint32_t insn, uint32_t bitmask)
3678 {
3679 uint32_t lowbit = 1;
3680
3681 while (bitmask != 0)
3682 {
3683 uint32_t mask;
3684
3685 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
3686 ;
3687
3688 if (!lowbit)
3689 break;
3690
3691 mask = lowbit * 0xf;
3692
3693 if ((insn & mask) == mask)
3694 return 1;
3695
3696 bitmask &= ~mask;
3697 }
3698
3699 return 0;
3700 }
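
/* For illustration only (hypothetical calls, not made by the code here):

     insn_references_pc (0xe59ff004, 0x000ff00ful)   =>  1
       ("ldr pc, [pc, #4]": both the Rt and Rn fields are 0b1111)
     insn_references_pc (0xe5910000, 0x000ff00ful)   =>  0
       ("ldr r0, [r1]": none of the selected register fields is 0b1111).  */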
3701
3702 /* The simplest copy function. Many instructions have the same effect no
3703 matter what address they are executed at: in those cases, use this. */
3704
3705 static int
3706 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
3707 const char *iname, struct displaced_step_closure *dsc)
3708 {
3709 if (debug_displaced)
3710 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
3711 "opcode/class '%s' unmodified\n", (unsigned long) insn,
3712 iname);
3713
3714 dsc->modinsn[0] = insn;
3715
3716 return 0;
3717 }
3718
3719 /* Preload instructions with immediate offset. */
3720
3721 static void
3722 cleanup_preload (struct gdbarch *gdbarch,
3723 struct regcache *regs, struct displaced_step_closure *dsc)
3724 {
3725 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3726 if (!dsc->u.preload.immed)
3727 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3728 }
3729
3730 static int
3731 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3732 struct displaced_step_closure *dsc)
3733 {
3734 unsigned int rn = bits (insn, 16, 19);
3735 ULONGEST rn_val;
3736 CORE_ADDR from = dsc->insn_addr;
3737
3738 if (!insn_references_pc (insn, 0x000f0000ul))
3739 return copy_unmodified (gdbarch, insn, "preload", dsc);
3740
3741 if (debug_displaced)
3742 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
3743 (unsigned long) insn);
3744
3745 /* Preload instructions:
3746
3747 {pli/pld} [rn, #+/-imm]
3748 ->
3749 {pli/pld} [r0, #+/-imm]. */
3750
3751 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3752 rn_val = displaced_read_reg (regs, from, rn);
3753 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3754
3755 dsc->u.preload.immed = 1;
3756
3757 dsc->modinsn[0] = insn & 0xfff0ffff;
3758
3759 dsc->cleanup = &cleanup_preload;
3760
3761 return 0;
3762 }
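
/* A concrete (hypothetical) example of the rewrite performed above:

     pld [pc, #8]      ; 0xf5dff008, references the PC via Rn
   becomes
     pld [r0, #8]      ; 0xf5d0f008

   with r0 temporarily holding the PC value the original instruction would
   have seen (its address + 8); cleanup_preload restores r0 afterwards.  */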
3763
3764 /* Preload instructions with register offset. */
3765
3766 static int
3767 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3768 struct displaced_step_closure *dsc)
3769 {
3770 unsigned int rn = bits (insn, 16, 19);
3771 unsigned int rm = bits (insn, 0, 3);
3772 ULONGEST rn_val, rm_val;
3773 CORE_ADDR from = dsc->insn_addr;
3774
3775 if (!insn_references_pc (insn, 0x000f000ful))
3776 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
3777
3778 if (debug_displaced)
3779 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
3780 (unsigned long) insn);
3781
3782 /* Preload register-offset instructions:
3783
3784 {pli/pld} [rn, rm {, shift}]
3785 ->
3786 {pli/pld} [r0, r1 {, shift}]. */
3787
3788 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3789 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3790 rn_val = displaced_read_reg (regs, from, rn);
3791 rm_val = displaced_read_reg (regs, from, rm);
3792 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3793 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
3794
3795 dsc->u.preload.immed = 0;
3796
3797 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
3798
3799 dsc->cleanup = &cleanup_preload;
3800
3801 return 0;
3802 }
3803
3804 /* Copy/cleanup coprocessor load and store instructions. */
3805
3806 static void
3807 cleanup_copro_load_store (struct gdbarch *gdbarch,
3808 struct regcache *regs,
3809 struct displaced_step_closure *dsc)
3810 {
3811 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3812
3813 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3814
3815 if (dsc->u.ldst.writeback)
3816 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
3817 }
3818
3819 static int
3820 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
3821 struct regcache *regs,
3822 struct displaced_step_closure *dsc)
3823 {
3824 unsigned int rn = bits (insn, 16, 19);
3825 ULONGEST rn_val;
3826 CORE_ADDR from = dsc->insn_addr;
3827
3828 if (!insn_references_pc (insn, 0x000f0000ul))
3829 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
3830
3831 if (debug_displaced)
3832 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
3833 "load/store insn %.8lx\n", (unsigned long) insn);
3834
3835 /* Coprocessor load/store instructions:
3836
3837 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
3838 ->
3839 {stc/stc2} [r0, #+/-imm].
3840
3841 ldc/ldc2 are handled identically. */
3842
3843 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3844 rn_val = displaced_read_reg (regs, from, rn);
3845 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3846
3847 dsc->u.ldst.writeback = bit (insn, 25);
3848 dsc->u.ldst.rn = rn;
3849
3850 dsc->modinsn[0] = insn & 0xfff0ffff;
3851
3852 dsc->cleanup = &cleanup_copro_load_store;
3853
3854 return 0;
3855 }
3856
3857 /* Clean up branch instructions (actually perform the branch, by setting
3858 PC). */
3859
3860 static void
3861 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
3862 struct displaced_step_closure *dsc)
3863 {
3864 ULONGEST from = dsc->insn_addr;
3865 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
3866 int branch_taken = condition_true (dsc->u.branch.cond, status);
3867 enum pc_write_style write_pc = dsc->u.branch.exchange
3868 ? BX_WRITE_PC : BRANCH_WRITE_PC;
3869
3870 if (!branch_taken)
3871 return;
3872
3873 if (dsc->u.branch.link)
3874 {
3875 ULONGEST pc = displaced_read_reg (regs, from, 15);
3876 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
3877 }
3878
3879 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
3880 }
3881
3882 /* Copy B/BL/BLX instructions with immediate destinations. */
3883
3884 static int
3885 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
3886 struct regcache *regs, struct displaced_step_closure *dsc)
3887 {
3888 unsigned int cond = bits (insn, 28, 31);
3889 int exchange = (cond == 0xf);
3890 int link = exchange || bit (insn, 24);
3891 CORE_ADDR from = dsc->insn_addr;
3892 long offset;
3893
3894 if (debug_displaced)
3895 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
3896 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
3897 (unsigned long) insn);
3898
3899 /* Implement "BL<cond> <label>" as:
3900
3901 Preparation: cond <- instruction condition
3902 Insn: mov r0, r0 (nop)
3903 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
3904
3905 B<cond> similar, but don't set r14 in cleanup. */
3906
3907 if (exchange)
3908 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
3909 then arrange the switch into Thumb mode. */
3910 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
3911 else
3912 offset = bits (insn, 0, 23) << 2;
3913
3914 if (bit (offset, 25))
3915 offset = offset | ~0x3ffffff;
3916
3917 dsc->u.branch.cond = cond;
3918 dsc->u.branch.link = link;
3919 dsc->u.branch.exchange = exchange;
3920 dsc->u.branch.dest = from + 8 + offset;
3921
3922 dsc->modinsn[0] = ARM_NOP;
3923
3924 dsc->cleanup = &cleanup_branch;
3925
3926 return 0;
3927 }
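
/* Worked example (hypothetical encodings) of the destination computed above
   for a forward and a backward branch at address FROM:

     0xeb000004  bl   imm24 = 0x000004, offset = 0x10, dest = FROM + 8 + 0x10
     0xebffffff  bl   imm24 = 0xffffff, offset sign-extends to -4,
                      dest = FROM + 8 - 4

   The copied instruction is just a NOP; cleanup_branch then writes LR (for
   BL/BLX) and the destination PC only if the condition held.  */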
3928
3929 /* Copy BX/BLX with register-specified destinations. */
3930
3931 static int
3932 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
3933 struct regcache *regs, struct displaced_step_closure *dsc)
3934 {
3935 unsigned int cond = bits (insn, 28, 31);
3936 /* BX: x12xxx1x
3937 BLX: x12xxx3x. */
3938 int link = bit (insn, 5);
3939 unsigned int rm = bits (insn, 0, 3);
3940 CORE_ADDR from = dsc->insn_addr;
3941
3942 if (debug_displaced)
3943 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
3944 "%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
3945
3946 /* Implement "{BX,BLX}<cond> <reg>" as:
3947
3948 Preparation: cond <- instruction condition
3949 Insn: mov r0, r0 (nop)
3950 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
3951
3952 Don't set r14 in cleanup for BX. */
3953
3954 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
3955
3956 dsc->u.branch.cond = cond;
3957 dsc->u.branch.link = link;
3958 dsc->u.branch.exchange = 1;
3959
3960 dsc->modinsn[0] = ARM_NOP;
3961
3962 dsc->cleanup = &cleanup_branch;
3963
3964 return 0;
3965 }
3966
3967 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
3968
3969 static void
3970 cleanup_alu_imm (struct gdbarch *gdbarch,
3971 struct regcache *regs, struct displaced_step_closure *dsc)
3972 {
3973 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3974 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3975 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3976 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
3977 }
3978
3979 static int
3980 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3981 struct displaced_step_closure *dsc)
3982 {
3983 unsigned int rn = bits (insn, 16, 19);
3984 unsigned int rd = bits (insn, 12, 15);
3985 unsigned int op = bits (insn, 21, 24);
3986 int is_mov = (op == 0xd);
3987 ULONGEST rd_val, rn_val;
3988 CORE_ADDR from = dsc->insn_addr;
3989
3990 if (!insn_references_pc (insn, 0x000ff000ul))
3991 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
3992
3993 if (debug_displaced)
3994 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
3995 "%.8lx\n", is_mov ? "move" : "ALU",
3996 (unsigned long) insn);
3997
3998 /* Instruction is of form:
3999
4000 <op><cond> rd, [rn,] #imm
4001
4002 Rewrite as:
4003
4004 Preparation: tmp1, tmp2 <- r0, r1;
4005 r0, r1 <- rd, rn
4006 Insn: <op><cond> r0, r1, #imm
4007 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
4008 */
4009
4010 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4011 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4012 rn_val = displaced_read_reg (regs, from, rn);
4013 rd_val = displaced_read_reg (regs, from, rd);
4014 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4015 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4016 dsc->rd = rd;
4017
4018 if (is_mov)
4019 dsc->modinsn[0] = insn & 0xfff00fff;
4020 else
4021 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
4022
4023 dsc->cleanup = &cleanup_alu_imm;
4024
4025 return 0;
4026 }
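
/* A concrete (hypothetical) example of the rewrite performed above:

     add r2, pc, #4    ; 0xe28f2004, Rn is the PC
   becomes
     add r0, r1, #4    ; 0xe2810004, with r1 preloaded with the PC value

   cleanup_alu_imm then moves the result from r0 into r2 and restores r0 and
   r1 from the saved copies.  */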
4027
4028 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4029
4030 static void
4031 cleanup_alu_reg (struct gdbarch *gdbarch,
4032 struct regcache *regs, struct displaced_step_closure *dsc)
4033 {
4034 ULONGEST rd_val;
4035 int i;
4036
4037 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4038
4039 for (i = 0; i < 3; i++)
4040 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4041
4042 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4043 }
4044
4045 static int
4046 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4047 struct displaced_step_closure *dsc)
4048 {
4049 unsigned int rn = bits (insn, 16, 19);
4050 unsigned int rm = bits (insn, 0, 3);
4051 unsigned int rd = bits (insn, 12, 15);
4052 unsigned int op = bits (insn, 21, 24);
4053 int is_mov = (op == 0xd);
4054 ULONGEST rd_val, rn_val, rm_val;
4055 CORE_ADDR from = dsc->insn_addr;
4056
4057 if (!insn_references_pc (insn, 0x000ff00ful))
4058 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
4059
4060 if (debug_displaced)
4061 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
4062 is_mov ? "move" : "ALU", (unsigned long) insn);
4063
4064 /* Instruction is of form:
4065
4066 <op><cond> rd, [rn,] rm [, <shift>]
4067
4068 Rewrite as:
4069
4070 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4071 r0, r1, r2 <- rd, rn, rm
4072 Insn: <op><cond> r0, r1, r2 [, <shift>]
4073 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4074 */
4075
4076 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4077 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4078 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4079 rd_val = displaced_read_reg (regs, from, rd);
4080 rn_val = displaced_read_reg (regs, from, rn);
4081 rm_val = displaced_read_reg (regs, from, rm);
4082 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4083 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4084 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4085 dsc->rd = rd;
4086
4087 if (is_mov)
4088 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
4089 else
4090 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
4091
4092 dsc->cleanup = &cleanup_alu_reg;
4093
4094 return 0;
4095 }
4096
4097 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4098
4099 static void
4100 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
4101 struct regcache *regs,
4102 struct displaced_step_closure *dsc)
4103 {
4104 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4105 int i;
4106
4107 for (i = 0; i < 4; i++)
4108 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4109
4110 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4111 }
4112
4113 static int
4114 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
4115 struct regcache *regs, struct displaced_step_closure *dsc)
4116 {
4117 unsigned int rn = bits (insn, 16, 19);
4118 unsigned int rm = bits (insn, 0, 3);
4119 unsigned int rd = bits (insn, 12, 15);
4120 unsigned int rs = bits (insn, 8, 11);
4121 unsigned int op = bits (insn, 21, 24);
4122 int is_mov = (op == 0xd), i;
4123 ULONGEST rd_val, rn_val, rm_val, rs_val;
4124 CORE_ADDR from = dsc->insn_addr;
4125
4126 if (!insn_references_pc (insn, 0x000fff0ful))
4127 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
4128
4129 if (debug_displaced)
4130 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
4131 "%.8lx\n", is_mov ? "move" : "ALU",
4132 (unsigned long) insn);
4133
4134 /* Instruction is of form:
4135
4136 <op><cond> rd, [rn,] rm, <shift> rs
4137
4138 Rewrite as:
4139
4140 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4141 r0, r1, r2, r3 <- rd, rn, rm, rs
4142 Insn: <op><cond> r0, r1, r2, <shift> r3
4143 Cleanup: tmp5 <- r0
4144 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4145 rd <- tmp5
4146 */
4147
4148 for (i = 0; i < 4; i++)
4149 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4150
4151 rd_val = displaced_read_reg (regs, from, rd);
4152 rn_val = displaced_read_reg (regs, from, rn);
4153 rm_val = displaced_read_reg (regs, from, rm);
4154 rs_val = displaced_read_reg (regs, from, rs);
4155 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4156 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4157 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4158 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
4159 dsc->rd = rd;
4160
4161 if (is_mov)
4162 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
4163 else
4164 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
4165
4166 dsc->cleanup = &cleanup_alu_shifted_reg;
4167
4168 return 0;
4169 }
4170
4171 /* Clean up load instructions. */
4172
4173 static void
4174 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
4175 struct displaced_step_closure *dsc)
4176 {
4177 ULONGEST rt_val, rt_val2 = 0, rn_val;
4178 CORE_ADDR from = dsc->insn_addr;
4179
4180 rt_val = displaced_read_reg (regs, from, 0);
4181 if (dsc->u.ldst.xfersize == 8)
4182 rt_val2 = displaced_read_reg (regs, from, 1);
4183 rn_val = displaced_read_reg (regs, from, 2);
4184
4185 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4186 if (dsc->u.ldst.xfersize > 4)
4187 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4188 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4189 if (!dsc->u.ldst.immed)
4190 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4191
4192 /* Handle register writeback. */
4193 if (dsc->u.ldst.writeback)
4194 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4195 /* Put result in right place. */
4196 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
4197 if (dsc->u.ldst.xfersize == 8)
4198 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
4199 }
4200
4201 /* Clean up store instructions. */
4202
4203 static void
4204 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
4205 struct displaced_step_closure *dsc)
4206 {
4207 CORE_ADDR from = dsc->insn_addr;
4208 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
4209
4210 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4211 if (dsc->u.ldst.xfersize > 4)
4212 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4213 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4214 if (!dsc->u.ldst.immed)
4215 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4216 if (!dsc->u.ldst.restore_r4)
4217 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
4218
4219 /* Writeback. */
4220 if (dsc->u.ldst.writeback)
4221 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4222 }
4223
4224 /* Copy "extra" load/store instructions. These are halfword/doubleword
4225 transfers, which have a different encoding to byte/word transfers. */
4226
4227 static int
4228 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
4229 struct regcache *regs, struct displaced_step_closure *dsc)
4230 {
4231 unsigned int op1 = bits (insn, 20, 24);
4232 unsigned int op2 = bits (insn, 5, 6);
4233 unsigned int rt = bits (insn, 12, 15);
4234 unsigned int rn = bits (insn, 16, 19);
4235 unsigned int rm = bits (insn, 0, 3);
4236 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
4237 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
4238 int immed = (op1 & 0x4) != 0;
4239 int opcode;
4240 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
4241 CORE_ADDR from = dsc->insn_addr;
4242
4243 if (!insn_references_pc (insn, 0x000ff00ful))
4244 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
4245
4246 if (debug_displaced)
4247 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
4248 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
4249 (unsigned long) insn);
4250
4251 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
4252
4253 if (opcode < 0)
4254 internal_error (__FILE__, __LINE__,
4255 _("copy_extra_ld_st: instruction decode error"));
4256
4257 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4258 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4259 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4260 if (!immed)
4261 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4262
4263 rt_val = displaced_read_reg (regs, from, rt);
4264 if (bytesize[opcode] == 8)
4265 rt_val2 = displaced_read_reg (regs, from, rt + 1);
4266 rn_val = displaced_read_reg (regs, from, rn);
4267 if (!immed)
4268 rm_val = displaced_read_reg (regs, from, rm);
4269
4270 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4271 if (bytesize[opcode] == 8)
4272 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
4273 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4274 if (!immed)
4275 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4276
4277 dsc->rd = rt;
4278 dsc->u.ldst.xfersize = bytesize[opcode];
4279 dsc->u.ldst.rn = rn;
4280 dsc->u.ldst.immed = immed;
4281 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4282 dsc->u.ldst.restore_r4 = 0;
4283
4284 if (immed)
4285 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
4286 ->
4287 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
4288 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4289 else
4290 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
4291 ->
4292 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
4293 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4294
4295 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
4296
4297 return 0;
4298 }
4299
4300 /* Copy byte/word loads and stores. */
4301
4302 static int
4303 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
4304 struct regcache *regs,
4305 struct displaced_step_closure *dsc, int load, int byte,
4306 int usermode)
4307 {
4308 int immed = !bit (insn, 25);
4309 unsigned int rt = bits (insn, 12, 15);
4310 unsigned int rn = bits (insn, 16, 19);
4311 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
4312 ULONGEST rt_val, rn_val, rm_val = 0;
4313 CORE_ADDR from = dsc->insn_addr;
4314
4315 if (!insn_references_pc (insn, 0x000ff00ful))
4316 return copy_unmodified (gdbarch, insn, "load/store", dsc);
4317
4318 if (debug_displaced)
4319 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
4320 load ? (byte ? "ldrb" : "ldr")
4321 : (byte ? "strb" : "str"), usermode ? "t" : "",
4322 (unsigned long) insn);
4323
4324 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4325 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4326 if (!immed)
4327 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4328 if (!load)
4329 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
4330
4331 rt_val = displaced_read_reg (regs, from, rt);
4332 rn_val = displaced_read_reg (regs, from, rn);
4333 if (!immed)
4334 rm_val = displaced_read_reg (regs, from, rm);
4335
4336 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4337 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4338 if (!immed)
4339 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4340
4341 dsc->rd = rt;
4342 dsc->u.ldst.xfersize = byte ? 1 : 4;
4343 dsc->u.ldst.rn = rn;
4344 dsc->u.ldst.immed = immed;
4345 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4346
4347 /* To write PC we can do:
4348
4349 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
4350 scratch+4: ldr r4, temp
4351 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
4352 scratch+12: add r4, r4, #8 (r4 = offset)
4353 scratch+16: add r0, r0, r4
4354 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
4355 scratch+24: <breakpoint>, scratch+28: <temp>
4356
4357 Otherwise we don't know what value to write for PC, since the offset is
4358 architecture-dependent (sometimes PC+8, sometimes PC+12). */
4359
4360 if (load || rt != 15)
4361 {
4362 dsc->u.ldst.restore_r4 = 0;
4363
4364 if (immed)
4365 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
4366 ->
4367 {ldr,str}[b]<cond> r0, [r2, #imm]. */
4368 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4369 else
4370 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
4371 ->
4372 {ldr,str}[b]<cond> r0, [r2, r3]. */
4373 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4374 }
4375 else
4376 {
4377 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
4378 dsc->u.ldst.restore_r4 = 1;
4379
4380 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
4381 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
4382 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
4383 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
4384 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
4385
4386 /* As above. */
4387 if (immed)
4388 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
4389 else
4390 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
4391
4392 dsc->modinsn[6] = 0x0; /* breakpoint location. */
4393 dsc->modinsn[7] = 0x0; /* scratch space. */
4394
4395 dsc->numinsns = 6;
4396 }
4397
4398 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
4399
4400 return 0;
4401 }
4402
4403 /* Cleanup LDM instructions with fully-populated register list. This is an
4404 unfortunate corner case: it's impossible to implement correctly by modifying
4405 the instruction. The issue is as follows: we have an instruction,
4406
4407 ldm rN, {r0-r15}
4408
4409 which we must rewrite to avoid loading PC. A possible solution would be to
4410 do the load in two halves, something like (with suitable cleanup
4411 afterwards):
4412
4413 mov r8, rN
4414 ldm[id][ab] r8!, {r0-r7}
4415 str r7, <temp>
4416 ldm[id][ab] r8, {r7-r14}
4417 <bkpt>
4418
4419 but at present there's no suitable place for <temp>, since the scratch space
4420 is overwritten before the cleanup routine is called. For now, we simply
4421 emulate the instruction. */
4422
4423 static void
4424 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
4425 struct displaced_step_closure *dsc)
4426 {
4427 ULONGEST from = dsc->insn_addr;
4428 int inc = dsc->u.block.increment;
4429 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
4430 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
4431 uint32_t regmask = dsc->u.block.regmask;
4432 int regno = inc ? 0 : 15;
4433 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
4434 int exception_return = dsc->u.block.load && dsc->u.block.user
4435 && (regmask & 0x8000) != 0;
4436 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4437 int do_transfer = condition_true (dsc->u.block.cond, status);
4438 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4439
4440 if (!do_transfer)
4441 return;
4442
4443 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
4444 sensible we can do here. Complain loudly. */
4445 if (exception_return)
4446 error (_("Cannot single-step exception return"));
4447
4448 /* We don't handle any stores here for now. */
4449 gdb_assert (dsc->u.block.load != 0);
4450
4451 if (debug_displaced)
4452 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
4453 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
4454 dsc->u.block.increment ? "inc" : "dec",
4455 dsc->u.block.before ? "before" : "after");
4456
4457 while (regmask)
4458 {
4459 uint32_t memword;
4460
4461 if (inc)
4462 while (regno <= 15 && (regmask & (1 << regno)) == 0)
4463 regno++;
4464 else
4465 while (regno >= 0 && (regmask & (1 << regno)) == 0)
4466 regno--;
4467
4468 xfer_addr += bump_before;
4469
4470 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
4471 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
4472
4473 xfer_addr += bump_after;
4474
4475 regmask &= ~(1 << regno);
4476 }
4477
4478 if (dsc->u.block.writeback)
4479 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
4480 CANNOT_WRITE_PC);
4481 }
4482
4483 /* Clean up an STM which included the PC in the register list. */
4484
4485 static void
4486 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
4487 struct displaced_step_closure *dsc)
4488 {
4489 ULONGEST from = dsc->insn_addr;
4490 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4491 int store_executed = condition_true (dsc->u.block.cond, status);
4492 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
4493 CORE_ADDR stm_insn_addr;
4494 uint32_t pc_val;
4495 long offset;
4496 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4497
4498 /* If condition code fails, there's nothing else to do. */
4499 if (!store_executed)
4500 return;
4501
4502 if (dsc->u.block.increment)
4503 {
4504 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
4505
4506 if (dsc->u.block.before)
4507 pc_stored_at += 4;
4508 }
4509 else
4510 {
4511 pc_stored_at = dsc->u.block.xfer_addr;
4512
4513 if (dsc->u.block.before)
4514 pc_stored_at -= 4;
4515 }
4516
4517 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
4518 stm_insn_addr = dsc->scratch_base;
4519 offset = pc_val - stm_insn_addr;
4520
4521 if (debug_displaced)
4522 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
4523 "STM instruction\n", offset);
4524
4525 /* Rewrite the stored PC to the proper value for the non-displaced original
4526 instruction. */
4527 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
4528 dsc->insn_addr + offset);
4529 }
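
/* Hypothetical example of the fixup above: if the out-of-line STM stored the
   value scratch_base + 12 in the PC slot, the detected offset is 12 and the
   word is rewritten as insn_addr + 12, reproducing whatever "PC + offset"
   convention the CPU used, but relative to the original location.  */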
4530
4531 /* Clean up an LDM which includes the PC in the register list. We clumped all
4532 the registers in the transferred list into a contiguous range r0...rX (to
4533 avoid loading PC directly and losing control of the debugged program), so we
4534 must undo that here. */
4535
4536 static void
4537 cleanup_block_load_pc (struct gdbarch *gdbarch,
4538 struct regcache *regs,
4539 struct displaced_step_closure *dsc)
4540 {
4541 ULONGEST from = dsc->insn_addr;
4542 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4543 int load_executed = condition_true (dsc->u.block.cond, status), i;
4544 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
4545 unsigned int regs_loaded = bitcount (mask);
4546 unsigned int num_to_shuffle = regs_loaded, clobbered;
4547
4548 /* The method employed here will fail if the register list is fully populated
4549 (we need to avoid loading PC directly). */
4550 gdb_assert (num_to_shuffle < 16);
4551
4552 if (!load_executed)
4553 return;
4554
4555 clobbered = (1 << num_to_shuffle) - 1;
4556
4557 while (num_to_shuffle > 0)
4558 {
4559 if ((mask & (1 << write_reg)) != 0)
4560 {
4561 unsigned int read_reg = num_to_shuffle - 1;
4562
4563 if (read_reg != write_reg)
4564 {
4565 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
4566 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
4567 if (debug_displaced)
4568 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
4569 "loaded register r%d to r%d\n"), read_reg,
4570 write_reg);
4571 }
4572 else if (debug_displaced)
4573 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
4574 "r%d already in the right place\n"),
4575 write_reg);
4576
4577 clobbered &= ~(1 << write_reg);
4578
4579 num_to_shuffle--;
4580 }
4581
4582 write_reg--;
4583 }
4584
4585 /* Restore any registers we scribbled over. */
4586 for (write_reg = 0; clobbered != 0; write_reg++)
4587 {
4588 if ((clobbered & (1 << write_reg)) != 0)
4589 {
4590 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
4591 CANNOT_WRITE_PC);
4592 if (debug_displaced)
4593 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
4594 "clobbered register r%d\n"), write_reg);
4595 clobbered &= ~(1 << write_reg);
4596 }
4597 }
4598
4599 /* Perform register writeback manually. */
4600 if (dsc->u.block.writeback)
4601 {
4602 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
4603
4604 if (dsc->u.block.increment)
4605 new_rn_val += regs_loaded * 4;
4606 else
4607 new_rn_val -= regs_loaded * 4;
4608
4609 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
4610 CANNOT_WRITE_PC);
4611 }
4612 }
4613
4614 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
4615 in user-level code (in particular exception return, ldm rn, {...pc}^). */
4616
4617 static int
4618 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4619 struct displaced_step_closure *dsc)
4620 {
4621 int load = bit (insn, 20);
4622 int user = bit (insn, 22);
4623 int increment = bit (insn, 23);
4624 int before = bit (insn, 24);
4625 int writeback = bit (insn, 21);
4626 int rn = bits (insn, 16, 19);
4627 CORE_ADDR from = dsc->insn_addr;
4628
4629 /* Block transfers which don't mention PC can be run directly out-of-line. */
4630 if (rn != 15 && (insn & 0x8000) == 0)
4631 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
4632
4633 if (rn == 15)
4634 {
4635 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
4636 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
4637 }
4638
4639 if (debug_displaced)
4640 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
4641 "%.8lx\n", (unsigned long) insn);
4642
4643 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
4644 dsc->u.block.rn = rn;
4645
4646 dsc->u.block.load = load;
4647 dsc->u.block.user = user;
4648 dsc->u.block.increment = increment;
4649 dsc->u.block.before = before;
4650 dsc->u.block.writeback = writeback;
4651 dsc->u.block.cond = bits (insn, 28, 31);
4652
4653 dsc->u.block.regmask = insn & 0xffff;
4654
4655 if (load)
4656 {
4657 if ((insn & 0xffff) == 0xffff)
4658 {
4659 /* LDM with a fully-populated register list. This case is
4660 particularly tricky. Implement for now by fully emulating the
4661 instruction (which might not behave perfectly in all cases, but
4662 these instructions should be rare enough for that not to matter
4663 too much). */
4664 dsc->modinsn[0] = ARM_NOP;
4665
4666 dsc->cleanup = &cleanup_block_load_all;
4667 }
4668 else
4669 {
4670 /* LDM of a list of registers which includes PC. Implement by
4671 rewriting the list of registers to be transferred into a
4672 contiguous chunk r0...rX before doing the transfer, then shuffling
4673 registers into the correct places in the cleanup routine. */
4674 unsigned int regmask = insn & 0xffff;
4675 unsigned int num_in_list = bitcount (regmask), new_regmask;
4676 unsigned int i;
4677
4678 for (i = 0; i < num_in_list; i++)
4679 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4680
4681 /* Writeback makes things complicated. We need to avoid clobbering
4682 the base register with one of the registers in our modified
4683 register list, but just using a different register can't work in
4684 all cases, e.g.:
4685
4686 ldm r14!, {r0-r13,pc}
4687
4688 which would need to be rewritten as:
4689
4690 ldm rN!, {r0-r14}
4691
4692 but that can't work, because there's no free register for N.
4693
4694 Solve this by turning off the writeback bit, and emulating
4695 writeback manually in the cleanup routine. */
4696
4697 if (writeback)
4698 insn &= ~(1 << 21);
4699
4700 new_regmask = (1 << num_in_list) - 1;
4701
4702 if (debug_displaced)
4703 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
4704 "{..., pc}: original reg list %.4x, modified "
4705 "list %.4x\n"), rn, writeback ? "!" : "",
4706 (int) insn & 0xffff, new_regmask);
4707
4708 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
4709
4710 dsc->cleanup = &cleanup_block_load_pc;
4711 }
4712 }
4713 else
4714 {
4715 /* STM of a list of registers which includes PC. Run the instruction
4716 as-is, but out of line: this will store the wrong value for the PC,
4717 so we must manually fix up the memory in the cleanup routine.
4718 Doing things this way has the advantage that we can auto-detect
4719 the offset of the PC write (which is architecture-dependent) in
4720 the cleanup routine. */
4721 dsc->modinsn[0] = insn;
4722
4723 dsc->cleanup = &cleanup_block_store_pc;
4724 }
4725
4726 return 0;
4727 }
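
/* Illustrative (hypothetical) example for an LDM which includes the PC but
   is not fully populated:

     ldm r6!, {r4, pc}       ; regmask 0x8010, two registers

   is executed out of line as

     ldm r6, {r0, r1}        ; new_regmask 0x0003, writeback bit cleared

   and cleanup_block_load_pc then moves r1 into the PC and r0 into r4,
   restores the clobbered low registers from DSC->tmp[], and applies the
   writeback to r6 by hand.  */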
4728
4729 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
4730 for Linux, where some SVC instructions must be treated specially. */
4731
4732 static void
4733 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
4734 struct displaced_step_closure *dsc)
4735 {
4736 CORE_ADDR from = dsc->insn_addr;
4737 CORE_ADDR resume_addr = from + 4;
4738
4739 if (debug_displaced)
4740 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
4741 "%.8lx\n", (unsigned long) resume_addr);
4742
4743 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
4744 }
4745
4746 static int
4747 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
4748 struct regcache *regs, struct displaced_step_closure *dsc)
4749 {
4750 CORE_ADDR from = dsc->insn_addr;
4751
4752 /* Allow OS-specific code to override SVC handling. */
4753 if (dsc->u.svc.copy_svc_os)
4754 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
4755
4756 if (debug_displaced)
4757 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
4758 (unsigned long) insn);
4759
4760 /* Preparation: none.
4761 Insn: unmodified svc.
4762 Cleanup: pc <- insn_addr + 4. */
4763
4764 dsc->modinsn[0] = insn;
4765
4766 dsc->cleanup = &cleanup_svc;
4767 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
4768 instruction. */
4769 dsc->wrote_to_pc = 1;
4770
4771 return 0;
4772 }
4773
4774 /* Copy undefined instructions. */
4775
4776 static int
4777 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
4778 struct displaced_step_closure *dsc)
4779 {
4780 if (debug_displaced)
4781 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
4782 (unsigned long) insn);
4783
4784 dsc->modinsn[0] = insn;
4785
4786 return 0;
4787 }
4788
4789 /* Copy unpredictable instructions. */
4790
4791 static int
4792 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
4793 struct displaced_step_closure *dsc)
4794 {
4795 if (debug_displaced)
4796 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
4797 "%.8lx\n", (unsigned long) insn);
4798
4799 dsc->modinsn[0] = insn;
4800
4801 return 0;
4802 }
4803
4804 /* The decode_* functions are instruction decoding helpers. They mostly follow
4805 the presentation in the ARM ARM. */
4806
4807 static int
4808 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
4809 struct regcache *regs,
4810 struct displaced_step_closure *dsc)
4811 {
4812 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
4813 unsigned int rn = bits (insn, 16, 19);
4814
4815 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
4816 return copy_unmodified (gdbarch, insn, "cps", dsc);
4817 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
4818 return copy_unmodified (gdbarch, insn, "setend", dsc);
4819 else if ((op1 & 0x60) == 0x20)
4820 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
4821 else if ((op1 & 0x71) == 0x40)
4822 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
4823 else if ((op1 & 0x77) == 0x41)
4824 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
4825 else if ((op1 & 0x77) == 0x45)
4826 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
4827 else if ((op1 & 0x77) == 0x51)
4828 {
4829 if (rn != 0xf)
4830 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
4831 else
4832 return copy_unpred (gdbarch, insn, dsc);
4833 }
4834 else if ((op1 & 0x77) == 0x55)
4835 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
4836 else if (op1 == 0x57)
4837 switch (op2)
4838 {
4839 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
4840 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
4841 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
4842 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
4843 default: return copy_unpred (gdbarch, insn, dsc);
4844 }
4845 else if ((op1 & 0x63) == 0x43)
4846 return copy_unpred (gdbarch, insn, dsc);
4847 else if ((op2 & 0x1) == 0x0)
4848 switch (op1 & ~0x80)
4849 {
4850 case 0x61:
4851 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
4852 case 0x65:
4853 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
4854 case 0x71: case 0x75:
4855 /* pld/pldw reg. */
4856 return copy_preload_reg (gdbarch, insn, regs, dsc);
4857 case 0x63: case 0x67: case 0x73: case 0x77:
4858 return copy_unpred (gdbarch, insn, dsc);
4859 default:
4860 return copy_undef (gdbarch, insn, dsc);
4861 }
4862 else
4863 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
4864 }
4865
4866 static int
4867 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
4868 struct regcache *regs, struct displaced_step_closure *dsc)
4869 {
4870 if (bit (insn, 27) == 0)
4871 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
4872 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
4873 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
4874 {
4875 case 0x0: case 0x2:
4876 return copy_unmodified (gdbarch, insn, "srs", dsc);
4877
4878 case 0x1: case 0x3:
4879 return copy_unmodified (gdbarch, insn, "rfe", dsc);
4880
4881 case 0x4: case 0x5: case 0x6: case 0x7:
4882 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
4883
4884 case 0x8:
4885 switch ((insn & 0xe00000) >> 21)
4886 {
4887 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
4888 /* stc/stc2. */
4889 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4890
4891 case 0x2:
4892 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
4893
4894 default:
4895 return copy_undef (gdbarch, insn, dsc);
4896 }
4897
4898 case 0x9:
4899 {
4900 int rn_f = (bits (insn, 16, 19) == 0xf);
4901 switch ((insn & 0xe00000) >> 21)
4902 {
4903 case 0x1: case 0x3:
4904 /* ldc/ldc2 imm (undefined for rn == pc). */
4905 return rn_f ? copy_undef (gdbarch, insn, dsc)
4906 : copy_copro_load_store (gdbarch, insn, regs, dsc);
4907
4908 case 0x2:
4909 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
4910
4911 case 0x4: case 0x5: case 0x6: case 0x7:
4912 /* ldc/ldc2 lit (undefined for rn != pc). */
4913 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
4914 : copy_undef (gdbarch, insn, dsc);
4915
4916 default:
4917 return copy_undef (gdbarch, insn, dsc);
4918 }
4919 }
4920
4921 case 0xa:
4922 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
4923
4924 case 0xb:
4925 if (bits (insn, 16, 19) == 0xf)
4926 /* ldc/ldc2 lit. */
4927 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4928 else
4929 return copy_undef (gdbarch, insn, dsc);
4930
4931 case 0xc:
4932 if (bit (insn, 4))
4933 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
4934 else
4935 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4936
4937 case 0xd:
4938 if (bit (insn, 4))
4939 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
4940 else
4941 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4942
4943 default:
4944 return copy_undef (gdbarch, insn, dsc);
4945 }
4946 }
4947
4948 /* Decode miscellaneous instructions in dp/misc encoding space. */
4949
4950 static int
4951 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
4952 struct regcache *regs, struct displaced_step_closure *dsc)
4953 {
4954 unsigned int op2 = bits (insn, 4, 6);
4955 unsigned int op = bits (insn, 21, 22);
4956 unsigned int op1 = bits (insn, 16, 19);
4957
4958 switch (op2)
4959 {
4960 case 0x0:
4961 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
4962
4963 case 0x1:
4964 if (op == 0x1) /* bx. */
4965 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
4966 else if (op == 0x3)
4967 return copy_unmodified (gdbarch, insn, "clz", dsc);
4968 else
4969 return copy_undef (gdbarch, insn, dsc);
4970
4971 case 0x2:
4972 if (op == 0x1)
4973 /* Not really supported. */
4974 return copy_unmodified (gdbarch, insn, "bxj", dsc);
4975 else
4976 return copy_undef (gdbarch, insn, dsc);
4977
4978 case 0x3:
4979 if (op == 0x1)
4980 return copy_bx_blx_reg (gdbarch, insn, regs, dsc); /* blx register. */
4981 else
4982 return copy_undef (gdbarch, insn, dsc);
4983
4984 case 0x5:
4985 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
4986
4987 case 0x7:
4988 if (op == 0x1)
4989 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
4990 else if (op == 0x3)
4991 /* Not really supported. */
4992 return copy_unmodified (gdbarch, insn, "smc", dsc);
4993
4994 default:
4995 return copy_undef (gdbarch, insn, dsc);
4996 }
4997 }
4998
4999 static int
5000 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5001 struct displaced_step_closure *dsc)
5002 {
5003 if (bit (insn, 25))
5004 switch (bits (insn, 20, 24))
5005 {
5006 case 0x10:
5007 return copy_unmodified (gdbarch, insn, "movw", dsc);
5008
5009 case 0x14:
5010 return copy_unmodified (gdbarch, insn, "movt", dsc);
5011
5012 case 0x12: case 0x16:
5013 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
5014
5015 default:
5016 return copy_alu_imm (gdbarch, insn, regs, dsc);
5017 }
5018 else
5019 {
5020 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
5021
5022 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
5023 return copy_alu_reg (gdbarch, insn, regs, dsc);
5024 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
5025 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
5026 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
5027 return decode_miscellaneous (gdbarch, insn, regs, dsc);
5028 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
5029 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
5030 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
5031 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
5032 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
5033 return copy_unmodified (gdbarch, insn, "synch", dsc);
5034 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
5035 /* 2nd arg means "unprivileged". */
5036 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
5037 dsc);
5038 }
5039
5040 /* Should be unreachable. */
5041 return 1;
5042 }
5043
5044 static int
5045 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
5046 struct regcache *regs,
5047 struct displaced_step_closure *dsc)
5048 {
5049 int a = bit (insn, 25), b = bit (insn, 4);
5050 uint32_t op1 = bits (insn, 20, 24);
5051 int rn_f = bits (insn, 16, 19) == 0xf;
5052
5053 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
5054 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
5055 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
5056 else if ((!a && (op1 & 0x17) == 0x02)
5057 || (a && (op1 & 0x17) == 0x02 && !b))
5058 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
5059 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
5060 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
5061 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
5062 else if ((!a && (op1 & 0x17) == 0x03)
5063 || (a && (op1 & 0x17) == 0x03 && !b))
5064 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
5065 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
5066 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
5067 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
5068 else if ((!a && (op1 & 0x17) == 0x06)
5069 || (a && (op1 & 0x17) == 0x06 && !b))
5070 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
5071 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
5072 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
5073 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
5074 else if ((!a && (op1 & 0x17) == 0x07)
5075 || (a && (op1 & 0x17) == 0x07 && !b))
5076 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
5077
5078 /* Should be unreachable. */
5079 return 1;
5080 }
5081
5082 static int
5083 decode_media (struct gdbarch *gdbarch, uint32_t insn,
5084 struct displaced_step_closure *dsc)
5085 {
5086 switch (bits (insn, 20, 24))
5087 {
5088 case 0x00: case 0x01: case 0x02: case 0x03:
5089 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
5090
5091 case 0x04: case 0x05: case 0x06: case 0x07:
5092 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
5093
5094 case 0x08: case 0x09: case 0x0a: case 0x0b:
5095 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
5096 return copy_unmodified (gdbarch, insn,
5097 "decode/pack/unpack/saturate/reverse", dsc);
5098
5099 case 0x18:
5100 if (bits (insn, 5, 7) == 0) /* op2. */
5101 {
5102 if (bits (insn, 12, 15) == 0xf)
5103 return copy_unmodified (gdbarch, insn, "usad8", dsc);
5104 else
5105 return copy_unmodified (gdbarch, insn, "usada8", dsc);
5106 }
5107 else
5108 return copy_undef (gdbarch, insn, dsc);
5109
5110 case 0x1a: case 0x1b:
5111 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5112 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
5113 else
5114 return copy_undef (gdbarch, insn, dsc);
5115
5116 case 0x1c: case 0x1d:
5117 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
5118 {
5119 if (bits (insn, 0, 3) == 0xf)
5120 return copy_unmodified (gdbarch, insn, "bfc", dsc);
5121 else
5122 return copy_unmodified (gdbarch, insn, "bfi", dsc);
5123 }
5124 else
5125 return copy_undef (gdbarch, insn, dsc);
5126
5127 case 0x1e: case 0x1f:
5128 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5129 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
5130 else
5131 return copy_undef (gdbarch, insn, dsc);
5132 }
5133
5134 /* Should be unreachable. */
5135 return 1;
5136 }
5137
5138 static int
5139 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
5140 struct regcache *regs, struct displaced_step_closure *dsc)
5141 {
5142 if (bit (insn, 25))
5143 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5144 else
5145 return copy_block_xfer (gdbarch, insn, regs, dsc);
5146 }
5147
5148 static int
5149 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
5150 struct regcache *regs, struct displaced_step_closure *dsc)
5151 {
5152 unsigned int opcode = bits (insn, 20, 24);
5153
5154 switch (opcode)
5155 {
5156 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
5157 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
5158
5159 case 0x08: case 0x0a: case 0x0c: case 0x0e:
5160 case 0x12: case 0x16:
5161 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
5162
5163 case 0x09: case 0x0b: case 0x0d: case 0x0f:
5164 case 0x13: case 0x17:
5165 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
5166
5167 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
5168 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
5169 /* Note: no writeback for these instructions. Bit 25 will always be
5170 zero though (via caller), so the following works OK. */
5171 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5172 }
5173
5174 /* Should be unreachable. */
5175 return 1;
5176 }
5177
5178 static int
5179 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5180 struct regcache *regs, struct displaced_step_closure *dsc)
5181 {
5182 unsigned int op1 = bits (insn, 20, 25);
5183 int op = bit (insn, 4);
5184 unsigned int coproc = bits (insn, 8, 11);
5185 unsigned int rn = bits (insn, 16, 19);
5186
5187 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
5188 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
5189 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
5190 && (coproc & 0xe) != 0xa)
5191 /* stc/stc2. */
5192 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5193 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
5194 && (coproc & 0xe) != 0xa)
5195 /* ldc/ldc2 imm/lit. */
5196 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5197 else if ((op1 & 0x3e) == 0x00)
5198 return copy_undef (gdbarch, insn, dsc);
5199 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
5200 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
5201 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
5202 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5203 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
5204 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5205 else if ((op1 & 0x30) == 0x20 && !op)
5206 {
5207 if ((coproc & 0xe) == 0xa)
5208 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
5209 else
5210 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5211 }
5212 else if ((op1 & 0x30) == 0x20 && op)
5213 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
5214 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
5215 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5216 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
5217 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5218 else if ((op1 & 0x30) == 0x30)
5219 return copy_svc (gdbarch, insn, to, regs, dsc);
5220 else
5221 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
5222 }
5223
5224 void
5225 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
5226 CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
5227 struct displaced_step_closure *dsc)
5228 {
5229 int err = 0;
5230
5231 if (!displaced_in_arm_mode (regs))
5232 error (_("Displaced stepping is only supported in ARM mode"));
5233
5234 /* Most displaced instructions use a 1-instruction scratch space, so set this
5235 here and override below if/when necessary. */
5236 dsc->numinsns = 1;
5237 dsc->insn_addr = from;
5238 dsc->scratch_base = to;
5239 dsc->cleanup = NULL;
5240 dsc->wrote_to_pc = 0;
5241
5242 if ((insn & 0xf0000000) == 0xf0000000)
5243 err = decode_unconditional (gdbarch, insn, regs, dsc);
5244 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
5245 {
5246 case 0x0: case 0x1: case 0x2: case 0x3:
5247 err = decode_dp_misc (gdbarch, insn, regs, dsc);
5248 break;
5249
5250 case 0x4: case 0x5: case 0x6:
5251 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
5252 break;
5253
5254 case 0x7:
5255 err = decode_media (gdbarch, insn, dsc);
5256 break;
5257
5258 case 0x8: case 0x9: case 0xa: case 0xb:
5259 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
5260 break;
5261
5262 case 0xc: case 0xd: case 0xe: case 0xf:
5263 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
5264 break;
5265 }
5266
5267 if (err)
5268 internal_error (__FILE__, __LINE__,
5269 _("arm_process_displaced_insn: Instruction decode error"));
5270 }
5271
5272 /* Actually set up the scratch space for a displaced instruction. */
5273
5274 void
5275 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
5276 CORE_ADDR to, struct displaced_step_closure *dsc)
5277 {
5278 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5279 unsigned int i;
5280 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5281
5282 /* Poke modified instruction(s). */
5283 for (i = 0; i < dsc->numinsns; i++)
5284 {
5285 if (debug_displaced)
5286 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
5287 "%.8lx\n", (unsigned long) dsc->modinsn[i],
5288 (unsigned long) to + i * 4);
5289 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
5290 dsc->modinsn[i]);
5291 }
5292
5293 /* Put breakpoint afterwards. */
5294 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
5295 tdep->arm_breakpoint_size);
5296
5297 if (debug_displaced)
5298 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
5299 paddress (gdbarch, from), paddress (gdbarch, to));
5300 }
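
/* For the common single-instruction case the scratch area at TO therefore
   ends up laid out as (a sketch, assuming a 4-byte ARM-mode breakpoint):

     TO + 0:  modified instruction
     TO + 4:  tdep->arm_breakpoint

   while e.g. the multi-instruction store-PC sequence built by
   copy_ldr_str_ldrb_strb uses six slots followed by the breakpoint at
   TO + 24.  */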
5301
5302 /* Entry point for copying an instruction into scratch space for displaced
5303 stepping. */
5304
5305 struct displaced_step_closure *
5306 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
5307 CORE_ADDR from, CORE_ADDR to,
5308 struct regcache *regs)
5309 {
5310 struct displaced_step_closure *dsc
5311 = xmalloc (sizeof (struct displaced_step_closure));
5312 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5313 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
5314
5315 if (debug_displaced)
5316 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
5317 "at %.8lx\n", (unsigned long) insn,
5318 (unsigned long) from);
5319
5320 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
5321 arm_displaced_init_closure (gdbarch, from, to, dsc);
5322
5323 return dsc;
5324 }
5325
5326 /* Entry point for cleaning things up after a displaced instruction has been
5327 single-stepped. */
5328
5329 void
5330 arm_displaced_step_fixup (struct gdbarch *gdbarch,
5331 struct displaced_step_closure *dsc,
5332 CORE_ADDR from, CORE_ADDR to,
5333 struct regcache *regs)
5334 {
5335 if (dsc->cleanup)
5336 dsc->cleanup (gdbarch, regs, dsc);
5337
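  /* If the copied instruction did not itself write the PC (any such write
     is performed by the cleanup routine above), resume at the instruction
     following the original one, i.e. four bytes past insn_addr in ARM
     state.  */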
5338 if (!dsc->wrote_to_pc)
5339 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
5340 }
5341
5342 #include "bfd-in2.h"
5343 #include "libcoff.h"
5344
5345 static int
5346 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
5347 {
5348 if (arm_pc_is_thumb (memaddr))
5349 {
5350 static asymbol *asym;
5351 static combined_entry_type ce;
5352 static struct coff_symbol_struct csym;
5353 static struct bfd fake_bfd;
5354 static bfd_target fake_target;
5355
5356 if (csym.native == NULL)
5357 {
5358 /* Create a fake symbol vector containing a Thumb symbol.
5359 This is solely so that the code in print_insn_little_arm()
5360 and print_insn_big_arm() in opcodes/arm-dis.c will detect
5361 the presence of a Thumb symbol and switch to decoding
5362 Thumb instructions. */
5363
5364 fake_target.flavour = bfd_target_coff_flavour;
5365 fake_bfd.xvec = &fake_target;
5366 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
5367 csym.native = &ce;
5368 csym.symbol.the_bfd = &fake_bfd;
5369 csym.symbol.name = "fake";
5370 asym = (asymbol *) & csym;
5371 }
5372
5373 memaddr = UNMAKE_THUMB_ADDR (memaddr);
5374 info->symbols = &asym;
5375 }
5376 else
5377 info->symbols = NULL;
5378
5379 if (info->endian == BFD_ENDIAN_BIG)
5380 return print_insn_big_arm (memaddr, info);
5381 else
5382 return print_insn_little_arm (memaddr, info);
5383 }
5384
5385 /* The following define instruction sequences that will cause ARM
5386    CPUs to take an undefined instruction trap.  These are used to
5387    signal a breakpoint to GDB.
5388
5389    The newer ARMv4T CPUs are capable of operating in ARM or Thumb
5390    modes.  A different instruction is required for each mode.  The ARM
5391    CPUs can also be big or little endian.  Thus four different
5392    instructions are needed to support all cases.
5393
5394 Note: ARMv4 defines several new instructions that will take the
5395 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
5396 not in fact add the new instructions. The new undefined
5397 instructions in ARMv4 are all instructions that had no defined
5398 behaviour in earlier chips. There is no guarantee that they will
5399    raise an exception; they may instead be treated as NOPs.  In practice, it
5400    may only be safe to rely on instructions matching:
5401
5402 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
5403 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
5404 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
5405
5406    Even this may only be true if the condition predicate is true.  The
5407 following use a condition predicate of ALWAYS so it is always TRUE.
5408
5409 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
5410 and NetBSD all use a software interrupt rather than an undefined
5411    instruction to force a trap.  This can be handled by the
5412    ABI-specific code during establishment of the gdbarch vector.  */
5413
5414 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
5415 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
5416 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
5417 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
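/* Note: the ARM patterns above encode the always-undefined instruction
   0xe7ffdefe, which matches the bit pattern described in the comment
   above; the Thumb pattern is the same for either byte order because
   both bytes of the 16-bit encoding are identical.  */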
5418
5419 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
5420 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
5421 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
5422 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
5423
5424 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
5425 the program counter value to determine whether a 16-bit or 32-bit
5426 breakpoint should be used. It returns a pointer to a string of
5427 bytes that encode a breakpoint instruction, stores the length of
5428 the string to *lenptr, and adjusts the program counter (if
5429 necessary) to point to the actual memory location where the
5430 breakpoint should be inserted. */
5431
5432 static const unsigned char *
5433 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
5434 {
5435 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5436 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5437
5438 if (arm_pc_is_thumb (*pcptr))
5439 {
5440 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
5441
5442 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
5443 check whether we are replacing a 32-bit instruction. */
5444 if (tdep->thumb2_breakpoint != NULL)
5445 {
5446 gdb_byte buf[2];
5447 if (target_read_memory (*pcptr, buf, 2) == 0)
5448 {
5449 unsigned short inst1;
5450 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
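	      /* Halfwords whose five most significant bits are 0b11101,
		 0b11110 or 0b11111 are the first half of a 32-bit Thumb-2
		 instruction, so a 32-bit breakpoint must be used to
		 replace them.  */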
5451 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
5452 {
5453 *lenptr = tdep->thumb2_breakpoint_size;
5454 return tdep->thumb2_breakpoint;
5455 }
5456 }
5457 }
5458
5459 *lenptr = tdep->thumb_breakpoint_size;
5460 return tdep->thumb_breakpoint;
5461 }
5462 else
5463 {
5464 *lenptr = tdep->arm_breakpoint_size;
5465 return tdep->arm_breakpoint;
5466 }
5467 }
5468
5469 static void
5470 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
5471 int *kindptr)
5472 {
5473 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5474
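  /* Reuse the breakpoint length as the remote protocol "kind": 2 for a
     16-bit Thumb breakpoint and 4 for a 32-bit ARM breakpoint; the 32-bit
     Thumb-2 case is remapped to 3 below so that it is distinct from the
     ARM case.  */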
5475 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
5476
5477 if (arm_pc_is_thumb (*pcptr) && *kindptr == 4)
5478 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
5479 that this is not confused with a 32-bit ARM breakpoint. */
5480 *kindptr = 3;
5481 }
5482
5483 /* Extract from an array REGBUF containing the (raw) register state a
5484 function return value of type TYPE, and copy that, in virtual
5485 format, into VALBUF. */
5486
5487 static void
5488 arm_extract_return_value (struct type *type, struct regcache *regs,
5489 gdb_byte *valbuf)
5490 {
5491 struct gdbarch *gdbarch = get_regcache_arch (regs);
5492 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5493
5494 if (TYPE_CODE_FLT == TYPE_CODE (type))
5495 {
5496 switch (gdbarch_tdep (gdbarch)->fp_model)
5497 {
5498 case ARM_FLOAT_FPA:
5499 {
5500 /* The value is in register F0 in internal format. We need to
5501 extract the raw value and then convert it to the desired
5502 internal type. */
5503 bfd_byte tmpbuf[FP_REGISTER_SIZE];
5504
5505 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
5506 convert_from_extended (floatformat_from_type (type), tmpbuf,
5507 valbuf, gdbarch_byte_order (gdbarch));
5508 }
5509 break;
5510
5511 case ARM_FLOAT_SOFT_FPA:
5512 case ARM_FLOAT_SOFT_VFP:
5513	/* ARM_FLOAT_VFP can arise if this is a variadic function, which
5514	   therefore is not using the VFP ABI code.  */
5515 case ARM_FLOAT_VFP:
5516 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
5517 if (TYPE_LENGTH (type) > 4)
5518 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
5519 valbuf + INT_REGISTER_SIZE);
5520 break;
5521
5522 default:
5523 internal_error
5524 (__FILE__, __LINE__,
5525 _("arm_extract_return_value: Floating point model not supported"));
5526 break;
5527 }
5528 }
5529 else if (TYPE_CODE (type) == TYPE_CODE_INT
5530 || TYPE_CODE (type) == TYPE_CODE_CHAR
5531 || TYPE_CODE (type) == TYPE_CODE_BOOL
5532 || TYPE_CODE (type) == TYPE_CODE_PTR
5533 || TYPE_CODE (type) == TYPE_CODE_REF
5534 || TYPE_CODE (type) == TYPE_CODE_ENUM)
5535 {
5536      /* If the type is a plain integer, then the access is
5537	 straightforward.  Otherwise we have to play around a bit more.  */
5538 int len = TYPE_LENGTH (type);
5539 int regno = ARM_A1_REGNUM;
5540 ULONGEST tmp;
5541
5542 while (len > 0)
5543 {
5544 /* By using store_unsigned_integer we avoid having to do
5545 anything special for small big-endian values. */
5546 regcache_cooked_read_unsigned (regs, regno++, &tmp);
5547 store_unsigned_integer (valbuf,
5548 (len > INT_REGISTER_SIZE
5549 ? INT_REGISTER_SIZE : len),
5550 byte_order, tmp);
5551 len -= INT_REGISTER_SIZE;
5552 valbuf += INT_REGISTER_SIZE;
5553 }
5554 }
5555 else
5556 {
5557 /* For a structure or union the behaviour is as if the value had
5558 been stored to word-aligned memory and then loaded into
5559 registers with 32-bit load instruction(s). */
5560 int len = TYPE_LENGTH (type);
5561 int regno = ARM_A1_REGNUM;
5562 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5563
5564 while (len > 0)
5565 {
5566 regcache_cooked_read (regs, regno++, tmpbuf);
5567 memcpy (valbuf, tmpbuf,
5568 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
5569 len -= INT_REGISTER_SIZE;
5570 valbuf += INT_REGISTER_SIZE;
5571 }
5572 }
5573 }
5574
5575
5576 /* Will a function return an aggregate type in memory or in a
5577 register? Return 0 if an aggregate type can be returned in a
5578 register, 1 if it must be returned in memory. */
5579
5580 static int
5581 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
5582 {
5583 int nRc;
5584 enum type_code code;
5585
5586 CHECK_TYPEDEF (type);
5587
5588 /* In the ARM ABI, "integer" like aggregate types are returned in
5589 registers. For an aggregate type to be integer like, its size
5590 must be less than or equal to INT_REGISTER_SIZE and the
5591 offset of each addressable subfield must be zero. Note that bit
5592 fields are not addressable, and all addressable subfields of
5593 unions always start at offset zero.
5594
5595 This function is based on the behaviour of GCC 2.95.1.
5596 See: gcc/arm.c: arm_return_in_memory() for details.
5597
5598 Note: All versions of GCC before GCC 2.95.2 do not set up the
5599 parameters correctly for a function returning the following
5600 structure: struct { float f;}; This should be returned in memory,
5601 not a register. Richard Earnshaw sent me a patch, but I do not
5602 know of any way to detect if a function like the above has been
5603 compiled with the correct calling convention. */
5604
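  /* Illustrative examples (derived from the checks below, not quoted from
     the ABI): struct { char c; } and a structure containing only
     bit-fields are "integer like" and come back in r0, whereas
     struct { float f; } or struct { short s; char c; } (whose second
     field sits at a non-zero offset) are returned in memory.  */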
5605 /* All aggregate types that won't fit in a register must be returned
5606 in memory. */
5607 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
5608 {
5609 return 1;
5610 }
5611
5612 /* The AAPCS says all aggregates not larger than a word are returned
5613 in a register. */
5614 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
5615 return 0;
5616
5617 /* The only aggregate types that can be returned in a register are
5618 structs and unions. Arrays must be returned in memory. */
5619 code = TYPE_CODE (type);
5620 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
5621 {
5622 return 1;
5623 }
5624
5625 /* Assume all other aggregate types can be returned in a register.
5626 Run a check for structures, unions and arrays. */
5627 nRc = 0;
5628
5629 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
5630 {
5631 int i;
5632 /* Need to check if this struct/union is "integer" like. For
5633 this to be true, its size must be less than or equal to
5634 INT_REGISTER_SIZE and the offset of each addressable
5635 subfield must be zero. Note that bit fields are not
5636 addressable, and unions always start at offset zero. If any
5637 of the subfields is a floating point type, the struct/union
5638 cannot be an integer type. */
5639
5640 /* For each field in the object, check:
5641 1) Is it FP? --> yes, nRc = 1;
5642 2) Is it addressable (bitpos != 0) and
5643 not packed (bitsize == 0)?
5644 --> yes, nRc = 1
5645 */
5646
5647 for (i = 0; i < TYPE_NFIELDS (type); i++)
5648 {
5649 enum type_code field_type_code;
5650 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, i)));
5651
5652 /* Is it a floating point type field? */
5653 if (field_type_code == TYPE_CODE_FLT)
5654 {
5655 nRc = 1;
5656 break;
5657 }
5658
5659 /* If bitpos != 0, then we have to care about it. */
5660 if (TYPE_FIELD_BITPOS (type, i) != 0)
5661 {
5662 /* Bitfields are not addressable. If the field bitsize is
5663 zero, then the field is not packed. Hence it cannot be
5664 a bitfield or any other packed type. */
5665 if (TYPE_FIELD_BITSIZE (type, i) == 0)
5666 {
5667 nRc = 1;
5668 break;
5669 }
5670 }
5671 }
5672 }
5673
5674 return nRc;
5675 }
5676
5677 /* Write into appropriate registers a function return value of type
5678 TYPE, given in virtual format. */
5679
5680 static void
5681 arm_store_return_value (struct type *type, struct regcache *regs,
5682 const gdb_byte *valbuf)
5683 {
5684 struct gdbarch *gdbarch = get_regcache_arch (regs);
5685 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5686
5687 if (TYPE_CODE (type) == TYPE_CODE_FLT)
5688 {
5689 char buf[MAX_REGISTER_SIZE];
5690
5691 switch (gdbarch_tdep (gdbarch)->fp_model)
5692 {
5693 case ARM_FLOAT_FPA:
5694
5695 convert_to_extended (floatformat_from_type (type), buf, valbuf,
5696 gdbarch_byte_order (gdbarch));
5697 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
5698 break;
5699
5700 case ARM_FLOAT_SOFT_FPA:
5701 case ARM_FLOAT_SOFT_VFP:
5702	/* ARM_FLOAT_VFP can arise if this is a variadic function, which
5703	   therefore is not using the VFP ABI code.  */
5704 case ARM_FLOAT_VFP:
5705 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
5706 if (TYPE_LENGTH (type) > 4)
5707 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
5708 valbuf + INT_REGISTER_SIZE);
5709 break;
5710
5711 default:
5712 internal_error
5713 (__FILE__, __LINE__,
5714 _("arm_store_return_value: Floating point model not supported"));
5715 break;
5716 }
5717 }
5718 else if (TYPE_CODE (type) == TYPE_CODE_INT
5719 || TYPE_CODE (type) == TYPE_CODE_CHAR
5720 || TYPE_CODE (type) == TYPE_CODE_BOOL
5721 || TYPE_CODE (type) == TYPE_CODE_PTR
5722 || TYPE_CODE (type) == TYPE_CODE_REF
5723 || TYPE_CODE (type) == TYPE_CODE_ENUM)
5724 {
5725 if (TYPE_LENGTH (type) <= 4)
5726 {
5727 /* Values of one word or less are zero/sign-extended and
5728 returned in r0. */
5729 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5730 LONGEST val = unpack_long (type, valbuf);
5731
5732 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
5733 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
5734 }
5735 else
5736 {
5737 /* Integral values greater than one word are stored in consecutive
5738 registers starting with r0. This will always be a multiple of
5739	     the register size.  */
5740 int len = TYPE_LENGTH (type);
5741 int regno = ARM_A1_REGNUM;
5742
5743 while (len > 0)
5744 {
5745 regcache_cooked_write (regs, regno++, valbuf);
5746 len -= INT_REGISTER_SIZE;
5747 valbuf += INT_REGISTER_SIZE;
5748 }
5749 }
5750 }
5751 else
5752 {
5753 /* For a structure or union the behaviour is as if the value had
5754 been stored to word-aligned memory and then loaded into
5755 registers with 32-bit load instruction(s). */
5756 int len = TYPE_LENGTH (type);
5757 int regno = ARM_A1_REGNUM;
5758 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5759
5760 while (len > 0)
5761 {
5762 memcpy (tmpbuf, valbuf,
5763 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
5764 regcache_cooked_write (regs, regno++, tmpbuf);
5765 len -= INT_REGISTER_SIZE;
5766 valbuf += INT_REGISTER_SIZE;
5767 }
5768 }
5769 }
5770
5771
5772 /* Handle function return values. */
5773
5774 static enum return_value_convention
5775 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
5776 struct type *valtype, struct regcache *regcache,
5777 gdb_byte *readbuf, const gdb_byte *writebuf)
5778 {
5779 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5780 enum arm_vfp_cprc_base_type vfp_base_type;
5781 int vfp_base_count;
5782
5783 if (arm_vfp_abi_for_function (gdbarch, func_type)
5784 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
5785 {
5786 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
5787 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
5788 int i;
5789 for (i = 0; i < vfp_base_count; i++)
5790 {
5791 if (reg_char == 'q')
5792 {
5793 if (writebuf)
5794 arm_neon_quad_write (gdbarch, regcache, i,
5795 writebuf + i * unit_length);
5796
5797 if (readbuf)
5798 arm_neon_quad_read (gdbarch, regcache, i,
5799 readbuf + i * unit_length);
5800 }
5801 else
5802 {
5803 char name_buf[4];
5804 int regnum;
5805
5806 sprintf (name_buf, "%c%d", reg_char, i);
5807 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5808 strlen (name_buf));
5809 if (writebuf)
5810 regcache_cooked_write (regcache, regnum,
5811 writebuf + i * unit_length);
5812 if (readbuf)
5813 regcache_cooked_read (regcache, regnum,
5814 readbuf + i * unit_length);
5815 }
5816 }
5817 return RETURN_VALUE_REGISTER_CONVENTION;
5818 }
5819
5820 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
5821 || TYPE_CODE (valtype) == TYPE_CODE_UNION
5822 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
5823 {
5824 if (tdep->struct_return == pcc_struct_return
5825 || arm_return_in_memory (gdbarch, valtype))
5826 return RETURN_VALUE_STRUCT_CONVENTION;
5827 }
5828
5829 if (writebuf)
5830 arm_store_return_value (valtype, regcache, writebuf);
5831
5832 if (readbuf)
5833 arm_extract_return_value (valtype, regcache, readbuf);
5834
5835 return RETURN_VALUE_REGISTER_CONVENTION;
5836 }
5837
5838
5839 static int
5840 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
5841 {
5842 struct gdbarch *gdbarch = get_frame_arch (frame);
5843 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5844 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5845 CORE_ADDR jb_addr;
5846 char buf[INT_REGISTER_SIZE];
5847
5848 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
5849
5850 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
5851 INT_REGISTER_SIZE))
5852 return 0;
5853
5854 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
5855 return 1;
5856 }
5857
5858 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
5859 return the target PC. Otherwise return 0. */
5860
5861 CORE_ADDR
5862 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
5863 {
5864 char *name;
5865 int namelen;
5866 CORE_ADDR start_addr;
5867
5868 /* Find the starting address and name of the function containing the PC. */
5869 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
5870 return 0;
5871
5872 /* If PC is in a Thumb call or return stub, return the address of the
5873 target PC, which is in a register. The thunk functions are called
5874      _call_via_xx, where xx is the register name.  The possible names
5875 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
5876 functions, named __ARM_call_via_r[0-7]. */
5877 if (strncmp (name, "_call_via_", 10) == 0
5878 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
5879 {
5880 /* Use the name suffix to determine which register contains the
5881 target PC. */
5882 static char *table[15] =
5883 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
5884 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
5885 };
5886 int regno;
5887 int offset = strlen (name) - 2;
5888
5889 for (regno = 0; regno <= 14; regno++)
5890 if (strcmp (&name[offset], table[regno]) == 0)
5891 return get_frame_register_unsigned (frame, regno);
5892 }
5893
5894 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
5895 non-interworking calls to foo. We could decode the stubs
5896 to find the target but it's easier to use the symbol table. */
5897 namelen = strlen (name);
5898 if (name[0] == '_' && name[1] == '_'
5899 && ((namelen > 2 + strlen ("_from_thumb")
5900 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
5901 strlen ("_from_thumb")) == 0)
5902 || (namelen > 2 + strlen ("_from_arm")
5903 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
5904 strlen ("_from_arm")) == 0)))
5905 {
5906 char *target_name;
5907 int target_len = namelen - 2;
5908 struct minimal_symbol *minsym;
5909 struct objfile *objfile;
5910 struct obj_section *sec;
5911
5912 if (name[namelen - 1] == 'b')
5913 target_len -= strlen ("_from_thumb");
5914 else
5915 target_len -= strlen ("_from_arm");
5916
5917 target_name = alloca (target_len + 1);
5918 memcpy (target_name, name + 2, target_len);
5919 target_name[target_len] = '\0';
5920
5921 sec = find_pc_section (pc);
5922 objfile = (sec == NULL) ? NULL : sec->objfile;
5923 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
5924 if (minsym != NULL)
5925 return SYMBOL_VALUE_ADDRESS (minsym);
5926 else
5927 return 0;
5928 }
5929
5930 return 0; /* not a stub */
5931 }
5932
5933 static void
5934 set_arm_command (char *args, int from_tty)
5935 {
5936 printf_unfiltered (_("\
5937 \"set arm\" must be followed by an apporpriate subcommand.\n"));
5938 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
5939 }
5940
5941 static void
5942 show_arm_command (char *args, int from_tty)
5943 {
5944 cmd_show_list (showarmcmdlist, from_tty, "");
5945 }
5946
5947 static void
5948 arm_update_current_architecture (void)
5949 {
5950 struct gdbarch_info info;
5951
5952 /* If the current architecture is not ARM, we have nothing to do. */
5953 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
5954 return;
5955
5956 /* Update the architecture. */
5957 gdbarch_info_init (&info);
5958
5959 if (!gdbarch_update_p (info))
5960 internal_error (__FILE__, __LINE__, "could not update architecture");
5961 }
5962
5963 static void
5964 set_fp_model_sfunc (char *args, int from_tty,
5965 struct cmd_list_element *c)
5966 {
5967 enum arm_float_model fp_model;
5968
5969 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
5970 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
5971 {
5972 arm_fp_model = fp_model;
5973 break;
5974 }
5975
5976 if (fp_model == ARM_FLOAT_LAST)
5977 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
5978 current_fp_model);
5979
5980 arm_update_current_architecture ();
5981 }
5982
5983 static void
5984 show_fp_model (struct ui_file *file, int from_tty,
5985 struct cmd_list_element *c, const char *value)
5986 {
5987 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5988
5989 if (arm_fp_model == ARM_FLOAT_AUTO
5990 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
5991 fprintf_filtered (file, _("\
5992 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
5993 fp_model_strings[tdep->fp_model]);
5994 else
5995 fprintf_filtered (file, _("\
5996 The current ARM floating point model is \"%s\".\n"),
5997 fp_model_strings[arm_fp_model]);
5998 }
5999
6000 static void
6001 arm_set_abi (char *args, int from_tty,
6002 struct cmd_list_element *c)
6003 {
6004 enum arm_abi_kind arm_abi;
6005
6006 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
6007 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
6008 {
6009 arm_abi_global = arm_abi;
6010 break;
6011 }
6012
6013 if (arm_abi == ARM_ABI_LAST)
6014 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
6015 arm_abi_string);
6016
6017 arm_update_current_architecture ();
6018 }
6019
6020 static void
6021 arm_show_abi (struct ui_file *file, int from_tty,
6022 struct cmd_list_element *c, const char *value)
6023 {
6024 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6025
6026 if (arm_abi_global == ARM_ABI_AUTO
6027 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6028 fprintf_filtered (file, _("\
6029 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
6030 arm_abi_strings[tdep->arm_abi]);
6031 else
6032 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
6033 arm_abi_string);
6034 }
6035
6036 static void
6037 arm_show_fallback_mode (struct ui_file *file, int from_tty,
6038 struct cmd_list_element *c, const char *value)
6039 {
6040 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6041
6042 fprintf_filtered (file, _("\
6043 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
6044 arm_fallback_mode_string);
6045 }
6046
6047 static void
6048 arm_show_force_mode (struct ui_file *file, int from_tty,
6049 struct cmd_list_element *c, const char *value)
6050 {
6051 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6052
6053 fprintf_filtered (file, _("\
6054 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
6055 arm_force_mode_string);
6056 }
6057
6058 /* If the user changes the register disassembly style used for info
6059 register and other commands, we have to also switch the style used
6060 in opcodes for disassembly output. This function is run in the "set
6061 arm disassembly" command, and does that. */
6062
6063 static void
6064 set_disassembly_style_sfunc (char *args, int from_tty,
6065 struct cmd_list_element *c)
6066 {
6067 set_disassembly_style ();
6068 }
6069 \f
6070 /* Return the ARM register name corresponding to register I. */
6071 static const char *
6072 arm_register_name (struct gdbarch *gdbarch, int i)
6073 {
6074 const int num_regs = gdbarch_num_regs (gdbarch);
6075
6076 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
6077 && i >= num_regs && i < num_regs + 32)
6078 {
6079 static const char *const vfp_pseudo_names[] = {
6080 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6081 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6082 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6083 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6084 };
6085
6086 return vfp_pseudo_names[i - num_regs];
6087 }
6088
6089 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
6090 && i >= num_regs + 32 && i < num_regs + 32 + 16)
6091 {
6092 static const char *const neon_pseudo_names[] = {
6093 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6094 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6095 };
6096
6097 return neon_pseudo_names[i - num_regs - 32];
6098 }
6099
6100 if (i >= ARRAY_SIZE (arm_register_names))
6101 /* These registers are only supported on targets which supply
6102 an XML description. */
6103 return "";
6104
6105 return arm_register_names[i];
6106 }
6107
6108 static void
6109 set_disassembly_style (void)
6110 {
6111 int current;
6112
6113 /* Find the style that the user wants. */
6114 for (current = 0; current < num_disassembly_options; current++)
6115 if (disassembly_style == valid_disassembly_styles[current])
6116 break;
6117 gdb_assert (current < num_disassembly_options);
6118
6119 /* Synchronize the disassembler. */
6120 set_arm_regname_option (current);
6121 }
6122
6123 /* Test whether the coff symbol specific value corresponds to a Thumb
6124 function. */
6125
6126 static int
6127 coff_sym_is_thumb (int val)
6128 {
6129 return (val == C_THUMBEXT
6130 || val == C_THUMBSTAT
6131 || val == C_THUMBEXTFUNC
6132 || val == C_THUMBSTATFUNC
6133 || val == C_THUMBLABEL);
6134 }
6135
6136 /* arm_coff_make_msymbol_special()
6137 arm_elf_make_msymbol_special()
6138
6139 These functions test whether the COFF or ELF symbol corresponds to
6140 an address in thumb code, and set a "special" bit in a minimal
6141 symbol to indicate that it does. */
6142
6143 static void
6144 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
6145 {
6146 /* Thumb symbols are of type STT_LOPROC, (synonymous with
6147 STT_ARM_TFUNC). */
6148 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
6149 == STT_LOPROC)
6150 MSYMBOL_SET_SPECIAL (msym);
6151 }
6152
6153 static void
6154 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
6155 {
6156 if (coff_sym_is_thumb (val))
6157 MSYMBOL_SET_SPECIAL (msym);
6158 }
6159
6160 static void
6161 arm_objfile_data_free (struct objfile *objfile, void *arg)
6162 {
6163 struct arm_per_objfile *data = arg;
6164 unsigned int i;
6165
6166 for (i = 0; i < objfile->obfd->section_count; i++)
6167 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
6168 }
6169
6170 static void
6171 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
6172 asymbol *sym)
6173 {
6174 const char *name = bfd_asymbol_name (sym);
6175 struct arm_per_objfile *data;
6176 VEC(arm_mapping_symbol_s) **map_p;
6177 struct arm_mapping_symbol new_map_sym;
6178
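  /* Per the ARM ELF conventions, mapping symbols flag the type of the
     code or data that follows: $a marks the start of ARM code, $t the
     start of Thumb code and $d the start of a data region.  Anything
     else starting with '$' is of no interest here.  */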
6179 gdb_assert (name[0] == '$');
6180 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
6181 return;
6182
6183 data = objfile_data (objfile, arm_objfile_data_key);
6184 if (data == NULL)
6185 {
6186 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
6187 struct arm_per_objfile);
6188 set_objfile_data (objfile, arm_objfile_data_key, data);
6189 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
6190 objfile->obfd->section_count,
6191 VEC(arm_mapping_symbol_s) *);
6192 }
6193 map_p = &data->section_maps[bfd_get_section (sym)->index];
6194
6195 new_map_sym.value = sym->value;
6196 new_map_sym.type = name[1];
6197
6198 /* Assume that most mapping symbols appear in order of increasing
6199 value. If they were randomly distributed, it would be faster to
6200 always push here and then sort at first use. */
6201 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
6202 {
6203 struct arm_mapping_symbol *prev_map_sym;
6204
6205 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
6206 if (prev_map_sym->value >= sym->value)
6207 {
6208 unsigned int idx;
6209 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
6210 arm_compare_mapping_symbols);
6211 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
6212 return;
6213 }
6214 }
6215
6216 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
6217 }
6218
6219 static void
6220 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
6221 {
6222 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
6223
6224 /* If necessary, set the T bit. */
6225 if (arm_apcs_32)
6226 {
6227 ULONGEST val;
6228 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
6229 if (arm_pc_is_thumb (pc))
6230 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM, val | CPSR_T);
6231 else
6232 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6233 val & ~(ULONGEST) CPSR_T);
6234 }
6235 }
6236
6237 /* Read the contents of a NEON quad register, by reading from two
6238 double registers. This is used to implement the quad pseudo
6239 registers, and for argument passing in case the quad registers are
6240 missing; vectors are passed in quad registers when using the VFP
6241 ABI, even if a NEON unit is not present. REGNUM is the index of
6242 the quad register, in [0, 15]. */
6243
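/* In the VFP/NEON register file, quad register qN overlays double
   registers d(2N) and d(2N+1), with d(2N) holding the least significant
   half; the pseudo register helpers below rely on this aliasing.  */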
6244 static void
6245 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
6246 int regnum, gdb_byte *buf)
6247 {
6248 char name_buf[4];
6249 gdb_byte reg_buf[8];
6250 int offset, double_regnum;
6251
6252 sprintf (name_buf, "d%d", regnum << 1);
6253 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6254 strlen (name_buf));
6255
6256 /* d0 is always the least significant half of q0. */
6257 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6258 offset = 8;
6259 else
6260 offset = 0;
6261
6262 regcache_raw_read (regcache, double_regnum, reg_buf);
6263 memcpy (buf + offset, reg_buf, 8);
6264
6265 offset = 8 - offset;
6266 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
6267 memcpy (buf + offset, reg_buf, 8);
6268 }
6269
6270 static void
6271 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
6272 int regnum, gdb_byte *buf)
6273 {
6274 const int num_regs = gdbarch_num_regs (gdbarch);
6275 char name_buf[4];
6276 gdb_byte reg_buf[8];
6277 int offset, double_regnum;
6278
6279 gdb_assert (regnum >= num_regs);
6280 regnum -= num_regs;
6281
6282 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6283 /* Quad-precision register. */
6284 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
6285 else
6286 {
6287 /* Single-precision register. */
6288 gdb_assert (regnum < 32);
6289
6290 /* s0 is always the least significant half of d0. */
6291 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6292 offset = (regnum & 1) ? 0 : 4;
6293 else
6294 offset = (regnum & 1) ? 4 : 0;
6295
6296 sprintf (name_buf, "d%d", regnum >> 1);
6297 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6298 strlen (name_buf));
6299
6300 regcache_raw_read (regcache, double_regnum, reg_buf);
6301 memcpy (buf, reg_buf + offset, 4);
6302 }
6303 }
6304
6305 /* Store the contents of BUF to a NEON quad register, by writing to
6306 two double registers. This is used to implement the quad pseudo
6307 registers, and for argument passing in case the quad registers are
6308 missing; vectors are passed in quad registers when using the VFP
6309 ABI, even if a NEON unit is not present. REGNUM is the index
6310 of the quad register, in [0, 15]. */
6311
6312 static void
6313 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
6314 int regnum, const gdb_byte *buf)
6315 {
6316 char name_buf[4];
6317 gdb_byte reg_buf[8];
6318 int offset, double_regnum;
6319
6320 sprintf (name_buf, "d%d", regnum << 1);
6321 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6322 strlen (name_buf));
6323
6324 /* d0 is always the least significant half of q0. */
6325 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6326 offset = 8;
6327 else
6328 offset = 0;
6329
6330 regcache_raw_write (regcache, double_regnum, buf + offset);
6331 offset = 8 - offset;
6332 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
6333 }
6334
6335 static void
6336 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
6337 int regnum, const gdb_byte *buf)
6338 {
6339 const int num_regs = gdbarch_num_regs (gdbarch);
6340 char name_buf[4];
6341 gdb_byte reg_buf[8];
6342 int offset, double_regnum;
6343
6344 gdb_assert (regnum >= num_regs);
6345 regnum -= num_regs;
6346
6347 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6348 /* Quad-precision register. */
6349 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
6350 else
6351 {
6352 /* Single-precision register. */
6353 gdb_assert (regnum < 32);
6354
6355 /* s0 is always the least significant half of d0. */
6356 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6357 offset = (regnum & 1) ? 0 : 4;
6358 else
6359 offset = (regnum & 1) ? 4 : 0;
6360
6361 sprintf (name_buf, "d%d", regnum >> 1);
6362 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6363 strlen (name_buf));
6364
6365 regcache_raw_read (regcache, double_regnum, reg_buf);
6366 memcpy (reg_buf + offset, buf, 4);
6367 regcache_raw_write (regcache, double_regnum, reg_buf);
6368 }
6369 }
6370
6371 static struct value *
6372 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
6373 {
6374 const int *reg_p = baton;
6375 return value_of_register (*reg_p, frame);
6376 }
6377 \f
6378 static enum gdb_osabi
6379 arm_elf_osabi_sniffer (bfd *abfd)
6380 {
6381 unsigned int elfosabi;
6382 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
6383
6384 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
6385
6386 if (elfosabi == ELFOSABI_ARM)
6387 /* GNU tools use this value. Check note sections in this case,
6388 as well. */
6389 bfd_map_over_sections (abfd,
6390 generic_elf_osabi_sniff_abi_tag_sections,
6391 &osabi);
6392
6393 /* Anything else will be handled by the generic ELF sniffer. */
6394 return osabi;
6395 }
6396
6397 \f
6398 /* Initialize the current architecture based on INFO. If possible,
6399 re-use an architecture from ARCHES, which is a list of
6400 architectures already created during this debugging session.
6401
6402 Called e.g. at program startup, when reading a core file, and when
6403 reading a binary file. */
6404
6405 static struct gdbarch *
6406 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
6407 {
6408 struct gdbarch_tdep *tdep;
6409 struct gdbarch *gdbarch;
6410 struct gdbarch_list *best_arch;
6411 enum arm_abi_kind arm_abi = arm_abi_global;
6412 enum arm_float_model fp_model = arm_fp_model;
6413 struct tdesc_arch_data *tdesc_data = NULL;
6414 int i;
6415 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
6416 int have_neon = 0;
6417 int have_fpa_registers = 1;
6418
6419 /* Check any target description for validity. */
6420 if (tdesc_has_registers (info.target_desc))
6421 {
6422 /* For most registers we require GDB's default names; but also allow
6423 the numeric names for sp / lr / pc, as a convenience. */
6424 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
6425 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
6426 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
6427
6428 const struct tdesc_feature *feature;
6429 int valid_p;
6430
6431 feature = tdesc_find_feature (info.target_desc,
6432 "org.gnu.gdb.arm.core");
6433 if (feature == NULL)
6434 return NULL;
6435
6436 tdesc_data = tdesc_data_alloc ();
6437
6438 valid_p = 1;
6439 for (i = 0; i < ARM_SP_REGNUM; i++)
6440 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
6441 arm_register_names[i]);
6442 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6443 ARM_SP_REGNUM,
6444 arm_sp_names);
6445 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6446 ARM_LR_REGNUM,
6447 arm_lr_names);
6448 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6449 ARM_PC_REGNUM,
6450 arm_pc_names);
6451 valid_p &= tdesc_numbered_register (feature, tdesc_data,
6452 ARM_PS_REGNUM, "cpsr");
6453
6454 if (!valid_p)
6455 {
6456 tdesc_data_cleanup (tdesc_data);
6457 return NULL;
6458 }
6459
6460 feature = tdesc_find_feature (info.target_desc,
6461 "org.gnu.gdb.arm.fpa");
6462 if (feature != NULL)
6463 {
6464 valid_p = 1;
6465 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
6466 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
6467 arm_register_names[i]);
6468 if (!valid_p)
6469 {
6470 tdesc_data_cleanup (tdesc_data);
6471 return NULL;
6472 }
6473 }
6474 else
6475 have_fpa_registers = 0;
6476
6477 feature = tdesc_find_feature (info.target_desc,
6478 "org.gnu.gdb.xscale.iwmmxt");
6479 if (feature != NULL)
6480 {
6481 static const char *const iwmmxt_names[] = {
6482 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
6483 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
6484 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
6485 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
6486 };
6487
6488 valid_p = 1;
6489 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
6490 valid_p
6491 &= tdesc_numbered_register (feature, tdesc_data, i,
6492 iwmmxt_names[i - ARM_WR0_REGNUM]);
6493
6494 /* Check for the control registers, but do not fail if they
6495 are missing. */
6496 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
6497 tdesc_numbered_register (feature, tdesc_data, i,
6498 iwmmxt_names[i - ARM_WR0_REGNUM]);
6499
6500 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
6501 valid_p
6502 &= tdesc_numbered_register (feature, tdesc_data, i,
6503 iwmmxt_names[i - ARM_WR0_REGNUM]);
6504
6505 if (!valid_p)
6506 {
6507 tdesc_data_cleanup (tdesc_data);
6508 return NULL;
6509 }
6510 }
6511
6512 /* If we have a VFP unit, check whether the single precision registers
6513 are present. If not, then we will synthesize them as pseudo
6514 registers. */
6515 feature = tdesc_find_feature (info.target_desc,
6516 "org.gnu.gdb.arm.vfp");
6517 if (feature != NULL)
6518 {
6519 static const char *const vfp_double_names[] = {
6520 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
6521 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
6522 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
6523 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
6524 };
6525
6526 /* Require the double precision registers. There must be either
6527 16 or 32. */
6528 valid_p = 1;
6529 for (i = 0; i < 32; i++)
6530 {
6531 valid_p &= tdesc_numbered_register (feature, tdesc_data,
6532 ARM_D0_REGNUM + i,
6533 vfp_double_names[i]);
6534 if (!valid_p)
6535 break;
6536 }
6537
6538 if (!valid_p && i != 16)
6539 {
6540 tdesc_data_cleanup (tdesc_data);
6541 return NULL;
6542 }
6543
6544 if (tdesc_unnumbered_register (feature, "s0") == 0)
6545 have_vfp_pseudos = 1;
6546
6547 have_vfp_registers = 1;
6548
6549 /* If we have VFP, also check for NEON. The architecture allows
6550 NEON without VFP (integer vector operations only), but GDB
6551 does not support that. */
6552 feature = tdesc_find_feature (info.target_desc,
6553 "org.gnu.gdb.arm.neon");
6554 if (feature != NULL)
6555 {
6556 /* NEON requires 32 double-precision registers. */
6557 if (i != 32)
6558 {
6559 tdesc_data_cleanup (tdesc_data);
6560 return NULL;
6561 }
6562
6563 /* If there are quad registers defined by the stub, use
6564 their type; otherwise (normally) provide them with
6565 the default type. */
6566 if (tdesc_unnumbered_register (feature, "q0") == 0)
6567 have_neon_pseudos = 1;
6568
6569 have_neon = 1;
6570 }
6571 }
6572 }
6573
6574 /* If we have an object to base this architecture on, try to determine
6575 its ABI. */
6576
6577 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
6578 {
6579 int ei_osabi, e_flags;
6580
6581 switch (bfd_get_flavour (info.abfd))
6582 {
6583 case bfd_target_aout_flavour:
6584 /* Assume it's an old APCS-style ABI. */
6585 arm_abi = ARM_ABI_APCS;
6586 break;
6587
6588 case bfd_target_coff_flavour:
6589 /* Assume it's an old APCS-style ABI. */
6590 /* XXX WinCE? */
6591 arm_abi = ARM_ABI_APCS;
6592 break;
6593
6594 case bfd_target_elf_flavour:
6595 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
6596 e_flags = elf_elfheader (info.abfd)->e_flags;
6597
6598 if (ei_osabi == ELFOSABI_ARM)
6599 {
6600 /* GNU tools used to use this value, but do not for EABI
6601 objects. There's nowhere to tag an EABI version
6602 anyway, so assume APCS. */
6603 arm_abi = ARM_ABI_APCS;
6604 }
6605 else if (ei_osabi == ELFOSABI_NONE)
6606 {
6607 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
6608
6609 switch (eabi_ver)
6610 {
6611 case EF_ARM_EABI_UNKNOWN:
6612 /* Assume GNU tools. */
6613 arm_abi = ARM_ABI_APCS;
6614 break;
6615
6616 case EF_ARM_EABI_VER4:
6617 case EF_ARM_EABI_VER5:
6618 arm_abi = ARM_ABI_AAPCS;
6619 /* EABI binaries default to VFP float ordering.
6620 They may also contain build attributes that can
6621 be used to identify if the VFP argument-passing
6622 ABI is in use. */
6623 if (fp_model == ARM_FLOAT_AUTO)
6624 {
6625 #ifdef HAVE_ELF
6626 switch (bfd_elf_get_obj_attr_int (info.abfd,
6627 OBJ_ATTR_PROC,
6628 Tag_ABI_VFP_args))
6629 {
6630 case 0:
6631 /* "The user intended FP parameter/result
6632 passing to conform to AAPCS, base
6633 variant". */
6634 fp_model = ARM_FLOAT_SOFT_VFP;
6635 break;
6636 case 1:
6637 /* "The user intended FP parameter/result
6638 passing to conform to AAPCS, VFP
6639 variant". */
6640 fp_model = ARM_FLOAT_VFP;
6641 break;
6642 case 2:
6643 /* "The user intended FP parameter/result
6644 passing to conform to tool chain-specific
6645 conventions" - we don't know any such
6646 conventions, so leave it as "auto". */
6647 break;
6648 default:
6649 /* Attribute value not mentioned in the
6650 October 2008 ABI, so leave it as
6651 "auto". */
6652 break;
6653 }
6654 #else
6655 fp_model = ARM_FLOAT_SOFT_VFP;
6656 #endif
6657 }
6658 break;
6659
6660 default:
6661 /* Leave it as "auto". */
6662 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
6663 break;
6664 }
6665 }
6666
6667 if (fp_model == ARM_FLOAT_AUTO)
6668 {
6669 int e_flags = elf_elfheader (info.abfd)->e_flags;
6670
6671 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
6672 {
6673 case 0:
6674 /* Leave it as "auto". Strictly speaking this case
6675 means FPA, but almost nobody uses that now, and
6676 many toolchains fail to set the appropriate bits
6677 for the floating-point model they use. */
6678 break;
6679 case EF_ARM_SOFT_FLOAT:
6680 fp_model = ARM_FLOAT_SOFT_FPA;
6681 break;
6682 case EF_ARM_VFP_FLOAT:
6683 fp_model = ARM_FLOAT_VFP;
6684 break;
6685 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
6686 fp_model = ARM_FLOAT_SOFT_VFP;
6687 break;
6688 }
6689 }
6690
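	  /* BE8 images use big-endian data but little-endian instruction
	     encodings, so force the byte order used for reading code to
	     little-endian.  */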
6691 if (e_flags & EF_ARM_BE8)
6692 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
6693
6694 break;
6695
6696 default:
6697 /* Leave it as "auto". */
6698 break;
6699 }
6700 }
6701
6702 /* If there is already a candidate, use it. */
6703 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
6704 best_arch != NULL;
6705 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
6706 {
6707 if (arm_abi != ARM_ABI_AUTO
6708 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
6709 continue;
6710
6711 if (fp_model != ARM_FLOAT_AUTO
6712 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
6713 continue;
6714
6715 /* There are various other properties in tdep that we do not
6716 need to check here: those derived from a target description,
6717 since gdbarches with a different target description are
6718 automatically disqualified. */
6719
6720 /* Found a match. */
6721 break;
6722 }
6723
6724 if (best_arch != NULL)
6725 {
6726 if (tdesc_data != NULL)
6727 tdesc_data_cleanup (tdesc_data);
6728 return best_arch->gdbarch;
6729 }
6730
6731 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
6732 gdbarch = gdbarch_alloc (&info, tdep);
6733
6734 /* Record additional information about the architecture we are defining.
6735 These are gdbarch discriminators, like the OSABI. */
6736 tdep->arm_abi = arm_abi;
6737 tdep->fp_model = fp_model;
6738 tdep->have_fpa_registers = have_fpa_registers;
6739 tdep->have_vfp_registers = have_vfp_registers;
6740 tdep->have_vfp_pseudos = have_vfp_pseudos;
6741 tdep->have_neon_pseudos = have_neon_pseudos;
6742 tdep->have_neon = have_neon;
6743
6744 /* Breakpoints. */
6745 switch (info.byte_order_for_code)
6746 {
6747 case BFD_ENDIAN_BIG:
6748 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
6749 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
6750 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
6751 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
6752
6753 break;
6754
6755 case BFD_ENDIAN_LITTLE:
6756 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
6757 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
6758 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
6759 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
6760
6761 break;
6762
6763 default:
6764 internal_error (__FILE__, __LINE__,
6765 _("arm_gdbarch_init: bad byte order for float format"));
6766 }
6767
6768 /* On ARM targets char defaults to unsigned. */
6769 set_gdbarch_char_signed (gdbarch, 0);
6770
6771 /* Note: for displaced stepping, this includes the breakpoint, and one word
6772 of additional scratch space. This setting isn't used for anything beside
6773 displaced stepping at present. */
6774 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
6775
6776 /* This should be low enough for everything. */
6777 tdep->lowest_pc = 0x20;
6778 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
6779
6780 /* The default, for both APCS and AAPCS, is to return small
6781 structures in registers. */
6782 tdep->struct_return = reg_struct_return;
6783
6784 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
6785 set_gdbarch_frame_align (gdbarch, arm_frame_align);
6786
6787 set_gdbarch_write_pc (gdbarch, arm_write_pc);
6788
6789 /* Frame handling. */
6790 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
6791 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
6792 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
6793
6794 frame_base_set_default (gdbarch, &arm_normal_base);
6795
6796 /* Address manipulation. */
6797 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
6798 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
6799
6800 /* Advance PC across function entry code. */
6801 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
6802
6803 /* Skip trampolines. */
6804 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
6805
6806 /* The stack grows downward. */
6807 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
6808
6809 /* Breakpoint manipulation. */
6810 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
6811 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
6812 arm_remote_breakpoint_from_pc);
6813
6814 /* Information about registers, etc. */
6815 set_gdbarch_deprecated_fp_regnum (gdbarch, ARM_FP_REGNUM); /* ??? */
6816 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
6817 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
6818 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
6819 set_gdbarch_register_type (gdbarch, arm_register_type);
6820
6821 /* This "info float" is FPA-specific. Use the generic version if we
6822 do not have FPA. */
6823 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
6824 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
6825
6826 /* Internal <-> external register number maps. */
6827 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
6828 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
6829
6830 set_gdbarch_register_name (gdbarch, arm_register_name);
6831
6832 /* Returning results. */
6833 set_gdbarch_return_value (gdbarch, arm_return_value);
6834
6835 /* Disassembly. */
6836 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
6837
6838 /* Minsymbol frobbing. */
6839 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
6840 set_gdbarch_coff_make_msymbol_special (gdbarch,
6841 arm_coff_make_msymbol_special);
6842 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
6843
6844 /* Thumb-2 IT block support. */
6845 set_gdbarch_adjust_breakpoint_address (gdbarch,
6846 arm_adjust_breakpoint_address);
6847
6848 /* Virtual tables. */
6849 set_gdbarch_vbit_in_delta (gdbarch, 1);
6850
6851 /* Hook in the ABI-specific overrides, if they have been registered. */
6852 gdbarch_init_osabi (info, gdbarch);
6853
6854 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
6855
6856 /* Add some default predicates. */
6857 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
6858 dwarf2_append_unwinders (gdbarch);
6859 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
6860
6861 /* Now we have tuned the configuration, set a few final things,
6862 based on what the OS ABI has told us. */
6863
6864 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
6865 binaries are always marked. */
6866 if (tdep->arm_abi == ARM_ABI_AUTO)
6867 tdep->arm_abi = ARM_ABI_APCS;
6868
6869 /* We used to default to FPA for generic ARM, but almost nobody
6870 uses that now, and we now provide a way for the user to force
6871 the model. So default to the most useful variant. */
6872 if (tdep->fp_model == ARM_FLOAT_AUTO)
6873 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
6874
6875 if (tdep->jb_pc >= 0)
6876 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
6877
6878 /* Floating point sizes and format. */
6879 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
6880 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
6881 {
6882 set_gdbarch_double_format
6883 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
6884 set_gdbarch_long_double_format
6885 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
6886 }
6887 else
6888 {
6889 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
6890 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
6891 }
6892
6893 if (have_vfp_pseudos)
6894 {
6895 /* NOTE: These are the only pseudo registers used by
6896 the ARM target at the moment. If more are added, a
6897 little more care in numbering will be needed. */
6898
6899 int num_pseudos = 32;
6900 if (have_neon_pseudos)
6901 num_pseudos += 16;
6902 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
6903 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
6904 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
6905 }
6906
6907 if (tdesc_data)
6908 {
6909 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
6910
6911 tdesc_use_registers (gdbarch, info.target_desc, tdesc_data);
6912
6913 /* Override tdesc_register_type to adjust the types of VFP
6914 registers for NEON. */
6915 set_gdbarch_register_type (gdbarch, arm_register_type);
6916 }
6917
6918 /* Add standard register aliases. We add aliases even for those
6919      names which are used by the current architecture - it's simpler,
6920 and does no harm, since nothing ever lists user registers. */
6921 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
6922 user_reg_add (gdbarch, arm_register_aliases[i].name,
6923 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
6924
6925 return gdbarch;
6926 }
6927
6928 static void
6929 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
6930 {
6931 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6932
6933 if (tdep == NULL)
6934 return;
6935
6936   fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
6937 (unsigned long) tdep->lowest_pc);
6938 }
6939
6940 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
6941
6942 void
6943 _initialize_arm_tdep (void)
6944 {
6945 struct ui_file *stb;
6946 long length;
6947 struct cmd_list_element *new_set, *new_show;
6948 const char *setname;
6949 const char *setdesc;
6950 const char *const *regnames;
6951 int numregs, i, j;
6952 static char *helptext;
6953 char regdesc[1024], *rdptr = regdesc;
6954 size_t rest = sizeof (regdesc);
6955
6956 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
6957
6958 arm_objfile_data_key
6959 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
6960
6961 /* Register an ELF OS ABI sniffer for ARM binaries. */
6962 gdbarch_register_osabi_sniffer (bfd_arch_arm,
6963 bfd_target_elf_flavour,
6964 arm_elf_osabi_sniffer);
6965
6966 /* Get the number of possible sets of register names defined in opcodes. */
6967 num_disassembly_options = get_arm_regname_num_options ();
6968
6969 /* Add root prefix command for all "set arm"/"show arm" commands. */
6970 add_prefix_cmd ("arm", no_class, set_arm_command,
6971 _("Various ARM-specific commands."),
6972 &setarmcmdlist, "set arm ", 0, &setlist);
6973
6974 add_prefix_cmd ("arm", no_class, show_arm_command,
6975 _("Various ARM-specific commands."),
6976 &showarmcmdlist, "show arm ", 0, &showlist);
6977
6978 /* Sync the opcode insn printer with our register viewer. */
6979 parse_arm_disassembler_option ("reg-names-std");
6980
6981 /* Initialize the array that will be passed to
6982 add_setshow_enum_cmd(). */
6983 valid_disassembly_styles
6984 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
6985 for (i = 0; i < num_disassembly_options; i++)
6986 {
6987 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
6988 valid_disassembly_styles[i] = setname;
6989 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
6990 rdptr += length;
6991 rest -= length;
6992 /* When we find the default names, tell the disassembler to use
6993 them. */
6994 if (!strcmp (setname, "std"))
6995 {
6996 disassembly_style = setname;
6997 set_arm_regname_option (i);
6998 }
6999 }
7000 /* Mark the end of valid options. */
7001 valid_disassembly_styles[num_disassembly_options] = NULL;
7002
7003 /* Create the help text. */
7004 stb = mem_fileopen ();
7005 fprintf_unfiltered (stb, "%s%s%s",
7006 _("The valid values are:\n"),
7007 regdesc,
7008 _("The default is \"std\"."));
7009 helptext = ui_file_xstrdup (stb, NULL);
7010 ui_file_delete (stb);
7011
7012 add_setshow_enum_cmd("disassembler", no_class,
7013 valid_disassembly_styles, &disassembly_style,
7014 _("Set the disassembly style."),
7015 _("Show the disassembly style."),
7016 helptext,
7017 set_disassembly_style_sfunc,
7018 NULL, /* FIXME: i18n: The disassembly style is \"%s\". */
7019 &setarmcmdlist, &showarmcmdlist);
7020
7021 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
7022 _("Set usage of ARM 32-bit mode."),
7023 _("Show usage of ARM 32-bit mode."),
7024 _("When off, a 26-bit PC will be used."),
7025 NULL,
7026 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
7027 &setarmcmdlist, &showarmcmdlist);
7028
7029 /* Add a command to allow the user to force the FPU model. */
7030 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
7031 _("Set the floating point type."),
7032 _("Show the floating point type."),
7033 _("auto - Determine the FP typefrom the OS-ABI.\n\
7034 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7035 fpa - FPA co-processor (GCC compiled).\n\
7036 softvfp - Software FP with pure-endian doubles.\n\
7037 vfp - VFP co-processor."),
7038 set_fp_model_sfunc, show_fp_model,
7039 &setarmcmdlist, &showarmcmdlist);
7040
7041 /* Add a command to allow the user to force the ABI. */
7042 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
7043 _("Set the ABI."),
7044 _("Show the ABI."),
7045 NULL, arm_set_abi, arm_show_abi,
7046 &setarmcmdlist, &showarmcmdlist);
7047
7048 /* Add two commands to allow the user to force the assumed
7049 execution mode. */
7050 add_setshow_enum_cmd ("fallback-mode", class_support,
7051 arm_mode_strings, &arm_fallback_mode_string,
7052 _("Set the mode assumed when symbols are unavailable."),
7053 _("Show the mode assumed when symbols are unavailable."),
7054 NULL, NULL, arm_show_fallback_mode,
7055 &setarmcmdlist, &showarmcmdlist);
7056 add_setshow_enum_cmd ("force-mode", class_support,
7057 arm_mode_strings, &arm_force_mode_string,
7058 _("Set the mode assumed even when symbols are available."),
7059 _("Show the mode assumed even when symbols are available."),
7060 NULL, NULL, arm_show_force_mode,
7061 &setarmcmdlist, &showarmcmdlist);
7062
7063 /* Debugging flag. */
7064 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
7065 _("Set ARM debugging."),
7066 _("Show ARM debugging."),
7067 _("When on, arm-specific debugging is enabled."),
7068 NULL,
7069			   NULL, /* FIXME: i18n: "ARM debugging is %s."  */
7070 &setdebuglist, &showdebuglist);
7071 }