1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper () */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
45
46 #include "arm-tdep.h"
47 #include "gdb/sim-arm.h"
48
49 #include "elf-bfd.h"
50 #include "coff/internal.h"
51 #include "elf/arm.h"
52
53 #include "gdb_assert.h"
54 #include "vec.h"
55
56 static int arm_debug;
57
58 /* Macros for setting and testing a bit in a minimal symbol that marks
59    it as a Thumb function.  The MSB of the minimal symbol's "info" field
60 is used for this purpose.
61
62 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
63 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
64
65 #define MSYMBOL_SET_SPECIAL(msym) \
66 MSYMBOL_TARGET_FLAG_1 (msym) = 1
67
68 #define MSYMBOL_IS_SPECIAL(msym) \
69 MSYMBOL_TARGET_FLAG_1 (msym)
70
71 /* Per-objfile data used for mapping symbols. */
72 static const struct objfile_data *arm_objfile_data_key;
73
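/* Mapping symbols come from the ELF symbol table: the ARM ELF ABI marks
   the start of ARM code, Thumb code and literal data within a section
   with the special symbols "$a", "$t" and "$d".  In the structure below,
   TYPE holds that single letter ('a', 't' or 'd') and VALUE holds the
   section-relative offset at which the new state begins.  */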
74 struct arm_mapping_symbol
75 {
76 bfd_vma value;
77 char type;
78 };
79 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
80 DEF_VEC_O(arm_mapping_symbol_s);
81
82 struct arm_per_objfile
83 {
84 VEC(arm_mapping_symbol_s) **section_maps;
85 };
86
87 /* The list of available "set arm ..." and "show arm ..." commands. */
88 static struct cmd_list_element *setarmcmdlist = NULL;
89 static struct cmd_list_element *showarmcmdlist = NULL;
90
91 /* The type of floating-point to use. Keep this in sync with enum
92 arm_float_model, and the help string in _initialize_arm_tdep. */
93 static const char *fp_model_strings[] =
94 {
95 "auto",
96 "softfpa",
97 "fpa",
98 "softvfp",
99 "vfp",
100 NULL
101 };
102
103 /* A variable that can be configured by the user. */
104 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
105 static const char *current_fp_model = "auto";
106
107 /* The ABI to use. Keep this in sync with arm_abi_kind. */
108 static const char *arm_abi_strings[] =
109 {
110 "auto",
111 "APCS",
112 "AAPCS",
113 NULL
114 };
115
116 /* A variable that can be configured by the user. */
117 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
118 static const char *arm_abi_string = "auto";
119
120 /* The execution mode to assume. */
121 static const char *arm_mode_strings[] =
122 {
123 "auto",
124 "arm",
125 "thumb"
126 };
127
128 static const char *arm_fallback_mode_string = "auto";
129 static const char *arm_force_mode_string = "auto";
130
131 /* Number of different reg name sets (options). */
132 static int num_disassembly_options;
133
134 /* The standard register names, and all the valid aliases for them. */
135 static const struct
136 {
137 const char *name;
138 int regnum;
139 } arm_register_aliases[] = {
140 /* Basic register numbers. */
141 { "r0", 0 },
142 { "r1", 1 },
143 { "r2", 2 },
144 { "r3", 3 },
145 { "r4", 4 },
146 { "r5", 5 },
147 { "r6", 6 },
148 { "r7", 7 },
149 { "r8", 8 },
150 { "r9", 9 },
151 { "r10", 10 },
152 { "r11", 11 },
153 { "r12", 12 },
154 { "r13", 13 },
155 { "r14", 14 },
156 { "r15", 15 },
157 /* Synonyms (argument and variable registers). */
158 { "a1", 0 },
159 { "a2", 1 },
160 { "a3", 2 },
161 { "a4", 3 },
162 { "v1", 4 },
163 { "v2", 5 },
164 { "v3", 6 },
165 { "v4", 7 },
166 { "v5", 8 },
167 { "v6", 9 },
168 { "v7", 10 },
169 { "v8", 11 },
170 /* Other platform-specific names for r9. */
171 { "sb", 9 },
172 { "tr", 9 },
173 /* Special names. */
174 { "ip", 12 },
175 { "sp", 13 },
176 { "lr", 14 },
177 { "pc", 15 },
178 /* Names used by GCC (not listed in the ARM EABI). */
179 { "sl", 10 },
180 { "fp", 11 },
181 /* A special name from the older ATPCS. */
182 { "wr", 7 },
183 };
184
185 static const char *const arm_register_names[] =
186 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
187 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
188 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
189 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
190 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
191 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
192 "fps", "cpsr" }; /* 24 25 */
193
194 /* Valid register name styles. */
195 static const char **valid_disassembly_styles;
196
197 /* Disassembly style to use. Default to "std" register names. */
198 static const char *disassembly_style;
199
200 /* This is used to keep the bfd arch_info in sync with the disassembly
201 style. */
202 static void set_disassembly_style_sfunc (char *, int,
203 struct cmd_list_element *);
204 static void set_disassembly_style (void);
205
206 static void convert_from_extended (const struct floatformat *, const void *,
207 void *, int);
208 static void convert_to_extended (const struct floatformat *, void *,
209 const void *, int);
210
211 static void arm_neon_quad_read (struct gdbarch *gdbarch,
212 struct regcache *regcache,
213 int regnum, gdb_byte *buf);
214 static void arm_neon_quad_write (struct gdbarch *gdbarch,
215 struct regcache *regcache,
216 int regnum, const gdb_byte *buf);
217
218 struct arm_prologue_cache
219 {
220 /* The stack pointer at the time this frame was created; i.e. the
221 caller's stack pointer when this function was called. It is used
222 to identify this frame. */
223 CORE_ADDR prev_sp;
224
225 /* The frame base for this frame is just prev_sp - frame size.
226 FRAMESIZE is the distance from the frame pointer to the
227 initial stack pointer. */
228
229 int framesize;
230
231 /* The register used to hold the frame pointer for this frame. */
232 int framereg;
233
234 /* Saved register offsets. */
235 struct trad_frame_saved_reg *saved_regs;
236 };
237
238 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
239 CORE_ADDR prologue_start,
240 CORE_ADDR prologue_end,
241 struct arm_prologue_cache *cache);
242
243 /* Architecture version for displaced stepping.  This affects the behaviour of
244 certain instructions, and really should not be hard-wired. */
245
246 #define DISPLACED_STEPPING_ARCH_VERSION 5
247
248 /* Addresses for calling Thumb functions have bit 0 set.
249 Here are some macros to test, set, or clear bit 0 of addresses. */
250 #define IS_THUMB_ADDR(addr) ((addr) & 1)
251 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
252 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
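/* For example, MAKE_THUMB_ADDR (0x8000) yields 0x8001, which
   IS_THUMB_ADDR recognizes as a Thumb destination; UNMAKE_THUMB_ADDR
   (0x8001) recovers the real address 0x8000.  */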
253
254 /* Set to true if the 32-bit mode is in use. */
255
256 int arm_apcs_32 = 1;
257
258 /* Determine if FRAME is executing in Thumb mode. */
259
260 static int
261 arm_frame_is_thumb (struct frame_info *frame)
262 {
263 CORE_ADDR cpsr;
264
265 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
266 directly (from a signal frame or dummy frame) or by interpreting
267 the saved LR (from a prologue or DWARF frame). So consult it and
268 trust the unwinders. */
269 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
270
271 return (cpsr & CPSR_T) != 0;
272 }
273
274 /* Callback for VEC_lower_bound. */
275
276 static inline int
277 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
278 const struct arm_mapping_symbol *rhs)
279 {
280 return lhs->value < rhs->value;
281 }
282
283 /* Search for the mapping symbol covering MEMADDR. If one is found,
284 return its type. Otherwise, return 0. If START is non-NULL,
285 set *START to the location of the mapping symbol. */
286
287 static char
288 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
289 {
290 struct obj_section *sec;
291
292 /* If there are mapping symbols, consult them. */
293 sec = find_pc_section (memaddr);
294 if (sec != NULL)
295 {
296 struct arm_per_objfile *data;
297 VEC(arm_mapping_symbol_s) *map;
298 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
299 0 };
300 unsigned int idx;
301
302 data = objfile_data (sec->objfile, arm_objfile_data_key);
303 if (data != NULL)
304 {
305 map = data->section_maps[sec->the_bfd_section->index];
306 if (!VEC_empty (arm_mapping_symbol_s, map))
307 {
308 struct arm_mapping_symbol *map_sym;
309
310 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
311 arm_compare_mapping_symbols);
312
313 /* VEC_lower_bound finds the earliest ordered insertion
314 point. If the following symbol starts at this exact
315 address, we use that; otherwise, the preceding
316 mapping symbol covers this address. */
317 if (idx < VEC_length (arm_mapping_symbol_s, map))
318 {
319 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
320 if (map_sym->value == map_key.value)
321 {
322 if (start)
323 *start = map_sym->value + obj_section_addr (sec);
324 return map_sym->type;
325 }
326 }
327
328 if (idx > 0)
329 {
330 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
331 if (start)
332 *start = map_sym->value + obj_section_addr (sec);
333 return map_sym->type;
334 }
335 }
336 }
337 }
338
339 return 0;
340 }
341
342 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
343 CORE_ADDR pc, int insert_bkpt);
344
345 /* Determine if the program counter specified in MEMADDR is in a Thumb
346 function. This function should be called for addresses unrelated to
347 any executing frame; otherwise, prefer arm_frame_is_thumb. */
348
349 static int
350 arm_pc_is_thumb (CORE_ADDR memaddr)
351 {
352 struct obj_section *sec;
353 struct minimal_symbol *sym;
354 char type;
355
356 /* If bit 0 of the address is set, assume this is a Thumb address. */
357 if (IS_THUMB_ADDR (memaddr))
358 return 1;
359
360 /* If the user wants to override the symbol table, let them. */
361 if (strcmp (arm_force_mode_string, "arm") == 0)
362 return 0;
363 if (strcmp (arm_force_mode_string, "thumb") == 0)
364 return 1;
365
366 /* If there are mapping symbols, consult them. */
367 type = arm_find_mapping_symbol (memaddr, NULL);
368 if (type)
369 return type == 't';
370
371 /* Thumb functions have a "special" bit set in minimal symbols. */
372 sym = lookup_minimal_symbol_by_pc (memaddr);
373 if (sym)
374 return (MSYMBOL_IS_SPECIAL (sym));
375
376 /* If the user wants to override the fallback mode, let them. */
377 if (strcmp (arm_fallback_mode_string, "arm") == 0)
378 return 0;
379 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
380 return 1;
381
382 /* If we couldn't find any symbol, but we're talking to a running
383 target, then trust the current value of $cpsr. This lets
384 "display/i $pc" always show the correct mode (though if there is
385 a symbol table we will not reach here, so it still may not be
386 displayed in the mode in which it will be executed).
387
388 As a further heuristic, if we detect that we are single-stepping,
389 check which state executing the current instruction will leave us
390 in.  */
391 if (target_has_registers)
392 {
393 struct frame_info *current_frame = get_current_frame ();
394 CORE_ADDR current_pc = get_frame_pc (current_frame);
395 int is_thumb = arm_frame_is_thumb (current_frame);
396 CORE_ADDR next_pc;
397 if (memaddr == current_pc)
398 return is_thumb;
399 else
400 {
401 struct gdbarch *gdbarch = get_frame_arch (current_frame);
402 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
403 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
404 return IS_THUMB_ADDR (next_pc);
405 else
406 return is_thumb;
407 }
408 }
409
410 /* Otherwise we're out of luck; we assume ARM. */
411 return 0;
412 }
413
414 /* Remove useless bits from addresses in a running program. */
415 static CORE_ADDR
416 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
417 {
418 if (arm_apcs_32)
419 return UNMAKE_THUMB_ADDR (val);
420 else
421 return (val & 0x03fffffc);
422 }
423
424 /* When reading symbols, we need to zap the low bit of the address,
425 which may be set to 1 for Thumb functions. */
426 static CORE_ADDR
427 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
428 {
429 return val & ~1;
430 }
431
432 /* Return 1 if PC is the start of a compiler helper function which
433 can be safely ignored during prologue skipping. */
434 static int
435 skip_prologue_function (CORE_ADDR pc)
436 {
437 struct minimal_symbol *msym;
438 const char *name;
439
440 msym = lookup_minimal_symbol_by_pc (pc);
441 if (msym == NULL || SYMBOL_VALUE_ADDRESS (msym) != pc)
442 return 0;
443
444 name = SYMBOL_LINKAGE_NAME (msym);
445 if (name == NULL)
446 return 0;
447
448 /* The GNU linker's Thumb call stub to foo is named
449 __foo_from_thumb. */
450 if (strstr (name, "_from_thumb") != NULL)
451 name += 2;
452
453 /* On soft-float targets, __truncdfsf2 is called to convert promoted
454 arguments to their argument types in non-prototyped
455 functions. */
456 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
457 return 1;
458 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
459 return 1;
460
461 return 0;
462 }
463
464 /* Support routines for instruction parsing. */
465 #define submask(x) ((1L << ((x) + 1)) - 1)
466 #define bit(obj,st) (((obj) >> (st)) & 1)
467 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
468 #define sbits(obj,st,fn) \
469 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
470 #define BranchDest(addr,instr) \
471 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
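/* A worked example of BranchDest: for the BL instruction 0xeb000001 at
   address 0x1000, sbits (instr, 0, 23) extracts the signed 24-bit word
   offset 1, so the destination is 0x1000 + 8 + (1 << 2) = 0x100c; the
   extra 8 accounts for the ARM pipeline's view of the PC.  */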
472
473 /* Analyze a Thumb prologue, looking for a recognizable stack frame
474 and frame pointer. Scan until we encounter a store that could
475 clobber the stack frame unexpectedly, or an unknown instruction.
476 Return the last address which is definitely safe to skip for an
477 initial breakpoint. */
478
479 static CORE_ADDR
480 thumb_analyze_prologue (struct gdbarch *gdbarch,
481 CORE_ADDR start, CORE_ADDR limit,
482 struct arm_prologue_cache *cache)
483 {
484 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
485 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
486 int i;
487 pv_t regs[16];
488 struct pv_area *stack;
489 struct cleanup *back_to;
490 CORE_ADDR offset;
491
492 for (i = 0; i < 16; i++)
493 regs[i] = pv_register (i, 0);
494 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
495 back_to = make_cleanup_free_pv_area (stack);
496
497 while (start < limit)
498 {
499 unsigned short insn;
500
501 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
502
503 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
504 {
505 int regno;
506 int mask;
507
508 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
509 break;
510
511 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
512 whether to save LR (R14). */
513 mask = (insn & 0xff) | ((insn & 0x100) << 6);
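/* For example, "push {r4, r7, lr}" is encoded as 0xb590: bits 0-7 are
   0x90 (R4 and R7) and bit 8 is set, so MASK becomes 0x4090, covering
   bits 4, 7 and 14.  */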
514
515 /* Calculate offsets of saved R0-R7 and LR. */
516 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
517 if (mask & (1 << regno))
518 {
519 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
520 -4);
521 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
522 }
523 }
524 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
525 sub sp, #simm */
526 {
527 offset = (insn & 0x7f) << 2; /* get scaled offset */
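/* For example, "sub sp, #16" is encoded as 0xb084: INSN & 0x7f is 4, so
   the scaled offset is 16, and bit 7 being set selects the SUB case
   below.  */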
528 if (insn & 0x80) /* Check for SUB. */
529 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
530 -offset);
531 else
532 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
533 offset);
534 }
535 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
536 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
537 (insn & 0xff) << 2);
538 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
539 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
540 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
541 bits (insn, 6, 8));
542 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
543 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
544 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
545 bits (insn, 0, 7));
546 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
547 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
548 && pv_is_constant (regs[bits (insn, 3, 5)]))
549 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
550 regs[bits (insn, 6, 8)]);
551 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
552 && pv_is_constant (regs[bits (insn, 3, 6)]))
553 {
554 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
555 int rm = bits (insn, 3, 6);
556 regs[rd] = pv_add (regs[rd], regs[rm]);
557 }
558 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
559 {
560 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
561 int src_reg = (insn & 0x78) >> 3;
562 regs[dst_reg] = regs[src_reg];
563 }
564 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
565 {
566 /* Handle stores to the stack. Normally pushes are used,
567 but with GCC -mtpcs-frame, there may be other stores
568 in the prologue to create the frame. */
569 int regno = (insn >> 8) & 0x7;
570 pv_t addr;
571
572 offset = (insn & 0xff) << 2;
573 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
574
575 if (pv_area_store_would_trash (stack, addr))
576 break;
577
578 pv_area_store (stack, addr, 4, regs[regno]);
579 }
580 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
581 {
582 int rd = bits (insn, 0, 2);
583 int rn = bits (insn, 3, 5);
584 pv_t addr;
585
586 offset = bits (insn, 6, 10) << 2;
587 addr = pv_add_constant (regs[rn], offset);
588
589 if (pv_area_store_would_trash (stack, addr))
590 break;
591
592 pv_area_store (stack, addr, 4, regs[rd]);
593 }
594 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
595 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
596 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
597 /* Ignore stores of argument registers to the stack. */
598 ;
599 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
600 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
601 /* Ignore block loads from the stack, potentially copying
602 parameters from memory. */
603 ;
604 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
605 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
606 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
607 /* Similarly ignore single loads from the stack. */
608 ;
609 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
610 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
611 /* Skip register copies, i.e. saves to another register
612 instead of the stack. */
613 ;
614 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
615 /* Recognize constant loads; even with small stacks these are necessary
616 on Thumb. */
617 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
618 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
619 {
620 /* Constant pool loads, for the same reason. */
621 unsigned int constant;
622 CORE_ADDR loc;
623
624 loc = start + 4 + bits (insn, 0, 7) * 4;
625 constant = read_memory_unsigned_integer (loc, 4, byte_order);
626 regs[bits (insn, 8, 10)] = pv_constant (constant);
627 }
628 else if ((insn & 0xe000) == 0xe000 && cache == NULL)
629 {
630 /* Only recognize 32-bit instructions for prologue skipping. */
631 unsigned short inst2;
632
633 inst2 = read_memory_unsigned_integer (start + 2, 2,
634 byte_order_for_code);
635
636 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
637 {
638 /* BL, BLX. Allow some special function calls when
639 skipping the prologue; GCC generates these before
640 storing arguments to the stack. */
641 CORE_ADDR nextpc;
642 int j1, j2, imm1, imm2;
643
644 imm1 = sbits (insn, 0, 10);
645 imm2 = bits (inst2, 0, 10);
646 j1 = bit (inst2, 13);
647 j2 = bit (inst2, 11);
648
649 offset = ((imm1 << 12) + (imm2 << 1));
650 offset ^= ((!j2) << 22) | ((!j1) << 23);
651
652 nextpc = start + 4 + offset;
653 /* For BLX make sure to clear the low bits. */
654 if (bit (inst2, 12) == 0)
655 nextpc = nextpc & 0xfffffffc;
656
657 if (!skip_prologue_function (nextpc))
658 break;
659 }
660 else if ((insn & 0xfe50) == 0xe800 /* stm{db,ia} Rn[!], { registers } */
661 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
662 ;
663 else if ((insn & 0xfe50) == 0xe840 /* strd Rt, Rt2, [Rn, #imm] */
664 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
665 ;
666 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!], { registers } */
667 && (inst2 & 0x8000) == 0x0000
668 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
669 ;
670 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
671 && (inst2 & 0x8000) == 0x0000)
672 /* Since we only recognize this for prologue skipping, do not bother
673 to compute the constant. */
674 regs[bits (inst2, 8, 11)] = regs[bits (insn, 0, 3)];
675 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm12 */
676 && (inst2 & 0x8000) == 0x0000)
677 /* Since we only recognize this for prologue skipping, do not bother
678 to compute the constant. */
679 regs[bits (inst2, 8, 11)] = regs[bits (insn, 0, 3)];
680 else if ((insn & 0xfbf0) == 0xf2a0 /* sub.w Rd, Rn, #imm8 */
681 && (inst2 & 0x8000) == 0x0000)
682 /* Since we only recognize this for prologue skipping, do not bother
683 to compute the constant. */
684 regs[bits (inst2, 8, 11)] = regs[bits (insn, 0, 3)];
685 else if ((insn & 0xff50) == 0xf850 /* ldr.w Rd, [Rn, #imm]{!} */
686 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
687 ;
688 else if ((insn & 0xff50) == 0xe950 /* ldrd Rt, Rt2, [Rn, #imm]{!} */
689 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
690 ;
691 else if ((insn & 0xff50) == 0xf800 /* strb.w or strh.w */
692 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
693 ;
694 else
695 {
696 /* We don't know what this instruction is. We're finished
697 scanning. NOTE: Recognizing more safe-to-ignore
698 instructions here will improve support for optimized
699 code. */
700 break;
701 }
702
703 start += 2;
704 }
705 else
706 {
707 /* We don't know what this instruction is. We're finished
708 scanning. NOTE: Recognizing more safe-to-ignore
709 instructions here will improve support for optimized
710 code. */
711 break;
712 }
713
714 start += 2;
715 }
716
717 if (arm_debug)
718 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
719 paddress (gdbarch, start));
720
721 if (cache == NULL)
722 {
723 do_cleanups (back_to);
724 return start;
725 }
726
727 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
728 {
729 /* Frame pointer is fp. Frame size is constant. */
730 cache->framereg = ARM_FP_REGNUM;
731 cache->framesize = -regs[ARM_FP_REGNUM].k;
732 }
733 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
734 {
735 /* Frame pointer is r7. Frame size is constant. */
736 cache->framereg = THUMB_FP_REGNUM;
737 cache->framesize = -regs[THUMB_FP_REGNUM].k;
738 }
739 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
740 {
741 /* Try the stack pointer... this is a bit desperate. */
742 cache->framereg = ARM_SP_REGNUM;
743 cache->framesize = -regs[ARM_SP_REGNUM].k;
744 }
745 else
746 {
747 /* We're just out of luck. We don't know where the frame is. */
748 cache->framereg = -1;
749 cache->framesize = 0;
750 }
751
752 for (i = 0; i < 16; i++)
753 if (pv_area_find_reg (stack, gdbarch, i, &offset))
754 cache->saved_regs[i].addr = offset;
755
756 do_cleanups (back_to);
757 return start;
758 }
759
760 /* Advance the PC across any function entry prologue instructions to
761 reach some "real" code.
762
763 The APCS (ARM Procedure Call Standard) defines the following
764 prologue:
765
766 mov ip, sp
767 [stmfd sp!, {a1,a2,a3,a4}]
768 stmfd sp!, {...,fp,ip,lr,pc}
769 [stfe f7, [sp, #-12]!]
770 [stfe f6, [sp, #-12]!]
771 [stfe f5, [sp, #-12]!]
772 [stfe f4, [sp, #-12]!]
773 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
774
775 static CORE_ADDR
776 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
777 {
778 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
779 unsigned long inst;
780 CORE_ADDR skip_pc;
781 CORE_ADDR func_addr, limit_pc;
782 struct symtab_and_line sal;
783
784 /* See if we can determine the end of the prologue via the symbol table.
785 If so, then return either PC, or the PC after the prologue, whichever
786 is greater. */
787 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
788 {
789 CORE_ADDR post_prologue_pc
790 = skip_prologue_using_sal (gdbarch, func_addr);
791 struct symtab *s = find_pc_symtab (func_addr);
792
793 /* GCC always emits a line note before the prologue and another
794 one after, even if the two are at the same address or on the
795 same line. Take advantage of this so that we do not need to
796 know every instruction that might appear in the prologue. We
797 will have producer information for most binaries; if it is
798 missing (e.g. for -gstabs), assume the GNU tools.  */
799 if (post_prologue_pc
800 && (s == NULL
801 || s->producer == NULL
802 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
803 return post_prologue_pc;
804
805 if (post_prologue_pc != 0)
806 {
807 CORE_ADDR analyzed_limit;
808
809 /* For non-GCC compilers, make sure the entire line is an
810 acceptable prologue; GDB will round this function's
811 return value up to the end of the following line so we
812 can not skip just part of a line (and we do not want to).
813
814 RealView does not treat the prologue specially, but does
815 associate prologue code with the opening brace; so this
816 lets us skip the first line if we think it is the opening
817 brace. */
818 if (arm_pc_is_thumb (func_addr))
819 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
820 post_prologue_pc, NULL);
821 else
822 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
823 post_prologue_pc, NULL);
824
825 if (analyzed_limit != post_prologue_pc)
826 return func_addr;
827
828 return post_prologue_pc;
829 }
830 }
831
832 /* Can't determine prologue from the symbol table, need to examine
833 instructions. */
834
835 /* Find an upper limit on the function prologue using the debug
836 information. If the debug information could not be used to provide
837 that bound, then use an arbitrary large number as the upper bound. */
838 /* Like arm_scan_prologue, stop no later than pc + 64. */
839 limit_pc = skip_prologue_using_sal (gdbarch, pc);
840 if (limit_pc == 0)
841 limit_pc = pc + 64; /* Magic. */
842
843
844 /* Check if this is Thumb code. */
845 if (arm_pc_is_thumb (pc))
846 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
847
848 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
849 {
850 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
851
852 /* "mov ip, sp" is no longer a required part of the prologue. */
853 if (inst == 0xe1a0c00d) /* mov ip, sp */
854 continue;
855
856 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
857 continue;
858
859 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
860 continue;
861
862 /* Some prologues begin with "str lr, [sp, #-4]!". */
863 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
864 continue;
865
866 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
867 continue;
868
869 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
870 continue;
871
872 /* Any insns after this point may float into the code, if it makes
873 for better instruction scheduling, so we skip them only if we
874 find them, but still consider the function to be frame-ful. */
875
876 /* We may have either one sfmfd instruction here, or several stfe
877 insns, depending on the version of floating point code we
878 support. */
879 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
880 continue;
881
882 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
883 continue;
884
885 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
886 continue;
887
888 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
889 continue;
890
891 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
892 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
893 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
894 continue;
895
896 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
897 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
898 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
899 continue;
900
901 /* Unrecognized instruction; stop scanning. */
902 break;
903 }
904
905 return skip_pc; /* End of prologue */
906 }
907
908 /* *INDENT-OFF* */
909 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
910 This function decodes a Thumb function prologue to determine:
911 1) the size of the stack frame
912 2) which registers are saved on it
913 3) the offsets of saved regs
914 4) the offset from the stack pointer to the frame pointer
915
916 A typical Thumb function prologue would create this stack frame
917 (offsets relative to FP)
918 old SP -> 24 stack parameters
919 20 LR
920 16 R7
921 R7 -> 0 local variables (16 bytes)
922 SP -> -12 additional stack space (12 bytes)
923 The frame size would thus be 36 bytes, and the frame offset would be
924 12 bytes. The frame register is R7.
925
926 The comments for thumb_analyze_prologue() describe the algorithm we use
927 to detect the end of the prologue.  */
928 /* *INDENT-ON* */
929
930 static void
931 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
932 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
933 {
934 CORE_ADDR prologue_start;
935 CORE_ADDR prologue_end;
936 CORE_ADDR current_pc;
937
938 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
939 &prologue_end))
940 {
941 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
942
943 if (sal.line == 0) /* no line info, use current PC */
944 prologue_end = prev_pc;
945 else if (sal.end < prologue_end) /* next line begins after fn end */
946 prologue_end = sal.end; /* (probably means no prologue) */
947 }
948 else
949 /* We're in the boondocks: we have no idea where the start of the
950 function is. */
951 return;
952
953 prologue_end = min (prologue_end, prev_pc);
954
955 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
956 }
957
958 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
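/* For instance, "ldr pc, [pc, #24]" (0xe59ff018) loads into the PC and is
   classified as changing control flow, while the ordinary data-processing
   instruction "add r1, r2, r3" (0xe0821003) writes a plain register and
   is not.  */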
959
960 static int
961 arm_instruction_changes_pc (uint32_t this_instr)
962 {
963 if (bits (this_instr, 28, 31) == INST_NV)
964 /* Unconditional instructions. */
965 switch (bits (this_instr, 24, 27))
966 {
967 case 0xa:
968 case 0xb:
969 /* Branch with Link and change to Thumb. */
970 return 1;
971 case 0xc:
972 case 0xd:
973 case 0xe:
974 /* Coprocessor register transfer. */
975 if (bits (this_instr, 12, 15) == 15)
976 error (_("Invalid update to pc in instruction"));
977 return 0;
978 default:
979 return 0;
980 }
981 else
982 switch (bits (this_instr, 25, 27))
983 {
984 case 0x0:
985 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
986 {
987 /* Multiplies and extra load/stores. */
988 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
989 /* Neither multiplies nor extension load/stores are allowed
990 to modify PC. */
991 return 0;
992
993 /* Otherwise, miscellaneous instructions. */
994
995 /* BX <reg>, BXJ <reg>, BLX <reg> */
996 if (bits (this_instr, 4, 27) == 0x12fff1
997 || bits (this_instr, 4, 27) == 0x12fff2
998 || bits (this_instr, 4, 27) == 0x12fff3)
999 return 1;
1000
1001 /* Other miscellaneous instructions are unpredictable if they
1002 modify PC. */
1003 return 0;
1004 }
1005 /* Data processing instruction. Fall through. */
1006
1007 case 0x1:
1008 if (bits (this_instr, 12, 15) == 15)
1009 return 1;
1010 else
1011 return 0;
1012
1013 case 0x2:
1014 case 0x3:
1015 /* Media instructions and architecturally undefined instructions. */
1016 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1017 return 0;
1018
1019 /* Stores. */
1020 if (bit (this_instr, 20) == 0)
1021 return 0;
1022
1023 /* Loads. */
1024 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1025 return 1;
1026 else
1027 return 0;
1028
1029 case 0x4:
1030 /* Load/store multiple. */
1031 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1032 return 1;
1033 else
1034 return 0;
1035
1036 case 0x5:
1037 /* Branch and branch with link. */
1038 return 1;
1039
1040 case 0x6:
1041 case 0x7:
1042 /* Coprocessor transfers or SWIs can not affect PC. */
1043 return 0;
1044
1045 default:
1046 internal_error (__FILE__, __LINE__, "bad value in switch");
1047 }
1048 }
1049
1050 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1051 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1052 fill it in. Return the first address not recognized as a prologue
1053 instruction.
1054
1055 We recognize all the instructions typically found in ARM prologues,
1056 plus harmless instructions which can be skipped (either for analysis
1057 purposes, or a more restrictive set that can be skipped when finding
1058 the end of the prologue). */
1059
1060 static CORE_ADDR
1061 arm_analyze_prologue (struct gdbarch *gdbarch,
1062 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1063 struct arm_prologue_cache *cache)
1064 {
1065 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1066 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1067 int regno;
1068 CORE_ADDR offset, current_pc;
1069 pv_t regs[ARM_FPS_REGNUM];
1070 struct pv_area *stack;
1071 struct cleanup *back_to;
1072 int framereg, framesize;
1073 CORE_ADDR unrecognized_pc = 0;
1074
1075 /* Search the prologue looking for instructions that set up the
1076 frame pointer, adjust the stack pointer, and save registers.
1077
1078 Be careful, however, and if it doesn't look like a prologue,
1079 don't try to scan it. If, for instance, a frameless function
1080 begins with stmfd sp!, then we will tell ourselves there is
1081 a frame, which will confuse stack traceback, as well as "finish"
1082 and other operations that rely on a knowledge of the stack
1083 traceback. */
1084
1085 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1086 regs[regno] = pv_register (regno, 0);
1087 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1088 back_to = make_cleanup_free_pv_area (stack);
1089
1090 for (current_pc = prologue_start;
1091 current_pc < prologue_end;
1092 current_pc += 4)
1093 {
1094 unsigned int insn
1095 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1096
1097 if (insn == 0xe1a0c00d) /* mov ip, sp */
1098 {
1099 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1100 continue;
1101 }
1102 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1103 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1104 {
1105 unsigned imm = insn & 0xff; /* immediate value */
1106 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1107 int rd = bits (insn, 12, 15);
1108 imm = (imm >> rot) | (imm << (32 - rot));
1109 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1110 continue;
1111 }
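/* The immediate decoding above (and in the SUB case below) follows the
   ARM "modified immediate" rule: an 8-bit value rotated right by twice
   the 4-bit rotate field.  For example, in "sub sp, sp, #1024"
   (0xe24ddb01) the 8-bit value is 1 and ROT is 22, giving
   1 ror 22 = 0x400.  */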
1112 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1113 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1114 {
1115 unsigned imm = insn & 0xff; /* immediate value */
1116 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1117 int rd = bits (insn, 12, 15);
1118 imm = (imm >> rot) | (imm << (32 - rot));
1119 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1120 continue;
1121 }
1122 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd, [sp, #-4]! */
1123 {
1124 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1125 break;
1126 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1127 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1128 regs[bits (insn, 12, 15)]);
1129 continue;
1130 }
1131 else if ((insn & 0xffff0000) == 0xe92d0000)
1132 /* stmfd sp!, {..., fp, ip, lr, pc}
1133 or
1134 stmfd sp!, {a1, a2, a3, a4} */
1135 {
1136 int mask = insn & 0xffff;
1137
1138 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1139 break;
1140
1141 /* Calculate offsets of saved registers. */
1142 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1143 if (mask & (1 << regno))
1144 {
1145 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1146 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1147 }
1148 }
1149 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1150 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1151 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1152 {
1153 /* No need to add this to saved_regs -- it's just an arg reg. */
1154 continue;
1155 }
1156 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1157 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1158 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1159 {
1160 /* No need to add this to saved_regs -- it's just an arg reg. */
1161 continue;
1162 }
1163 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn, { registers } */
1164 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1165 {
1166 /* No need to add this to saved_regs -- it's just arg regs. */
1167 continue;
1168 }
1169 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1170 {
1171 unsigned imm = insn & 0xff; /* immediate value */
1172 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1173 imm = (imm >> rot) | (imm << (32 - rot));
1174 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1175 }
1176 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1177 {
1178 unsigned imm = insn & 0xff; /* immediate value */
1179 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1180 imm = (imm >> rot) | (imm << (32 - rot));
1181 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1182 }
1183 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
1184 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1185 {
1186 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1187 break;
1188
1189 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1190 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1191 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1192 }
1193 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
1194 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1195 {
1196 int n_saved_fp_regs;
1197 unsigned int fp_start_reg, fp_bound_reg;
1198
1199 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1200 break;
1201
1202 if ((insn & 0x800) == 0x800) /* N0 is set */
1203 {
1204 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1205 n_saved_fp_regs = 3;
1206 else
1207 n_saved_fp_regs = 1;
1208 }
1209 else
1210 {
1211 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1212 n_saved_fp_regs = 2;
1213 else
1214 n_saved_fp_regs = 4;
1215 }
1216
1217 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1218 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1219 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1220 {
1221 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1222 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1223 regs[fp_start_reg]);
1224 }
1225 }
1226 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1227 {
1228 /* Allow some special function calls when skipping the
1229 prologue; GCC generates these before storing arguments to
1230 the stack. */
1231 CORE_ADDR dest = BranchDest (current_pc, insn);
1232
1233 if (skip_prologue_function (dest))
1234 continue;
1235 else
1236 break;
1237 }
1238 else if ((insn & 0xf0000000) != 0xe0000000)
1239 break; /* Condition not true, exit early */
1240 else if (arm_instruction_changes_pc (insn))
1241 /* Don't scan past anything that might change control flow. */
1242 break;
1243 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1244 {
1245 /* Ignore block loads from the stack, potentially copying
1246 parameters from memory. */
1247 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1248 continue;
1249 else
1250 break;
1251 }
1252 else if ((insn & 0xfc500000) == 0xe4100000)
1253 {
1254 /* Similarly ignore single loads from the stack. */
1255 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1256 continue;
1257 else
1258 break;
1259 }
1260 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1261 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1262 register instead of the stack. */
1263 continue;
1264 else
1265 {
1266 /* The optimizer might shove anything into the prologue,
1267 so we just skip what we don't recognize. */
1268 unrecognized_pc = current_pc;
1269 continue;
1270 }
1271 }
1272
1273 if (unrecognized_pc == 0)
1274 unrecognized_pc = current_pc;
1275
1276 /* The frame size is just the distance from the frame register
1277 to the original stack pointer. */
1278 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1279 {
1280 /* Frame pointer is fp. */
1281 framereg = ARM_FP_REGNUM;
1282 framesize = -regs[ARM_FP_REGNUM].k;
1283 }
1284 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1285 {
1286 /* Try the stack pointer... this is a bit desperate. */
1287 framereg = ARM_SP_REGNUM;
1288 framesize = -regs[ARM_SP_REGNUM].k;
1289 }
1290 else
1291 {
1292 /* We're just out of luck. We don't know where the frame is. */
1293 framereg = -1;
1294 framesize = 0;
1295 }
1296
1297 if (cache)
1298 {
1299 cache->framereg = framereg;
1300 cache->framesize = framesize;
1301
1302 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1303 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1304 cache->saved_regs[regno].addr = offset;
1305 }
1306
1307 if (arm_debug)
1308 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1309 paddress (gdbarch, unrecognized_pc));
1310
1311 do_cleanups (back_to);
1312 return unrecognized_pc;
1313 }
1314
1315 static void
1316 arm_scan_prologue (struct frame_info *this_frame,
1317 struct arm_prologue_cache *cache)
1318 {
1319 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1320 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1321 int regno;
1322 CORE_ADDR prologue_start, prologue_end, current_pc;
1323 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1324 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1325 pv_t regs[ARM_FPS_REGNUM];
1326 struct pv_area *stack;
1327 struct cleanup *back_to;
1328 CORE_ADDR offset;
1329
1330 /* Assume there is no frame until proven otherwise. */
1331 cache->framereg = ARM_SP_REGNUM;
1332 cache->framesize = 0;
1333
1334 /* Check for Thumb prologue. */
1335 if (arm_frame_is_thumb (this_frame))
1336 {
1337 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1338 return;
1339 }
1340
1341 /* Find the function prologue. If we can't find the function in
1342 the symbol table, peek in the stack frame to find the PC. */
1343 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1344 &prologue_end))
1345 {
1346 /* One way to find the end of the prologue (which works well
1347 for unoptimized code) is to do the following:
1348
1349 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1350
1351 if (sal.line == 0)
1352 prologue_end = prev_pc;
1353 else if (sal.end < prologue_end)
1354 prologue_end = sal.end;
1355
1356 This mechanism is very accurate so long as the optimizer
1357 doesn't move any instructions from the function body into the
1358 prologue. If this happens, sal.end will be the last
1359 instruction in the first hunk of prologue code just before
1360 the first instruction that the scheduler has moved from
1361 the body to the prologue.
1362
1363 In order to make sure that we scan all of the prologue
1364 instructions, we use a slightly less accurate mechanism which
1365 may scan more than necessary. To help compensate for this
1366 lack of accuracy, the prologue scanning loop below contains
1367 several clauses that will cause the loop to terminate early if
1368 an implausible prologue instruction is encountered.
1369
1370 The expression
1371
1372 prologue_start + 64
1373
1374 is a suitable endpoint since it accounts for the largest
1375 possible prologue plus up to five instructions inserted by
1376 the scheduler. */
1377
1378 if (prologue_end > prologue_start + 64)
1379 {
1380 prologue_end = prologue_start + 64; /* See above. */
1381 }
1382 }
1383 else
1384 {
1385 /* We have no symbol information. Our only option is to assume this
1386 function has a standard stack frame and the normal frame register.
1387 Then, we can find the value of our frame pointer on entrance to
1388 the callee (or at the present moment if this is the innermost frame).
1389 The value stored there should be the address of the stmfd + 8. */
1390 CORE_ADDR frame_loc;
1391 LONGEST return_value;
1392
1393 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1394 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1395 return;
1396 else
1397 {
1398 prologue_start = gdbarch_addr_bits_remove
1399 (gdbarch, return_value) - 8;
1400 prologue_end = prologue_start + 64; /* See above. */
1401 }
1402 }
1403
1404 if (prev_pc < prologue_end)
1405 prologue_end = prev_pc;
1406
1407 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1408 }
1409
1410 static struct arm_prologue_cache *
1411 arm_make_prologue_cache (struct frame_info *this_frame)
1412 {
1413 int reg;
1414 struct arm_prologue_cache *cache;
1415 CORE_ADDR unwound_fp;
1416
1417 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1418 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1419
1420 arm_scan_prologue (this_frame, cache);
1421
1422 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1423 if (unwound_fp == 0)
1424 return cache;
1425
1426 cache->prev_sp = unwound_fp + cache->framesize;
1427
1428 /* Calculate actual addresses of saved registers using offsets
1429 determined by arm_scan_prologue. */
1430 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1431 if (trad_frame_addr_p (cache->saved_regs, reg))
1432 cache->saved_regs[reg].addr += cache->prev_sp;
1433
1434 return cache;
1435 }
1436
1437 /* Our frame ID for a normal frame is the current function's starting PC
1438 and the caller's SP when we were called. */
1439
1440 static void
1441 arm_prologue_this_id (struct frame_info *this_frame,
1442 void **this_cache,
1443 struct frame_id *this_id)
1444 {
1445 struct arm_prologue_cache *cache;
1446 struct frame_id id;
1447 CORE_ADDR pc, func;
1448
1449 if (*this_cache == NULL)
1450 *this_cache = arm_make_prologue_cache (this_frame);
1451 cache = *this_cache;
1452
1453 /* This is meant to halt the backtrace at "_start". */
1454 pc = get_frame_pc (this_frame);
1455 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1456 return;
1457
1458 /* If we've hit a wall, stop. */
1459 if (cache->prev_sp == 0)
1460 return;
1461
1462 func = get_frame_func (this_frame);
1463 id = frame_id_build (cache->prev_sp, func);
1464 *this_id = id;
1465 }
1466
1467 static struct value *
1468 arm_prologue_prev_register (struct frame_info *this_frame,
1469 void **this_cache,
1470 int prev_regnum)
1471 {
1472 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1473 struct arm_prologue_cache *cache;
1474
1475 if (*this_cache == NULL)
1476 *this_cache = arm_make_prologue_cache (this_frame);
1477 cache = *this_cache;
1478
1479 /* If we are asked to unwind the PC, then we need to return the LR
1480 instead. The prologue may save PC, but it will point into this
1481 frame's prologue, not the next frame's resume location. Also
1482 strip the saved T bit. A valid LR may have the low bit set, but
1483 a valid PC never does. */
1484 if (prev_regnum == ARM_PC_REGNUM)
1485 {
1486 CORE_ADDR lr;
1487
1488 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1489 return frame_unwind_got_constant (this_frame, prev_regnum,
1490 arm_addr_bits_remove (gdbarch, lr));
1491 }
1492
1493 /* SP is generally not saved to the stack, but this frame is
1494 identified by the next frame's stack pointer at the time of the call.
1495 The value was already reconstructed into PREV_SP. */
1496 if (prev_regnum == ARM_SP_REGNUM)
1497 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1498
1499 /* The CPSR may have been changed by the call instruction and by the
1500 called function. The only bit we can reconstruct is the T bit,
1501 by checking the low bit of LR as of the call. This is a reliable
1502 indicator of Thumb-ness except for some ARM v4T pre-interworking
1503 Thumb code, which could get away with a clear low bit as long as
1504 the called function did not use bx. Guess that all other
1505 bits are unchanged; the condition flags are presumably lost,
1506 but the processor status is likely valid. */
1507 if (prev_regnum == ARM_PS_REGNUM)
1508 {
1509 CORE_ADDR lr, cpsr;
1510
1511 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1512 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1513 if (IS_THUMB_ADDR (lr))
1514 cpsr |= CPSR_T;
1515 else
1516 cpsr &= ~CPSR_T;
1517 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1518 }
1519
1520 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1521 prev_regnum);
1522 }
1523
1524 struct frame_unwind arm_prologue_unwind = {
1525 NORMAL_FRAME,
1526 arm_prologue_this_id,
1527 arm_prologue_prev_register,
1528 NULL,
1529 default_frame_sniffer
1530 };
1531
1532 static struct arm_prologue_cache *
1533 arm_make_stub_cache (struct frame_info *this_frame)
1534 {
1535 struct arm_prologue_cache *cache;
1536
1537 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1538 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1539
1540 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
1541
1542 return cache;
1543 }
1544
1545 /* Our frame ID for a stub frame is the current SP and PC. */
1546
1547 static void
1548 arm_stub_this_id (struct frame_info *this_frame,
1549 void **this_cache,
1550 struct frame_id *this_id)
1551 {
1552 struct arm_prologue_cache *cache;
1553
1554 if (*this_cache == NULL)
1555 *this_cache = arm_make_stub_cache (this_frame);
1556 cache = *this_cache;
1557
1558 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1559 }
1560
1561 static int
1562 arm_stub_unwind_sniffer (const struct frame_unwind *self,
1563 struct frame_info *this_frame,
1564 void **this_prologue_cache)
1565 {
1566 CORE_ADDR addr_in_block;
1567 char dummy[4];
1568
1569 addr_in_block = get_frame_address_in_block (this_frame);
1570 if (in_plt_section (addr_in_block, NULL)
1571 /* We also use the stub unwinder if the target memory is unreadable
1572 to avoid having the prologue unwinder try to read it.  */
1573 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1574 return 1;
1575
1576 return 0;
1577 }
1578
1579 struct frame_unwind arm_stub_unwind = {
1580 NORMAL_FRAME,
1581 arm_stub_this_id,
1582 arm_prologue_prev_register,
1583 NULL,
1584 arm_stub_unwind_sniffer
1585 };
1586
1587 static CORE_ADDR
1588 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1589 {
1590 struct arm_prologue_cache *cache;
1591
1592 if (*this_cache == NULL)
1593 *this_cache = arm_make_prologue_cache (this_frame);
1594 cache = *this_cache;
1595
1596 return cache->prev_sp - cache->framesize;
1597 }
1598
1599 struct frame_base arm_normal_base = {
1600 &arm_prologue_unwind,
1601 arm_normal_frame_base,
1602 arm_normal_frame_base,
1603 arm_normal_frame_base
1604 };
1605
1606 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1607 dummy frame. The frame ID's base needs to match the TOS value
1608 saved by save_dummy_frame_tos() and returned from
1609 arm_push_dummy_call, and the PC needs to match the dummy frame's
1610 breakpoint. */
1611
1612 static struct frame_id
1613 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1614 {
1615 return frame_id_build (get_frame_register_unsigned (this_frame, ARM_SP_REGNUM),
1616 get_frame_pc (this_frame));
1617 }
1618
1619 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1620 be used to construct the previous frame's ID, after looking up the
1621 containing function). */
1622
1623 static CORE_ADDR
1624 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1625 {
1626 CORE_ADDR pc;
1627 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
1628 return arm_addr_bits_remove (gdbarch, pc);
1629 }
1630
1631 static CORE_ADDR
1632 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1633 {
1634 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
1635 }
1636
1637 static struct value *
1638 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
1639 int regnum)
1640 {
1641 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1642 CORE_ADDR lr, cpsr;
1643
1644 switch (regnum)
1645 {
1646 case ARM_PC_REGNUM:
1647 /* The PC is normally copied from the return column, which
1648 describes saves of LR. However, that version may have an
1649 extra bit set to indicate Thumb state. The bit is not
1650 part of the PC. */
1651 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1652 return frame_unwind_got_constant (this_frame, regnum,
1653 arm_addr_bits_remove (gdbarch, lr));
1654
1655 case ARM_PS_REGNUM:
1656 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
1657 cpsr = get_frame_register_unsigned (this_frame, regnum);
1658 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1659 if (IS_THUMB_ADDR (lr))
1660 cpsr |= CPSR_T;
1661 else
1662 cpsr &= ~CPSR_T;
1663 return frame_unwind_got_constant (this_frame, regnum, cpsr);
1664
1665 default:
1666 internal_error (__FILE__, __LINE__,
1667 _("Unexpected register %d"), regnum);
1668 }
1669 }
1670
1671 static void
1672 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1673 struct dwarf2_frame_state_reg *reg,
1674 struct frame_info *this_frame)
1675 {
1676 switch (regnum)
1677 {
1678 case ARM_PC_REGNUM:
1679 case ARM_PS_REGNUM:
1680 reg->how = DWARF2_FRAME_REG_FN;
1681 reg->loc.fn = arm_dwarf2_prev_register;
1682 break;
1683 case ARM_SP_REGNUM:
1684 reg->how = DWARF2_FRAME_REG_CFA;
1685 break;
1686 }
1687 }
1688
1689 /* When arguments must be pushed onto the stack, they go on in reverse
1690 order. The code below implements a FILO (stack) to do this. */
1691
1692 struct stack_item
1693 {
1694 int len;
1695 struct stack_item *prev;
1696 void *data;
1697 };
1698
1699 static struct stack_item *
1700 push_stack_item (struct stack_item *prev, void *contents, int len)
1701 {
1702 struct stack_item *si;
1703 si = xmalloc (sizeof (struct stack_item));
1704 si->data = xmalloc (len);
1705 si->len = len;
1706 si->prev = prev;
1707 memcpy (si->data, contents, len);
1708 return si;
1709 }
1710
1711 static struct stack_item *
1712 pop_stack_item (struct stack_item *si)
1713 {
1714 struct stack_item *dead = si;
1715 si = si->prev;
1716 xfree (dead->data);
1717 xfree (dead);
1718 return si;
1719 }
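/* A minimal usage sketch (the real consumer is arm_push_dummy_call,
   further below; SP and the argument buffers here are only placeholders):

     struct stack_item *si = NULL;
     si = push_stack_item (si, buf1, len1);
     si = push_stack_item (si, buf2, len2);
     while (si)
       {
         sp -= si->len;
         write_memory (sp, si->data, si->len);
         si = pop_stack_item (si);
       }

   Because the list is last-in, first-out, writing downwards while popping
   leaves the first-pushed item at the lowest address, nearest the final
   SP.  */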
1720
1721
1722 /* Return the alignment (in bytes) of the given type. */
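/* For example, for "struct { char c; int i; }" the field alignments are 1
   and 4, so the result is 4; for "double d[2]" the element type gives an
   alignment of 8.  */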
1723
1724 static int
1725 arm_type_align (struct type *t)
1726 {
1727 int n;
1728 int align;
1729 int falign;
1730
1731 t = check_typedef (t);
1732 switch (TYPE_CODE (t))
1733 {
1734 default:
1735 /* Should never happen. */
1736 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1737 return 4;
1738
1739 case TYPE_CODE_PTR:
1740 case TYPE_CODE_ENUM:
1741 case TYPE_CODE_INT:
1742 case TYPE_CODE_FLT:
1743 case TYPE_CODE_SET:
1744 case TYPE_CODE_RANGE:
1745 case TYPE_CODE_BITSTRING:
1746 case TYPE_CODE_REF:
1747 case TYPE_CODE_CHAR:
1748 case TYPE_CODE_BOOL:
1749 return TYPE_LENGTH (t);
1750
1751 case TYPE_CODE_ARRAY:
1752 case TYPE_CODE_COMPLEX:
1753 /* TODO: What about vector types? */
1754 return arm_type_align (TYPE_TARGET_TYPE (t));
1755
1756 case TYPE_CODE_STRUCT:
1757 case TYPE_CODE_UNION:
1758 align = 1;
1759 for (n = 0; n < TYPE_NFIELDS (t); n++)
1760 {
1761 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
1762 if (falign > align)
1763 align = falign;
1764 }
1765 return align;
1766 }
1767 }
1768
1769 /* Possible base types for a candidate for passing and returning in
1770 VFP registers. */
1771
1772 enum arm_vfp_cprc_base_type
1773 {
1774 VFP_CPRC_UNKNOWN,
1775 VFP_CPRC_SINGLE,
1776 VFP_CPRC_DOUBLE,
1777 VFP_CPRC_VEC64,
1778 VFP_CPRC_VEC128
1779 };
1780
1781 /* The length of one element of base type B. */
1782
1783 static unsigned
1784 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
1785 {
1786 switch (b)
1787 {
1788 case VFP_CPRC_SINGLE:
1789 return 4;
1790 case VFP_CPRC_DOUBLE:
1791 return 8;
1792 case VFP_CPRC_VEC64:
1793 return 8;
1794 case VFP_CPRC_VEC128:
1795 return 16;
1796 default:
1797 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
1798 (int) b);
1799 }
1800 }
1801
1802 /* The character ('s', 'd' or 'q') for the type of VFP register used
1803 for passing base type B. */
1804
1805 static int
1806 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
1807 {
1808 switch (b)
1809 {
1810 case VFP_CPRC_SINGLE:
1811 return 's';
1812 case VFP_CPRC_DOUBLE:
1813 return 'd';
1814 case VFP_CPRC_VEC64:
1815 return 'd';
1816 case VFP_CPRC_VEC128:
1817 return 'q';
1818 default:
1819 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
1820 (int) b);
1821 }
1822 }
1823
1824 /* Determine whether T may be part of a candidate for passing and
1825 returning in VFP registers, ignoring the limit on the total number
1826 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
1827 classification of the first valid component found; if it is not
1828 VFP_CPRC_UNKNOWN, all components must have the same classification
1829 as *BASE_TYPE. If it is found that T contains a type not permitted
1830 for passing and returning in VFP registers, a type differently
1831 classified from *BASE_TYPE, or two types differently classified
1832 from each other, return -1, otherwise return the total number of
1833 base-type elements found (possibly 0 in an empty structure or
1834 array). Vectors and complex types are not currently supported,
1835 matching the generic AAPCS support. */
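/* Illustrative examples of this classification (assuming the usual
   4-byte float and 8-byte double): "struct { float x, y; }" yields two
   VFP_CPRC_SINGLE elements, "double d[3]" yields three VFP_CPRC_DOUBLE
   elements, and "struct { float f; double d; }" yields -1 because its
   members classify differently. */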
1836
1837 static int
1838 arm_vfp_cprc_sub_candidate (struct type *t,
1839 enum arm_vfp_cprc_base_type *base_type)
1840 {
1841 t = check_typedef (t);
1842 switch (TYPE_CODE (t))
1843 {
1844 case TYPE_CODE_FLT:
1845 switch (TYPE_LENGTH (t))
1846 {
1847 case 4:
1848 if (*base_type == VFP_CPRC_UNKNOWN)
1849 *base_type = VFP_CPRC_SINGLE;
1850 else if (*base_type != VFP_CPRC_SINGLE)
1851 return -1;
1852 return 1;
1853
1854 case 8:
1855 if (*base_type == VFP_CPRC_UNKNOWN)
1856 *base_type = VFP_CPRC_DOUBLE;
1857 else if (*base_type != VFP_CPRC_DOUBLE)
1858 return -1;
1859 return 1;
1860
1861 default:
1862 return -1;
1863 }
1864 break;
1865
1866 case TYPE_CODE_ARRAY:
1867 {
1868 int count;
1869 unsigned unitlen;
1870 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
1871 if (count == -1)
1872 return -1;
1873 if (TYPE_LENGTH (t) == 0)
1874 {
1875 gdb_assert (count == 0);
1876 return 0;
1877 }
1878 else if (count == 0)
1879 return -1;
1880 unitlen = arm_vfp_cprc_unit_length (*base_type);
1881 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
1882 return TYPE_LENGTH (t) / unitlen;
1883 }
1884 break;
1885
1886 case TYPE_CODE_STRUCT:
1887 {
1888 int count = 0;
1889 unsigned unitlen;
1890 int i;
1891 for (i = 0; i < TYPE_NFIELDS (t); i++)
1892 {
1893 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
1894 base_type);
1895 if (sub_count == -1)
1896 return -1;
1897 count += sub_count;
1898 }
1899 if (TYPE_LENGTH (t) == 0)
1900 {
1901 gdb_assert (count == 0);
1902 return 0;
1903 }
1904 else if (count == 0)
1905 return -1;
1906 unitlen = arm_vfp_cprc_unit_length (*base_type);
1907 if (TYPE_LENGTH (t) != unitlen * count)
1908 return -1;
1909 return count;
1910 }
1911
1912 case TYPE_CODE_UNION:
1913 {
1914 int count = 0;
1915 unsigned unitlen;
1916 int i;
1917 for (i = 0; i < TYPE_NFIELDS (t); i++)
1918 {
1919 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
1920 base_type);
1921 if (sub_count == -1)
1922 return -1;
1923 count = (count > sub_count ? count : sub_count);
1924 }
1925 if (TYPE_LENGTH (t) == 0)
1926 {
1927 gdb_assert (count == 0);
1928 return 0;
1929 }
1930 else if (count == 0)
1931 return -1;
1932 unitlen = arm_vfp_cprc_unit_length (*base_type);
1933 if (TYPE_LENGTH (t) != unitlen * count)
1934 return -1;
1935 return count;
1936 }
1937
1938 default:
1939 break;
1940 }
1941
1942 return -1;
1943 }
1944
1945 /* Determine whether T is a VFP co-processor register candidate (CPRC)
1946 if passed to or returned from a non-variadic function with the VFP
1947 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
1948 *BASE_TYPE to the base type for T and *COUNT to the number of
1949 elements of that base type before returning. */
1950
1951 static int
1952 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
1953 int *count)
1954 {
1955 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
1956 int c = arm_vfp_cprc_sub_candidate (t, &b);
1957 if (c <= 0 || c > 4)
1958 return 0;
1959 *base_type = b;
1960 *count = c;
1961 return 1;
1962 }
1963
1964 /* Return 1 if the VFP ABI should be used for passing arguments to and
1965 returning values from a function of type FUNC_TYPE, 0
1966 otherwise. */
1967
1968 static int
1969 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
1970 {
1971 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1972 /* Variadic functions always use the base ABI. Assume that functions
1973 without debug info are not variadic. */
1974 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
1975 return 0;
1976 /* The VFP ABI is only supported as a variant of AAPCS. */
1977 if (tdep->arm_abi != ARM_ABI_AAPCS)
1978 return 0;
1979 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
1980 }
1981
1982 /* We currently only support passing parameters in integer registers, which
1983 conforms with GCC's default model, and VFP argument passing following
1984 the VFP variant of AAPCS. Several other variants exist and
1985 we should probably support some of them based on the selected ABI. */
1986
1987 static CORE_ADDR
1988 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1989 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
1990 struct value **args, CORE_ADDR sp, int struct_return,
1991 CORE_ADDR struct_addr)
1992 {
1993 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1994 int argnum;
1995 int argreg;
1996 int nstack;
1997 struct stack_item *si = NULL;
1998 int use_vfp_abi;
1999 struct type *ftype;
2000 unsigned vfp_regs_free = (1 << 16) - 1;
2001
2002 /* Determine the type of this function and whether the VFP ABI
2003 applies. */
2004 ftype = check_typedef (value_type (function));
2005 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
2006 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
2007 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
2008
2009 /* Set the return address. For the ARM, the return breakpoint is
2010 always at BP_ADDR. */
2011 if (arm_pc_is_thumb (bp_addr))
2012 bp_addr |= 1;
2013 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
2014
2015 /* Walk through the list of args and determine how large a temporary
2016 stack is required. Need to take care here as structs may be
2017 passed on the stack, and we have to push them. */
2018 nstack = 0;
2019
2020 argreg = ARM_A1_REGNUM;
2021 nstack = 0;
2022
2023 /* The struct_return pointer occupies the first parameter
2024 passing register. */
2025 if (struct_return)
2026 {
2027 if (arm_debug)
2028 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
2029 gdbarch_register_name (gdbarch, argreg),
2030 paddress (gdbarch, struct_addr));
2031 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
2032 argreg++;
2033 }
2034
2035 for (argnum = 0; argnum < nargs; argnum++)
2036 {
2037 int len;
2038 struct type *arg_type;
2039 struct type *target_type;
2040 enum type_code typecode;
2041 bfd_byte *val;
2042 int align;
2043 enum arm_vfp_cprc_base_type vfp_base_type;
2044 int vfp_base_count;
2045 int may_use_core_reg = 1;
2046
2047 arg_type = check_typedef (value_type (args[argnum]));
2048 len = TYPE_LENGTH (arg_type);
2049 target_type = TYPE_TARGET_TYPE (arg_type);
2050 typecode = TYPE_CODE (arg_type);
2051 val = value_contents_writeable (args[argnum]);
2052
2053 align = arm_type_align (arg_type);
2054 /* Round alignment up to a whole number of words. */
2055 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
2056 /* Different ABIs have different maximum alignments. */
2057 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
2058 {
2059 /* The APCS ABI only requires word alignment. */
2060 align = INT_REGISTER_SIZE;
2061 }
2062 else
2063 {
2064 /* The AAPCS requires at most doubleword alignment. */
2065 if (align > INT_REGISTER_SIZE * 2)
2066 align = INT_REGISTER_SIZE * 2;
2067 }
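/* For example (illustrative): an 8-byte "double" or "long long"
   argument keeps align == 8 under AAPCS after this adjustment, which
   triggers the stack padding and even-register-pair checks below;
   under APCS it is clamped to word alignment. */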
2068
2069 if (use_vfp_abi
2070 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
2071 &vfp_base_count))
2072 {
2073 int regno;
2074 int unit_length;
2075 int shift;
2076 unsigned mask;
2077
2078 /* Because this is a CPRC it cannot go in a core register or
2079 cause a core register to be skipped for alignment.
2080 Either it goes in VFP registers and the rest of this loop
2081 iteration is skipped for this argument, or it goes on the
2082 stack (and the stack alignment code is correct for this
2083 case). */
2084 may_use_core_reg = 0;
2085
2086 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
2087 shift = unit_length / 4;
2088 mask = (1 << (shift * vfp_base_count)) - 1;
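/* Example (illustrative): for a candidate of two doubles, shift == 2
   and mask == 0xf, so the loop below searches for four consecutive
   free single-precision slots at an even S index, i.e. a consecutive
   D-register pair (s0-s3 == d0/d1, s2-s5 == d1/d2, and so on). */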
2089 for (regno = 0; regno < 16; regno += shift)
2090 if (((vfp_regs_free >> regno) & mask) == mask)
2091 break;
2092
2093 if (regno < 16)
2094 {
2095 int reg_char;
2096 int reg_scaled;
2097 int i;
2098
2099 vfp_regs_free &= ~(mask << regno);
2100 reg_scaled = regno / shift;
2101 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
2102 for (i = 0; i < vfp_base_count; i++)
2103 {
2104 char name_buf[4];
2105 int regnum;
2106 if (reg_char == 'q')
2107 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
2108 val + i * unit_length);
2109 else
2110 {
2111 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
2112 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
2113 strlen (name_buf));
2114 regcache_cooked_write (regcache, regnum,
2115 val + i * unit_length);
2116 }
2117 }
2118 continue;
2119 }
2120 else
2121 {
2122 /* This CPRC could not go in VFP registers, so all VFP
2123 registers are now marked as used. */
2124 vfp_regs_free = 0;
2125 }
2126 }
2127
2128 /* Push stack padding for doubleword alignment. */
2129 if (nstack & (align - 1))
2130 {
2131 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2132 nstack += INT_REGISTER_SIZE;
2133 }
2134
2135 /* Doubleword aligned quantities must go in even register pairs. */
2136 if (may_use_core_reg
2137 && argreg <= ARM_LAST_ARG_REGNUM
2138 && align > INT_REGISTER_SIZE
2139 && argreg & 1)
2140 argreg++;
2141
2142 /* If the argument is a pointer to a function, and it is a
2143 Thumb function, create a LOCAL copy of the value and set
2144 the THUMB bit in it. */
2145 if (TYPE_CODE_PTR == typecode
2146 && target_type != NULL
2147 && TYPE_CODE_FUNC == TYPE_CODE (target_type))
2148 {
2149 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
2150 if (arm_pc_is_thumb (regval))
2151 {
2152 val = alloca (len);
2153 store_unsigned_integer (val, len, byte_order,
2154 MAKE_THUMB_ADDR (regval));
2155 }
2156 }
2157
2158 /* Copy the argument to general registers or the stack in
2159 register-sized pieces. Large arguments are split between
2160 registers and stack. */
2161 while (len > 0)
2162 {
2163 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
2164
2165 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
2166 {
2167 /* The argument is being passed in a general purpose
2168 register. */
2169 CORE_ADDR regval
2170 = extract_unsigned_integer (val, partial_len, byte_order);
2171 if (byte_order == BFD_ENDIAN_BIG)
2172 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
2173 if (arm_debug)
2174 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
2175 argnum,
2176 gdbarch_register_name
2177 (gdbarch, argreg),
2178 phex (regval, INT_REGISTER_SIZE));
2179 regcache_cooked_write_unsigned (regcache, argreg, regval);
2180 argreg++;
2181 }
2182 else
2183 {
2184 /* Push the arguments onto the stack. */
2185 if (arm_debug)
2186 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
2187 argnum, nstack);
2188 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2189 nstack += INT_REGISTER_SIZE;
2190 }
2191
2192 len -= partial_len;
2193 val += partial_len;
2194 }
2195 }
2196 /* If we have an odd number of words to push, then decrement the stack
2197 by one word now, so the first stack argument will be dword aligned. */
2198 if (nstack & 4)
2199 sp -= 4;
2200
2201 while (si)
2202 {
2203 sp -= si->len;
2204 write_memory (sp, si->data, si->len);
2205 si = pop_stack_item (si);
2206 }
2207
2208 /* Finally, update the SP register. */
2209 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
2210
2211 return sp;
2212 }
2213
2214
2215 /* Always align the frame to an 8-byte boundary. This is required on
2216 some platforms and harmless on the rest. */
2217
2218 static CORE_ADDR
2219 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2220 {
2221 /* Align the stack to eight bytes. */
2222 return sp & ~ (CORE_ADDR) 7;
2223 }
2224
2225 static void
2226 print_fpu_flags (int flags)
2227 {
2228 if (flags & (1 << 0))
2229 fputs ("IVO ", stdout);
2230 if (flags & (1 << 1))
2231 fputs ("DVZ ", stdout);
2232 if (flags & (1 << 2))
2233 fputs ("OFL ", stdout);
2234 if (flags & (1 << 3))
2235 fputs ("UFL ", stdout);
2236 if (flags & (1 << 4))
2237 fputs ("INX ", stdout);
2238 putchar ('\n');
2239 }
2240
2241 /* Print interesting information about the floating point processor
2242 (if present) or emulator. */
2243 static void
2244 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
2245 struct frame_info *frame, const char *args)
2246 {
2247 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
2248 int type;
2249
2250 type = (status >> 24) & 127;
2251 if (status & (1 << 31))
2252 printf (_("Hardware FPU type %d\n"), type);
2253 else
2254 printf (_("Software FPU type %d\n"), type);
2255 /* i18n: [floating point unit] mask */
2256 fputs (_("mask: "), stdout);
2257 print_fpu_flags (status >> 16);
2258 /* i18n: [floating point unit] flags */
2259 fputs (_("flags: "), stdout);
2260 print_fpu_flags (status);
2261 }
2262
2263 /* Construct the ARM extended floating point type. */
2264 static struct type *
2265 arm_ext_type (struct gdbarch *gdbarch)
2266 {
2267 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2268
2269 if (!tdep->arm_ext_type)
2270 tdep->arm_ext_type
2271 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
2272 floatformats_arm_ext);
2273
2274 return tdep->arm_ext_type;
2275 }
2276
2277 static struct type *
2278 arm_neon_double_type (struct gdbarch *gdbarch)
2279 {
2280 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2281
2282 if (tdep->neon_double_type == NULL)
2283 {
2284 struct type *t, *elem;
2285
2286 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
2287 TYPE_CODE_UNION);
2288 elem = builtin_type (gdbarch)->builtin_uint8;
2289 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
2290 elem = builtin_type (gdbarch)->builtin_uint16;
2291 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
2292 elem = builtin_type (gdbarch)->builtin_uint32;
2293 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
2294 elem = builtin_type (gdbarch)->builtin_uint64;
2295 append_composite_type_field (t, "u64", elem);
2296 elem = builtin_type (gdbarch)->builtin_float;
2297 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
2298 elem = builtin_type (gdbarch)->builtin_double;
2299 append_composite_type_field (t, "f64", elem);
2300
2301 TYPE_VECTOR (t) = 1;
2302 TYPE_NAME (t) = "neon_d";
2303 tdep->neon_double_type = t;
2304 }
2305
2306 return tdep->neon_double_type;
2307 }
2308
2309 /* FIXME: The vector types are not correctly ordered on big-endian
2310 targets. Just as s0 is the low bits of d0, d0[0] is also the low
2311 bits of d0 - regardless of what unit size is being held in d0. So
2312 the offset of the first uint8 in d0 is 7, but the offset of the
2313 first float is 4. This code works as-is for little-endian
2314 targets. */
2315
2316 static struct type *
2317 arm_neon_quad_type (struct gdbarch *gdbarch)
2318 {
2319 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2320
2321 if (tdep->neon_quad_type == NULL)
2322 {
2323 struct type *t, *elem;
2324
2325 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
2326 TYPE_CODE_UNION);
2327 elem = builtin_type (gdbarch)->builtin_uint8;
2328 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
2329 elem = builtin_type (gdbarch)->builtin_uint16;
2330 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
2331 elem = builtin_type (gdbarch)->builtin_uint32;
2332 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
2333 elem = builtin_type (gdbarch)->builtin_uint64;
2334 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
2335 elem = builtin_type (gdbarch)->builtin_float;
2336 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
2337 elem = builtin_type (gdbarch)->builtin_double;
2338 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
2339
2340 TYPE_VECTOR (t) = 1;
2341 TYPE_NAME (t) = "neon_q";
2342 tdep->neon_quad_type = t;
2343 }
2344
2345 return tdep->neon_quad_type;
2346 }
2347
2348 /* Return the GDB type object for the "standard" data type of data in
2349 register N. */
2350
2351 static struct type *
2352 arm_register_type (struct gdbarch *gdbarch, int regnum)
2353 {
2354 int num_regs = gdbarch_num_regs (gdbarch);
2355
2356 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
2357 && regnum >= num_regs && regnum < num_regs + 32)
2358 return builtin_type (gdbarch)->builtin_float;
2359
2360 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
2361 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
2362 return arm_neon_quad_type (gdbarch);
2363
2364 /* If the target description has register information, we are only
2365 in this function so that we can override the types of
2366 double-precision registers for NEON. */
2367 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
2368 {
2369 struct type *t = tdesc_register_type (gdbarch, regnum);
2370
2371 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
2372 && TYPE_CODE (t) == TYPE_CODE_FLT
2373 && gdbarch_tdep (gdbarch)->have_neon)
2374 return arm_neon_double_type (gdbarch);
2375 else
2376 return t;
2377 }
2378
2379 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
2380 {
2381 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
2382 return builtin_type (gdbarch)->builtin_void;
2383
2384 return arm_ext_type (gdbarch);
2385 }
2386 else if (regnum == ARM_SP_REGNUM)
2387 return builtin_type (gdbarch)->builtin_data_ptr;
2388 else if (regnum == ARM_PC_REGNUM)
2389 return builtin_type (gdbarch)->builtin_func_ptr;
2390 else if (regnum >= ARRAY_SIZE (arm_register_names))
2391 /* These registers are only supported on targets which supply
2392 an XML description. */
2393 return builtin_type (gdbarch)->builtin_int0;
2394 else
2395 return builtin_type (gdbarch)->builtin_uint32;
2396 }
2397
2398 /* Map a DWARF register REGNUM onto the appropriate GDB register
2399 number. */
2400
2401 static int
2402 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2403 {
2404 /* Core integer regs. */
2405 if (reg >= 0 && reg <= 15)
2406 return reg;
2407
2408 /* Legacy FPA encoding. These were once used in a way which
2409 overlapped with VFP register numbering, so their use is
2410 discouraged, but GDB doesn't support the ARM toolchain
2411 which used them for VFP. */
2412 if (reg >= 16 && reg <= 23)
2413 return ARM_F0_REGNUM + reg - 16;
2414
2415 /* New assignments for the FPA registers. */
2416 if (reg >= 96 && reg <= 103)
2417 return ARM_F0_REGNUM + reg - 96;
2418
2419 /* WMMX register assignments. */
2420 if (reg >= 104 && reg <= 111)
2421 return ARM_WCGR0_REGNUM + reg - 104;
2422
2423 if (reg >= 112 && reg <= 127)
2424 return ARM_WR0_REGNUM + reg - 112;
2425
2426 if (reg >= 192 && reg <= 199)
2427 return ARM_WC0_REGNUM + reg - 192;
2428
2429 /* VFP v2 registers. A double precision value is actually
2430 in d1 rather than s2, but the ABI only defines numbering
2431 for the single precision registers. This will "just work"
2432 in GDB for little endian targets (we'll read eight bytes,
2433 starting in s0 and then progressing to s1), but will be
2434 reversed on big endian targets with VFP. This won't
2435 be a problem for the new Neon quad registers; you're supposed
2436 to use DW_OP_piece for those. */
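/* For example (illustrative): DWARF register 64 resolves to the user
   register "s0" here, while 256 below resolves to "d0". */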
2437 if (reg >= 64 && reg <= 95)
2438 {
2439 char name_buf[4];
2440
2441 sprintf (name_buf, "s%d", reg - 64);
2442 return user_reg_map_name_to_regnum (gdbarch, name_buf,
2443 strlen (name_buf));
2444 }
2445
2446 /* VFP v3 / Neon registers. This range is also used for VFP v2
2447 registers, except that it now describes d0 instead of s0. */
2448 if (reg >= 256 && reg <= 287)
2449 {
2450 char name_buf[4];
2451
2452 sprintf (name_buf, "d%d", reg - 256);
2453 return user_reg_map_name_to_regnum (gdbarch, name_buf,
2454 strlen (name_buf));
2455 }
2456
2457 return -1;
2458 }
2459
2460 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
2461 static int
2462 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
2463 {
2464 int reg = regnum;
2465 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
2466
2467 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
2468 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
2469
2470 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
2471 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
2472
2473 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
2474 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
2475
2476 if (reg < NUM_GREGS)
2477 return SIM_ARM_R0_REGNUM + reg;
2478 reg -= NUM_GREGS;
2479
2480 if (reg < NUM_FREGS)
2481 return SIM_ARM_FP0_REGNUM + reg;
2482 reg -= NUM_FREGS;
2483
2484 if (reg < NUM_SREGS)
2485 return SIM_ARM_FPS_REGNUM + reg;
2486 reg -= NUM_SREGS;
2487
2488 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
2489 }
2490
2491 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
2492 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
2493 It is thought that this is the floating-point register format on
2494 little-endian systems. */
2495
2496 static void
2497 convert_from_extended (const struct floatformat *fmt, const void *ptr,
2498 void *dbl, int endianess)
2499 {
2500 DOUBLEST d;
2501
2502 if (endianess == BFD_ENDIAN_BIG)
2503 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
2504 else
2505 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
2506 ptr, &d);
2507 floatformat_from_doublest (fmt, &d, dbl);
2508 }
2509
2510 static void
2511 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
2512 int endianess)
2513 {
2514 DOUBLEST d;
2515
2516 floatformat_to_doublest (fmt, ptr, &d);
2517 if (endianess == BFD_ENDIAN_BIG)
2518 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
2519 else
2520 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
2521 &d, dbl);
2522 }
2523
2524 static int
2525 condition_true (unsigned long cond, unsigned long status_reg)
2526 {
2527 if (cond == INST_AL || cond == INST_NV)
2528 return 1;
2529
2530 switch (cond)
2531 {
2532 case INST_EQ:
2533 return ((status_reg & FLAG_Z) != 0);
2534 case INST_NE:
2535 return ((status_reg & FLAG_Z) == 0);
2536 case INST_CS:
2537 return ((status_reg & FLAG_C) != 0);
2538 case INST_CC:
2539 return ((status_reg & FLAG_C) == 0);
2540 case INST_MI:
2541 return ((status_reg & FLAG_N) != 0);
2542 case INST_PL:
2543 return ((status_reg & FLAG_N) == 0);
2544 case INST_VS:
2545 return ((status_reg & FLAG_V) != 0);
2546 case INST_VC:
2547 return ((status_reg & FLAG_V) == 0);
2548 case INST_HI:
2549 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
2550 case INST_LS:
2551 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
2552 case INST_GE:
2553 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
2554 case INST_LT:
2555 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
2556 case INST_GT:
2557 return (((status_reg & FLAG_Z) == 0)
2558 && (((status_reg & FLAG_N) == 0)
2559 == ((status_reg & FLAG_V) == 0)));
2560 case INST_LE:
2561 return (((status_reg & FLAG_Z) != 0)
2562 || (((status_reg & FLAG_N) == 0)
2563 != ((status_reg & FLAG_V) == 0)));
2564 }
2565 return 1;
2566 }
2567
2568 static unsigned long
2569 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
2570 unsigned long pc_val, unsigned long status_reg)
2571 {
2572 unsigned long res, shift;
2573 int rm = bits (inst, 0, 3);
2574 unsigned long shifttype = bits (inst, 5, 6);
2575
2576 if (bit (inst, 4))
2577 {
2578 int rs = bits (inst, 8, 11);
2579 shift = (rs == 15 ? pc_val + 8
2580 : get_frame_register_unsigned (frame, rs)) & 0xFF;
2581 }
2582 else
2583 shift = bits (inst, 7, 11);
2584
2585 res = (rm == 15
2586 ? (pc_val + (bit (inst, 4) ? 12 : 8))
2587 : get_frame_register_unsigned (frame, rm));
2588
2589 switch (shifttype)
2590 {
2591 case 0: /* LSL */
2592 res = shift >= 32 ? 0 : res << shift;
2593 break;
2594
2595 case 1: /* LSR */
2596 res = shift >= 32 ? 0 : res >> shift;
2597 break;
2598
2599 case 2: /* ASR */
2600 if (shift >= 32)
2601 shift = 31;
2602 res = ((res & 0x80000000L)
2603 ? ~((~res) >> shift) : res >> shift);
2604 break;
2605
2606 case 3: /* ROR/RRX */
2607 shift &= 31;
2608 if (shift == 0)
2609 res = (res >> 1) | (carry ? 0x80000000L : 0);
2610 else
2611 res = (res >> shift) | (res << (32 - shift));
2612 break;
2613 }
2614
2615 return res & 0xffffffff;
2616 }
2617
2618 /* Return number of 1-bits in VAL. */
2619
2620 static int
2621 bitcount (unsigned long val)
2622 {
2623 int nbits;
2624 for (nbits = 0; val != 0; nbits++)
2625 val &= val - 1; /* delete rightmost 1-bit in val */
2626 return nbits;
2627 }
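/* For example, bitcount (0xf1) == 5; each iteration clears the lowest
   set bit: 0xf1 -> 0xf0 -> 0xe0 -> 0xc0 -> 0x80 -> 0. */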
2628
2629 /* Return the size in bytes of the complete Thumb instruction whose
2630 first halfword is INST1. */
2631
2632 static int
2633 thumb_insn_size (unsigned short inst1)
2634 {
2635 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
2636 return 4;
2637 else
2638 return 2;
2639 }
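/* For example (illustrative): 0xf7ff, a first halfword of the 32-bit
   BL encoding, has top bits 11110 and so yields 4, while 0xb580
   (push {r7, lr}) yields 2. */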
2640
2641 static int
2642 thumb_advance_itstate (unsigned int itstate)
2643 {
2644 /* Preserve IT[7:5], the first three bits of the condition. Shift
2645 the upcoming condition flags left by one bit. */
2646 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
2647
2648 /* If we have finished the IT block, clear the state. */
2649 if ((itstate & 0x0f) == 0)
2650 itstate = 0;
2651
2652 return itstate;
2653 }
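/* Worked example (illustrative): after an "ITT EQ" instruction ITSTATE
   is 0x04. One call to thumb_advance_itstate gives 0x08 (one
   conditional instruction remaining); a second call clears it to 0,
   because the low four bits of the shifted value are zero. */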
2654
2655 /* Find the next PC after the current instruction executes. In some
2656 cases we can not statically determine the answer (see the IT state
2657 handling in this function); in that case, a breakpoint may be
2658 inserted in addition to the returned PC, which will be used to set
2659 another breakpoint by our caller. */
2660
2661 static CORE_ADDR
2662 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
2663 {
2664 struct gdbarch *gdbarch = get_frame_arch (frame);
2665 struct address_space *aspace = get_frame_address_space (frame);
2666 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2667 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2668 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
2669 unsigned short inst1;
2670 CORE_ADDR nextpc = pc + 2; /* default is next instruction */
2671 unsigned long offset;
2672 ULONGEST status, itstate;
2673
2674 nextpc = MAKE_THUMB_ADDR (nextpc);
2675 pc_val = MAKE_THUMB_ADDR (pc_val);
2676
2677 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2678
2679 /* Thumb-2 conditional execution support. There are eight bits in
2680 the CPSR which describe conditional execution state. Once
2681 reconstructed (they're in a funny order), the low five bits
2682 describe the low bit of the condition for each instruction and
2683 how many instructions remain. The high three bits describe the
2684 base condition. One of the low four bits will be set if an IT
2685 block is active. These bits read as zero on earlier
2686 processors. */
2687 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
2688 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
2689
2690 /* If-Then handling. On GNU/Linux, where this routine is used, we
2691 use an undefined instruction as a breakpoint. Unlike BKPT, IT
2692 can disable execution of the undefined instruction. So we might
2693 miss the breakpoint if we set it on a skipped conditional
2694 instruction. Because conditional instructions can change the
2695 flags, affecting the execution of further instructions, we may
2696 need to set two breakpoints. */
2697
2698 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
2699 {
2700 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
2701 {
2702 /* An IT instruction. Because this instruction does not
2703 modify the flags, we can accurately predict the next
2704 executed instruction. */
2705 itstate = inst1 & 0x00ff;
2706 pc += thumb_insn_size (inst1);
2707
2708 while (itstate != 0 && ! condition_true (itstate >> 4, status))
2709 {
2710 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2711 pc += thumb_insn_size (inst1);
2712 itstate = thumb_advance_itstate (itstate);
2713 }
2714
2715 return MAKE_THUMB_ADDR (pc);
2716 }
2717 else if (itstate != 0)
2718 {
2719 /* We are in a conditional block. Check the condition. */
2720 if (! condition_true (itstate >> 4, status))
2721 {
2722 /* Advance to the next executed instruction. */
2723 pc += thumb_insn_size (inst1);
2724 itstate = thumb_advance_itstate (itstate);
2725
2726 while (itstate != 0 && ! condition_true (itstate >> 4, status))
2727 {
2728 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2729 pc += thumb_insn_size (inst1);
2730 itstate = thumb_advance_itstate (itstate);
2731 }
2732
2733 return MAKE_THUMB_ADDR (pc);
2734 }
2735 else if ((itstate & 0x0f) == 0x08)
2736 {
2737 /* This is the last instruction of the conditional
2738 block, and it is executed. We can handle it normally
2739 because the following instruction is not conditional,
2740 and we must handle it normally because it is
2741 permitted to branch. Fall through. */
2742 }
2743 else
2744 {
2745 int cond_negated;
2746
2747 /* There are conditional instructions after this one.
2748 If this instruction modifies the flags, then we can
2749 not predict what the next executed instruction will
2750 be. Fortunately, this instruction is architecturally
2751 forbidden to branch; we know it will fall through.
2752 Start by skipping past it. */
2753 pc += thumb_insn_size (inst1);
2754 itstate = thumb_advance_itstate (itstate);
2755
2756 /* Set a breakpoint on the following instruction. */
2757 gdb_assert ((itstate & 0x0f) != 0);
2758 if (insert_bkpt)
2759 insert_single_step_breakpoint (gdbarch, aspace, pc);
2760 cond_negated = (itstate >> 4) & 1;
2761
2762 /* Skip all following instructions with the same
2763 condition. If there is a later instruction in the IT
2764 block with the opposite condition, set the other
2765 breakpoint there. If not, then set a breakpoint on
2766 the instruction after the IT block. */
2767 do
2768 {
2769 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
2770 pc += thumb_insn_size (inst1);
2771 itstate = thumb_advance_itstate (itstate);
2772 }
2773 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
2774
2775 return MAKE_THUMB_ADDR (pc);
2776 }
2777 }
2778 }
2779 else if (itstate & 0x0f)
2780 {
2781 /* We are in a conditional block. Check the condition. */
2782 int cond = itstate >> 4;
2783
2784 if (! condition_true (cond, status))
2785 {
2786 /* Advance to the next instruction. All the 32-bit
2787 instructions share a common prefix. */
2788 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
2789 return MAKE_THUMB_ADDR (pc + 4);
2790 else
2791 return MAKE_THUMB_ADDR (pc + 2);
2792 }
2793
2794 /* Otherwise, handle the instruction normally. */
2795 }
2796
2797 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
2798 {
2799 CORE_ADDR sp;
2800
2801 /* Fetch the saved PC from the stack. It's stored above
2802 all of the other registers. */
2803 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
2804 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
2805 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
2806 }
2807 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
2808 {
2809 unsigned long cond = bits (inst1, 8, 11);
2810 if (cond != 0x0f && condition_true (cond, status)) /* 0x0f = SWI */
2811 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
2812 }
2813 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
2814 {
2815 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
2816 }
2817 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
2818 {
2819 unsigned short inst2;
2820 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
2821
2822 /* Default to the next instruction. */
2823 nextpc = pc + 4;
2824 nextpc = MAKE_THUMB_ADDR (nextpc);
2825
2826 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
2827 {
2828 /* Branches and miscellaneous control instructions. */
2829
2830 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
2831 {
2832 /* B, BL, BLX. */
2833 int j1, j2, imm1, imm2;
2834
2835 imm1 = sbits (inst1, 0, 10);
2836 imm2 = bits (inst2, 0, 10);
2837 j1 = bit (inst2, 13);
2838 j2 = bit (inst2, 11);
2839
2840 offset = ((imm1 << 12) + (imm2 << 1));
2841 offset ^= ((!j2) << 22) | ((!j1) << 23);
2842
2843 nextpc = pc_val + offset;
2844 /* For BLX make sure to clear the low bits. */
2845 if (bit (inst2, 12) == 0)
2846 nextpc = nextpc & 0xfffffffc;
2847 }
2848 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
2849 {
2850 /* SUBS PC, LR, #imm8. */
2851 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
2852 nextpc -= inst2 & 0x00ff;
2853 }
2854 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
2855 {
2856 /* Conditional branch. */
2857 if (condition_true (bits (inst1, 6, 9), status))
2858 {
2859 int sign, j1, j2, imm1, imm2;
2860
2861 sign = sbits (inst1, 10, 10);
2862 imm1 = bits (inst1, 0, 5);
2863 imm2 = bits (inst2, 0, 10);
2864 j1 = bit (inst2, 13);
2865 j2 = bit (inst2, 11);
2866
2867 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
2868 offset += (imm1 << 12) + (imm2 << 1);
2869
2870 nextpc = pc_val + offset;
2871 }
2872 }
2873 }
2874 else if ((inst1 & 0xfe50) == 0xe810)
2875 {
2876 /* Load multiple or RFE. */
2877 int rn, offset, load_pc = 1;
2878
2879 rn = bits (inst1, 0, 3);
2880 if (bit (inst1, 7) && !bit (inst1, 8))
2881 {
2882 /* LDMIA or POP */
2883 if (!bit (inst2, 15))
2884 load_pc = 0;
2885 offset = bitcount (inst2) * 4 - 4;
2886 }
2887 else if (!bit (inst1, 7) && bit (inst1, 8))
2888 {
2889 /* LDMDB */
2890 if (!bit (inst2, 15))
2891 load_pc = 0;
2892 offset = -4;
2893 }
2894 else if (bit (inst1, 7) && bit (inst1, 8))
2895 {
2896 /* RFEIA */
2897 offset = 0;
2898 }
2899 else if (!bit (inst1, 7) && !bit (inst1, 8))
2900 {
2901 /* RFEDB */
2902 offset = -8;
2903 }
2904 else
2905 load_pc = 0;
2906
2907 if (load_pc)
2908 {
2909 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
2910 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
2911 }
2912 }
2913 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
2914 {
2915 /* MOV PC or MOVS PC. */
2916 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2917 nextpc = MAKE_THUMB_ADDR (nextpc);
2918 }
2919 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
2920 {
2921 /* LDR PC. */
2922 CORE_ADDR base;
2923 int rn, load_pc = 1;
2924
2925 rn = bits (inst1, 0, 3);
2926 base = get_frame_register_unsigned (frame, rn);
2927 if (rn == 15)
2928 {
2929 base = (base + 4) & ~(CORE_ADDR) 0x3;
2930 if (bit (inst1, 7))
2931 base += bits (inst2, 0, 11);
2932 else
2933 base -= bits (inst2, 0, 11);
2934 }
2935 else if (bit (inst1, 7))
2936 base += bits (inst2, 0, 11);
2937 else if (bit (inst2, 11))
2938 {
2939 if (bit (inst2, 10))
2940 {
2941 if (bit (inst2, 9))
2942 base += bits (inst2, 0, 7);
2943 else
2944 base -= bits (inst2, 0, 7);
2945 }
2946 }
2947 else if ((inst2 & 0x0fc0) == 0x0000)
2948 {
2949 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
2950 base += get_frame_register_unsigned (frame, rm) << shift;
2951 }
2952 else
2953 /* Reserved. */
2954 load_pc = 0;
2955
2956 if (load_pc)
2957 nextpc = get_frame_memory_unsigned (frame, base, 4);
2958 }
2959 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
2960 {
2961 /* TBB. */
2962 CORE_ADDR table, offset, length;
2963
2964 table = get_frame_register_unsigned (frame, bits (inst1, 0, 3));
2965 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2966 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
2967 nextpc = pc_val + length;
2968 }
2969 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
2970 {
2971 /* TBH. */
2972 CORE_ADDR table, offset, length;
2973
2974 table = get_frame_register_unsigned (frame, bits (inst1, 0, 3));
2975 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
2976 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
2977 nextpc = pc_val + length;
2978 }
2979 }
2980 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
2981 {
2982 if (bits (inst1, 3, 6) == 0x0f)
2983 nextpc = pc_val;
2984 else
2985 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
2986 }
2987 else if ((inst1 & 0xf500) == 0xb100)
2988 {
2989 /* CBNZ or CBZ. */
2990 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
2991 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
2992
2993 if (bit (inst1, 11) && reg != 0)
2994 nextpc = pc_val + imm;
2995 else if (!bit (inst1, 11) && reg == 0)
2996 nextpc = pc_val + imm;
2997 }
2998 return nextpc;
2999 }
3000
3001 /* Get the raw next address. PC is the current program counter, in
3002 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
3003 the alternative next instruction if there are two options.
3004
3005 The value returned has the execution state of the next instruction
3006 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
3007 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
3008 address.
3009 */
3010 static CORE_ADDR
3011 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3012 {
3013 struct gdbarch *gdbarch = get_frame_arch (frame);
3014 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3015 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3016 unsigned long pc_val;
3017 unsigned long this_instr;
3018 unsigned long status;
3019 CORE_ADDR nextpc;
3020
3021 if (arm_frame_is_thumb (frame))
3022 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
3023
3024 pc_val = (unsigned long) pc;
3025 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3026
3027 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3028 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
3029
3030 if (bits (this_instr, 28, 31) == INST_NV)
3031 switch (bits (this_instr, 24, 27))
3032 {
3033 case 0xa:
3034 case 0xb:
3035 {
3036 /* Branch with Link and change to Thumb. */
3037 nextpc = BranchDest (pc, this_instr);
3038 nextpc |= bit (this_instr, 24) << 1;
3039 nextpc = MAKE_THUMB_ADDR (nextpc);
3040 break;
3041 }
3042 case 0xc:
3043 case 0xd:
3044 case 0xe:
3045 /* Coprocessor register transfer. */
3046 if (bits (this_instr, 12, 15) == 15)
3047 error (_("Invalid update to pc in instruction"));
3048 break;
3049 }
3050 else if (condition_true (bits (this_instr, 28, 31), status))
3051 {
3052 switch (bits (this_instr, 24, 27))
3053 {
3054 case 0x0:
3055 case 0x1: /* data processing */
3056 case 0x2:
3057 case 0x3:
3058 {
3059 unsigned long operand1, operand2, result = 0;
3060 unsigned long rn;
3061 int c;
3062
3063 if (bits (this_instr, 12, 15) != 15)
3064 break;
3065
3066 if (bits (this_instr, 22, 25) == 0
3067 && bits (this_instr, 4, 7) == 9) /* multiply */
3068 error (_("Invalid update to pc in instruction"));
3069
3070 /* BX <reg>, BLX <reg> */
3071 if (bits (this_instr, 4, 27) == 0x12fff1
3072 || bits (this_instr, 4, 27) == 0x12fff3)
3073 {
3074 rn = bits (this_instr, 0, 3);
3075 nextpc = (rn == 15) ? pc_val + 8
3076 : get_frame_register_unsigned (frame, rn);
3077 return nextpc;
3078 }
3079
3080 /* Multiply into PC */
3081 c = (status & FLAG_C) ? 1 : 0;
3082 rn = bits (this_instr, 16, 19);
3083 operand1 = (rn == 15) ? pc_val + 8
3084 : get_frame_register_unsigned (frame, rn);
3085
3086 if (bit (this_instr, 25))
3087 {
3088 unsigned long immval = bits (this_instr, 0, 7);
3089 unsigned long rotate = 2 * bits (this_instr, 8, 11);
3090 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
3091 & 0xffffffff;
3092 }
3093 else /* operand 2 is a shifted register */
3094 operand2 = shifted_reg_val (frame, this_instr, c, pc_val, status);
3095
3096 switch (bits (this_instr, 21, 24))
3097 {
3098 case 0x0: /*and */
3099 result = operand1 & operand2;
3100 break;
3101
3102 case 0x1: /*eor */
3103 result = operand1 ^ operand2;
3104 break;
3105
3106 case 0x2: /*sub */
3107 result = operand1 - operand2;
3108 break;
3109
3110 case 0x3: /*rsb */
3111 result = operand2 - operand1;
3112 break;
3113
3114 case 0x4: /*add */
3115 result = operand1 + operand2;
3116 break;
3117
3118 case 0x5: /*adc */
3119 result = operand1 + operand2 + c;
3120 break;
3121
3122 case 0x6: /*sbc */
3123 result = operand1 - operand2 + c;
3124 break;
3125
3126 case 0x7: /*rsc */
3127 result = operand2 - operand1 + c;
3128 break;
3129
3130 case 0x8:
3131 case 0x9:
3132 case 0xa:
3133 case 0xb: /* tst, teq, cmp, cmn */
3134 result = (unsigned long) nextpc;
3135 break;
3136
3137 case 0xc: /*orr */
3138 result = operand1 | operand2;
3139 break;
3140
3141 case 0xd: /*mov */
3142 /* Always step into a function. */
3143 result = operand2;
3144 break;
3145
3146 case 0xe: /*bic */
3147 result = operand1 & ~operand2;
3148 break;
3149
3150 case 0xf: /*mvn */
3151 result = ~operand2;
3152 break;
3153 }
3154
3155 /* In 26-bit APCS the bottom two bits of the result are
3156 ignored, and we always end up in ARM state. */
3157 if (!arm_apcs_32)
3158 nextpc = arm_addr_bits_remove (gdbarch, result);
3159 else
3160 nextpc = result;
3161
3162 break;
3163 }
3164
3165 case 0x4:
3166 case 0x5: /* data transfer */
3167 case 0x6:
3168 case 0x7:
3169 if (bit (this_instr, 20))
3170 {
3171 /* load */
3172 if (bits (this_instr, 12, 15) == 15)
3173 {
3174 /* rd == pc */
3175 unsigned long rn;
3176 unsigned long base;
3177
3178 if (bit (this_instr, 22))
3179 error (_("Invalid update to pc in instruction"));
3180
3181 /* byte write to PC */
3182 rn = bits (this_instr, 16, 19);
3183 base = (rn == 15) ? pc_val + 8
3184 : get_frame_register_unsigned (frame, rn);
3185 if (bit (this_instr, 24))
3186 {
3187 /* pre-indexed */
3188 int c = (status & FLAG_C) ? 1 : 0;
3189 unsigned long offset =
3190 (bit (this_instr, 25)
3191 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
3192 : bits (this_instr, 0, 11));
3193
3194 if (bit (this_instr, 23))
3195 base += offset;
3196 else
3197 base -= offset;
3198 }
3199 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
3200 4, byte_order);
3201 }
3202 }
3203 break;
3204
3205 case 0x8:
3206 case 0x9: /* block transfer */
3207 if (bit (this_instr, 20))
3208 {
3209 /* LDM */
3210 if (bit (this_instr, 15))
3211 {
3212 /* loading pc */
3213 int offset = 0;
3214
3215 if (bit (this_instr, 23))
3216 {
3217 /* up */
3218 unsigned long reglist = bits (this_instr, 0, 14);
3219 offset = bitcount (reglist) * 4;
3220 if (bit (this_instr, 24)) /* pre */
3221 offset += 4;
3222 }
3223 else if (bit (this_instr, 24))
3224 offset = -4;
3225
3226 {
3227 unsigned long rn_val =
3228 get_frame_register_unsigned (frame,
3229 bits (this_instr, 16, 19));
3230 nextpc =
3231 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
3232 + offset),
3233 4, byte_order);
3234 }
3235 }
3236 }
3237 break;
3238
3239 case 0xb: /* branch & link */
3240 case 0xa: /* branch */
3241 {
3242 nextpc = BranchDest (pc, this_instr);
3243 break;
3244 }
3245
3246 case 0xc:
3247 case 0xd:
3248 case 0xe: /* coproc ops */
3249 case 0xf: /* SWI */
3250 break;
3251
3252 default:
3253 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
3254 return (pc);
3255 }
3256 }
3257
3258 return nextpc;
3259 }
3260
3261 CORE_ADDR
3262 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
3263 {
3264 struct gdbarch *gdbarch = get_frame_arch (frame);
3265 CORE_ADDR nextpc =
3266 gdbarch_addr_bits_remove (gdbarch,
3267 arm_get_next_pc_raw (frame, pc, TRUE));
3268 if (nextpc == pc)
3269 error (_("Infinite loop detected"));
3270 return nextpc;
3271 }
3272
3273 /* single_step() is called just before we want to resume the inferior,
3274 if we want to single-step it but there is no hardware or kernel
3275 single-step support. We find the target of the coming instruction
3276 and breakpoint it. */
3277
3278 int
3279 arm_software_single_step (struct frame_info *frame)
3280 {
3281 struct gdbarch *gdbarch = get_frame_arch (frame);
3282 struct address_space *aspace = get_frame_address_space (frame);
3283
3284 /* NOTE: This may insert the wrong breakpoint instruction when
3285 single-stepping over a mode-changing instruction, if the
3286 CPSR heuristics are used. */
3287
3288 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
3289 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
3290
3291 return 1;
3292 }
3293
3294 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
3295 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
3296 NULL if an error occurs. BUF is freed. */
3297
3298 static gdb_byte *
3299 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
3300 int old_len, int new_len)
3301 {
3302 gdb_byte *new_buf, *middle;
3303 int bytes_to_read = new_len - old_len;
3304
3305 new_buf = xmalloc (new_len);
3306 memcpy (new_buf + bytes_to_read, buf, old_len);
3307 xfree (buf);
3308 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
3309 {
3310 xfree (new_buf);
3311 return NULL;
3312 }
3313 return new_buf;
3314 }
3315
3316 /* An IT block is at most the 2-byte IT instruction followed by
3317 four 4-byte instructions. The furthest back we must search to
3318 find an IT block that affects the current instruction is thus
3319 2 + 3 * 4 == 14 bytes. */
3320 #define MAX_IT_BLOCK_PREFIX 14
3321
3322 /* Use a quick scan if there are more than this many bytes of
3323 code. */
3324 #define IT_SCAN_THRESHOLD 32
3325
3326 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
3327 A breakpoint in an IT block may not be hit, depending on the
3328 condition flags. */
3329 static CORE_ADDR
3330 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
3331 {
3332 gdb_byte *buf;
3333 char map_type;
3334 CORE_ADDR boundary, func_start;
3335 int buf_len, buf2_len;
3336 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
3337 int i, any, last_it, last_it_count;
3338
3339 /* If we are using BKPT breakpoints, none of this is necessary. */
3340 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
3341 return bpaddr;
3342
3343 /* ARM mode does not have this problem. */
3344 if (!arm_pc_is_thumb (bpaddr))
3345 return bpaddr;
3346
3347 /* We are setting a breakpoint in Thumb code that could potentially
3348 contain an IT block. The first step is to find how much Thumb
3349 code there is; we do not need to read outside of known Thumb
3350 sequences. */
3351 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
3352 if (map_type == 0)
3353 /* Thumb-2 code must have mapping symbols to have a chance. */
3354 return bpaddr;
3355
3356 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
3357
3358 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
3359 && func_start > boundary)
3360 boundary = func_start;
3361
3362 /* Search for a candidate IT instruction. We have to do some fancy
3363 footwork to distinguish a real IT instruction from the second
3364 half of a 32-bit instruction, but there is no need for that if
3365 there's no candidate. */
3366 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
3367 if (buf_len == 0)
3368 /* No room for an IT instruction. */
3369 return bpaddr;
3370
3371 buf = xmalloc (buf_len);
3372 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
3373 return bpaddr;
3374 any = 0;
3375 for (i = 0; i < buf_len; i += 2)
3376 {
3377 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3378 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3379 {
3380 any = 1;
3381 break;
3382 }
3383 }
3384 if (any == 0)
3385 {
3386 xfree (buf);
3387 return bpaddr;
3388 }
3389
3390 /* OK, the code bytes before this instruction contain at least one
3391 halfword which resembles an IT instruction. We know that it's
3392 Thumb code, but there are still two possibilities. Either the
3393 halfword really is an IT instruction, or it is the second half of
3394 a 32-bit Thumb instruction. The only way we can tell is to
3395 scan forwards from a known instruction boundary. */
3396 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
3397 {
3398 int definite;
3399
3400 /* There's a lot of code before this instruction. Start with an
3401 optimistic search; it's easy to recognize halfwords that can
3402 not be the start of a 32-bit instruction, and use that to
3403 lock on to the instruction boundaries. */
3404 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
3405 if (buf == NULL)
3406 return bpaddr;
3407 buf_len = IT_SCAN_THRESHOLD;
3408
3409 definite = 0;
3410 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
3411 {
3412 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3413 if (thumb_insn_size (inst1) == 2)
3414 {
3415 definite = 1;
3416 break;
3417 }
3418 }
3419
3420 /* At this point, if DEFINITE, BUF[I] is the first place we
3421 are sure that we know the instruction boundaries, and it is far
3422 enough from BPADDR that we could not miss an IT instruction
3423 affecting BPADDR. If ! DEFINITE, give up - start from a
3424 known boundary. */
3425 if (! definite)
3426 {
3427 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
3428 if (buf == NULL)
3429 return bpaddr;
3430 buf_len = bpaddr - boundary;
3431 i = 0;
3432 }
3433 }
3434 else
3435 {
3436 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
3437 if (buf == NULL)
3438 return bpaddr;
3439 buf_len = bpaddr - boundary;
3440 i = 0;
3441 }
3442
3443 /* Scan forwards. Find the last IT instruction before BPADDR. */
3444 last_it = -1;
3445 last_it_count = 0;
3446 while (i < buf_len)
3447 {
3448 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3449 last_it_count--;
3450 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3451 {
3452 last_it = i;
3453 if (inst1 & 0x0001)
3454 last_it_count = 4;
3455 else if (inst1 & 0x0002)
3456 last_it_count = 3;
3457 else if (inst1 & 0x0004)
3458 last_it_count = 2;
3459 else
3460 last_it_count = 1;
3461 }
3462 i += thumb_insn_size (inst1);
3463 }
3464
3465 xfree (buf);
3466
3467 if (last_it == -1)
3468 /* There wasn't really an IT instruction after all. */
3469 return bpaddr;
3470
3471 if (last_it_count < 1)
3472 /* It was too far away. */
3473 return bpaddr;
3474
3475 /* This really is a trouble spot. Move the breakpoint to the IT
3476 instruction. */
3477 return bpaddr - buf_len + last_it;
3478 }
3479
3480 /* ARM displaced stepping support.
3481
3482 Generally ARM displaced stepping works as follows:
3483
3484 1. When an instruction is to be single-stepped, it is first decoded by
3485 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
3486 Depending on the type of instruction, it is then copied to a scratch
3487 location, possibly in a modified form. The copy_* set of functions
3488 performs such modification, as necessary. A breakpoint is placed after
3489 the modified instruction in the scratch space to return control to GDB.
3490 Note in particular that instructions which modify the PC will no longer
3491 do so after modification.
3492
3493 2. The instruction is single-stepped, by setting the PC to the scratch
3494 location address, and resuming. Control returns to GDB when the
3495 breakpoint is hit.
3496
3497 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
3498 function used for the current instruction. This function's job is to
3499 put the CPU/memory state back to what it would have been if the
3500 instruction had been executed unmodified in its original location. */
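/* For instance (an illustrative sketch, not a description of any one
   copy_* routine): a PC-relative load such as "ldr r0, [pc, #8]" cannot
   simply run from the scratch pad, because there the PC would read as
   the scratch address plus 8. The copy_* routine therefore substitutes
   a register that has been preloaded with the original PC value, and
   the matching cleanup_* routine restores that register afterwards. */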
3501
3502 /* NOP instruction (mov r0, r0). */
3503 #define ARM_NOP 0xe1a00000
3504
3505 /* Helper for register reads for displaced stepping. In particular, this
3506 returns the PC as it would be seen by the instruction at its original
3507 location. */
3508
3509 ULONGEST
3510 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
3511 {
3512 ULONGEST ret;
3513
3514 if (regno == 15)
3515 {
3516 if (debug_displaced)
3517 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
3518 (unsigned long) from + 8);
3519 return (ULONGEST) from + 8; /* Pipeline offset. */
3520 }
3521 else
3522 {
3523 regcache_cooked_read_unsigned (regs, regno, &ret);
3524 if (debug_displaced)
3525 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
3526 regno, (unsigned long) ret);
3527 return ret;
3528 }
3529 }
3530
3531 static int
3532 displaced_in_arm_mode (struct regcache *regs)
3533 {
3534 ULONGEST ps;
3535
3536 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
3537
3538 return (ps & CPSR_T) == 0;
3539 }
3540
3541 /* Write to the PC as from a branch instruction. */
3542
3543 static void
3544 branch_write_pc (struct regcache *regs, ULONGEST val)
3545 {
3546 if (displaced_in_arm_mode (regs))
3547 /* Note: If bits 0/1 are set, this branch would be unpredictable for
3548 architecture versions < 6. */
3549 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x3);
3550 else
3551 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x1);
3552 }
3553
3554 /* Write to the PC as from a branch-exchange instruction. */
3555
3556 static void
3557 bx_write_pc (struct regcache *regs, ULONGEST val)
3558 {
3559 ULONGEST ps;
3560
3561 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
3562
3563 if ((val & 1) == 1)
3564 {
3565 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | CPSR_T);
3566 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
3567 }
3568 else if ((val & 2) == 0)
3569 {
3570 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM,
3571 ps & ~(ULONGEST) CPSR_T);
3572 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
3573 }
3574 else
3575 {
3576 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
3577 mode, align dest to 4 bytes). */
3578 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
3579 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM,
3580 ps & ~(ULONGEST) CPSR_T);
3581 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
3582 }
3583 }
3584
3585 /* Write to the PC as if from a load instruction. */
3586
3587 static void
3588 load_write_pc (struct regcache *regs, ULONGEST val)
3589 {
3590 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
3591 bx_write_pc (regs, val);
3592 else
3593 branch_write_pc (regs, val);
3594 }
3595
3596 /* Write to the PC as if from an ALU instruction. */
3597
3598 static void
3599 alu_write_pc (struct regcache *regs, ULONGEST val)
3600 {
3601 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
3602 bx_write_pc (regs, val);
3603 else
3604 branch_write_pc (regs, val);
3605 }
3606
3607 /* Helper for writing to registers for displaced stepping. Writing to the PC
 3608 has varying effects depending on the instruction which does the write:
3609 this is controlled by the WRITE_PC argument. */
3610
3611 void
3612 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
3613 int regno, ULONGEST val, enum pc_write_style write_pc)
3614 {
3615 if (regno == 15)
3616 {
3617 if (debug_displaced)
3618 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
3619 (unsigned long) val);
3620 switch (write_pc)
3621 {
3622 case BRANCH_WRITE_PC:
3623 branch_write_pc (regs, val);
3624 break;
3625
3626 case BX_WRITE_PC:
3627 bx_write_pc (regs, val);
3628 break;
3629
3630 case LOAD_WRITE_PC:
3631 load_write_pc (regs, val);
3632 break;
3633
3634 case ALU_WRITE_PC:
3635 alu_write_pc (regs, val);
3636 break;
3637
3638 case CANNOT_WRITE_PC:
3639 warning (_("Instruction wrote to PC in an unexpected way when "
3640 "single-stepping"));
3641 break;
3642
3643 default:
3644 internal_error (__FILE__, __LINE__,
3645 _("Invalid argument to displaced_write_reg"));
3646 }
3647
3648 dsc->wrote_to_pc = 1;
3649 }
3650 else
3651 {
3652 if (debug_displaced)
3653 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
3654 regno, (unsigned long) val);
3655 regcache_cooked_write_unsigned (regs, regno, val);
3656 }
3657 }
3658
3659 /* This function is used to concisely determine if an instruction INSN
3660 references PC. Register fields of interest in INSN should have the
 3661 corresponding fields of BITMASK set to 0b1111. The function returns 1 if
 3662 any of these fields in INSN reference the PC (also 0b1111, i.e. r15), and
 3663 returns 0 otherwise. */
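/* For example, with BITMASK == 0x000ff000ul (as used for ALU immediate
   instructions below) the Rd (bits 12-15) and Rn (bits 16-19) fields are
   checked, and 1 is returned only if one of them contains 0b1111 (r15).  */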
3664
3665 static int
3666 insn_references_pc (uint32_t insn, uint32_t bitmask)
3667 {
3668 uint32_t lowbit = 1;
3669
3670 while (bitmask != 0)
3671 {
3672 uint32_t mask;
3673
3674 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
3675 ;
3676
3677 if (!lowbit)
3678 break;
3679
3680 mask = lowbit * 0xf;
3681
3682 if ((insn & mask) == mask)
3683 return 1;
3684
3685 bitmask &= ~mask;
3686 }
3687
3688 return 0;
3689 }
3690
3691 /* The simplest copy function. Many instructions have the same effect no
3692 matter what address they are executed at: in those cases, use this. */
3693
3694 static int
3695 copy_unmodified (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3696 const char *iname, struct displaced_step_closure *dsc)
3697 {
3698 if (debug_displaced)
3699 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
3700 "opcode/class '%s' unmodified\n", (unsigned long) insn,
3701 iname);
3702
3703 dsc->modinsn[0] = insn;
3704
3705 return 0;
3706 }
3707
3708 /* Preload instructions with immediate offset. */
3709
3710 static void
3711 cleanup_preload (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3712 struct regcache *regs, struct displaced_step_closure *dsc)
3713 {
3714 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3715 if (!dsc->u.preload.immed)
3716 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3717 }
3718
3719 static int
3720 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3721 struct displaced_step_closure *dsc)
3722 {
3723 unsigned int rn = bits (insn, 16, 19);
3724 ULONGEST rn_val;
3725 CORE_ADDR from = dsc->insn_addr;
3726
3727 if (!insn_references_pc (insn, 0x000f0000ul))
3728 return copy_unmodified (gdbarch, insn, "preload", dsc);
3729
3730 if (debug_displaced)
3731 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
3732 (unsigned long) insn);
3733
3734 /* Preload instructions:
3735
3736 {pli/pld} [rn, #+/-imm]
3737 ->
3738 {pli/pld} [r0, #+/-imm]. */
3739
3740 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3741 rn_val = displaced_read_reg (regs, from, rn);
3742 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3743
3744 dsc->u.preload.immed = 1;
3745
3746 dsc->modinsn[0] = insn & 0xfff0ffff;
3747
3748 dsc->cleanup = &cleanup_preload;
3749
3750 return 0;
3751 }
3752
3753 /* Preload instructions with register offset. */
3754
3755 static int
3756 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3757 struct displaced_step_closure *dsc)
3758 {
3759 unsigned int rn = bits (insn, 16, 19);
3760 unsigned int rm = bits (insn, 0, 3);
3761 ULONGEST rn_val, rm_val;
3762 CORE_ADDR from = dsc->insn_addr;
3763
3764 if (!insn_references_pc (insn, 0x000f000ful))
3765 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
3766
3767 if (debug_displaced)
3768 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
3769 (unsigned long) insn);
3770
3771 /* Preload register-offset instructions:
3772
3773 {pli/pld} [rn, rm {, shift}]
3774 ->
3775 {pli/pld} [r0, r1 {, shift}]. */
3776
3777 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3778 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
3779 rn_val = displaced_read_reg (regs, from, rn);
3780 rm_val = displaced_read_reg (regs, from, rm);
3781 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3782 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
3783
3784 dsc->u.preload.immed = 0;
3785
3786 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
3787
3788 dsc->cleanup = &cleanup_preload;
3789
3790 return 0;
3791 }
3792
3793 /* Copy/cleanup coprocessor load and store instructions. */
3794
3795 static void
3796 cleanup_copro_load_store (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3797 struct regcache *regs,
3798 struct displaced_step_closure *dsc)
3799 {
3800 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3801
3802 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3803
3804 if (dsc->u.ldst.writeback)
3805 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
3806 }
3807
3808 static int
3809 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
3810 struct regcache *regs,
3811 struct displaced_step_closure *dsc)
3812 {
3813 unsigned int rn = bits (insn, 16, 19);
3814 ULONGEST rn_val;
3815 CORE_ADDR from = dsc->insn_addr;
3816
3817 if (!insn_references_pc (insn, 0x000f0000ul))
3818 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
3819
3820 if (debug_displaced)
3821 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
3822 "load/store insn %.8lx\n", (unsigned long) insn);
3823
3824 /* Coprocessor load/store instructions:
3825
3826 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
3827 ->
3828 {stc/stc2} [r0, #+/-imm].
3829
3830 ldc/ldc2 are handled identically. */
3831
3832 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
3833 rn_val = displaced_read_reg (regs, from, rn);
3834 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
3835
3836 dsc->u.ldst.writeback = bit (insn, 25);
3837 dsc->u.ldst.rn = rn;
3838
3839 dsc->modinsn[0] = insn & 0xfff0ffff;
3840
3841 dsc->cleanup = &cleanup_copro_load_store;
3842
3843 return 0;
3844 }
3845
3846 /* Clean up branch instructions (actually perform the branch, by setting
3847 PC). */
3848
3849 static void
3850 cleanup_branch (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
3851 struct displaced_step_closure *dsc)
3852 {
3853 ULONGEST from = dsc->insn_addr;
3854 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
3855 int branch_taken = condition_true (dsc->u.branch.cond, status);
3856 enum pc_write_style write_pc = dsc->u.branch.exchange
3857 ? BX_WRITE_PC : BRANCH_WRITE_PC;
3858
3859 if (!branch_taken)
3860 return;
3861
3862 if (dsc->u.branch.link)
3863 {
3864 ULONGEST pc = displaced_read_reg (regs, from, 15);
3865 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
3866 }
3867
3868 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
3869 }
3870
3871 /* Copy B/BL/BLX instructions with immediate destinations. */
3872
3873 static int
3874 copy_b_bl_blx (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3875 struct regcache *regs, struct displaced_step_closure *dsc)
3876 {
3877 unsigned int cond = bits (insn, 28, 31);
3878 int exchange = (cond == 0xf);
3879 int link = exchange || bit (insn, 24);
3880 CORE_ADDR from = dsc->insn_addr;
3881 long offset;
3882
3883 if (debug_displaced)
3884 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
3885 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
3886 (unsigned long) insn);
3887
3888 /* Implement "BL<cond> <label>" as:
3889
3890 Preparation: cond <- instruction condition
3891 Insn: mov r0, r0 (nop)
3892 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
3893
3894 B<cond> similar, but don't set r14 in cleanup. */
3895
3896 if (exchange)
3897 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
3898 then arrange the switch into Thumb mode. */
3899 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
3900 else
3901 offset = bits (insn, 0, 23) << 2;
3902
3903 if (bit (offset, 25))
3904 offset = offset | ~0x3ffffff;
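  /* For a B/BL, an imm24 field of 0xffffff gives OFFSET 0x3fffffc before the
     sign extension above and -4 afterwards, so the destination computed
     below is the original address + 8 - 4.  */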
3905
3906 dsc->u.branch.cond = cond;
3907 dsc->u.branch.link = link;
3908 dsc->u.branch.exchange = exchange;
3909 dsc->u.branch.dest = from + 8 + offset;
3910
3911 dsc->modinsn[0] = ARM_NOP;
3912
3913 dsc->cleanup = &cleanup_branch;
3914
3915 return 0;
3916 }
3917
3918 /* Copy BX/BLX with register-specified destinations. */
3919
3920 static int
3921 copy_bx_blx_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
3922 struct regcache *regs, struct displaced_step_closure *dsc)
3923 {
3924 unsigned int cond = bits (insn, 28, 31);
3925 /* BX: x12xxx1x
3926 BLX: x12xxx3x. */
3927 int link = bit (insn, 5);
3928 unsigned int rm = bits (insn, 0, 3);
3929 CORE_ADDR from = dsc->insn_addr;
3930
3931 if (debug_displaced)
3932 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
3933 "%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
3934
 3935 /* Implement "{BX,BLX}<cond> <reg>" as:
3936
3937 Preparation: cond <- instruction condition
3938 Insn: mov r0, r0 (nop)
3939 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
3940
3941 Don't set r14 in cleanup for BX. */
3942
3943 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
3944
3945 dsc->u.branch.cond = cond;
3946 dsc->u.branch.link = link;
3947 dsc->u.branch.exchange = 1;
3948
3949 dsc->modinsn[0] = ARM_NOP;
3950
3951 dsc->cleanup = &cleanup_branch;
3952
3953 return 0;
3954 }
3955
3956 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
3957
3958 static void
3959 cleanup_alu_imm (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
3960 struct regcache *regs, struct displaced_step_closure *dsc)
3961 {
3962 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
3963 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
3964 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
3965 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
3966 }
3967
3968 static int
3969 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
3970 struct displaced_step_closure *dsc)
3971 {
3972 unsigned int rn = bits (insn, 16, 19);
3973 unsigned int rd = bits (insn, 12, 15);
3974 unsigned int op = bits (insn, 21, 24);
3975 int is_mov = (op == 0xd);
3976 ULONGEST rd_val, rn_val;
3977 CORE_ADDR from = dsc->insn_addr;
3978
3979 if (!insn_references_pc (insn, 0x000ff000ul))
3980 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
3981
3982 if (debug_displaced)
3983 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
3984 "%.8lx\n", is_mov ? "move" : "ALU",
3985 (unsigned long) insn);
3986
3987 /* Instruction is of form:
3988
3989 <op><cond> rd, [rn,] #imm
3990
3991 Rewrite as:
3992
3993 Preparation: tmp1, tmp2 <- r0, r1;
3994 r0, r1 <- rd, rn
3995 Insn: <op><cond> r0, r1, #imm
3996 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
3997 */
3998
3999 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4000 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4001 rn_val = displaced_read_reg (regs, from, rn);
4002 rd_val = displaced_read_reg (regs, from, rd);
4003 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4004 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4005 dsc->rd = rd;
4006
4007 if (is_mov)
4008 dsc->modinsn[0] = insn & 0xfff00fff;
4009 else
4010 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
4011
4012 dsc->cleanup = &cleanup_alu_imm;
4013
4014 return 0;
4015 }
4016
4017 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4018
4019 static void
4020 cleanup_alu_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
4021 struct regcache *regs, struct displaced_step_closure *dsc)
4022 {
4023 ULONGEST rd_val;
4024 int i;
4025
4026 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4027
4028 for (i = 0; i < 3; i++)
4029 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4030
4031 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4032 }
4033
4034 static int
4035 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4036 struct displaced_step_closure *dsc)
4037 {
4038 unsigned int rn = bits (insn, 16, 19);
4039 unsigned int rm = bits (insn, 0, 3);
4040 unsigned int rd = bits (insn, 12, 15);
4041 unsigned int op = bits (insn, 21, 24);
4042 int is_mov = (op == 0xd);
4043 ULONGEST rd_val, rn_val, rm_val;
4044 CORE_ADDR from = dsc->insn_addr;
4045
4046 if (!insn_references_pc (insn, 0x000ff00ful))
4047 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
4048
4049 if (debug_displaced)
4050 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
4051 is_mov ? "move" : "ALU", (unsigned long) insn);
4052
4053 /* Instruction is of form:
4054
4055 <op><cond> rd, [rn,] rm [, <shift>]
4056
4057 Rewrite as:
4058
4059 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4060 r0, r1, r2 <- rd, rn, rm
4061 Insn: <op><cond> r0, r1, r2 [, <shift>]
4062 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4063 */
4064
4065 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4066 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4067 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4068 rd_val = displaced_read_reg (regs, from, rd);
4069 rn_val = displaced_read_reg (regs, from, rn);
4070 rm_val = displaced_read_reg (regs, from, rm);
4071 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4072 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4073 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4074 dsc->rd = rd;
4075
4076 if (is_mov)
4077 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
4078 else
4079 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
4080
4081 dsc->cleanup = &cleanup_alu_reg;
4082
4083 return 0;
4084 }
4085
4086 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4087
4088 static void
4089 cleanup_alu_shifted_reg (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
4090 struct regcache *regs,
4091 struct displaced_step_closure *dsc)
4092 {
4093 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4094 int i;
4095
4096 for (i = 0; i < 4; i++)
4097 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4098
4099 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4100 }
4101
4102 static int
4103 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
4104 struct regcache *regs, struct displaced_step_closure *dsc)
4105 {
4106 unsigned int rn = bits (insn, 16, 19);
4107 unsigned int rm = bits (insn, 0, 3);
4108 unsigned int rd = bits (insn, 12, 15);
4109 unsigned int rs = bits (insn, 8, 11);
4110 unsigned int op = bits (insn, 21, 24);
4111 int is_mov = (op == 0xd), i;
4112 ULONGEST rd_val, rn_val, rm_val, rs_val;
4113 CORE_ADDR from = dsc->insn_addr;
4114
4115 if (!insn_references_pc (insn, 0x000fff0ful))
4116 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
4117
4118 if (debug_displaced)
4119 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
4120 "%.8lx\n", is_mov ? "move" : "ALU",
4121 (unsigned long) insn);
4122
4123 /* Instruction is of form:
4124
4125 <op><cond> rd, [rn,] rm, <shift> rs
4126
4127 Rewrite as:
4128
4129 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4130 r0, r1, r2, r3 <- rd, rn, rm, rs
4131 Insn: <op><cond> r0, r1, r2, <shift> r3
4132 Cleanup: tmp5 <- r0
4133 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4134 rd <- tmp5
4135 */
4136
4137 for (i = 0; i < 4; i++)
4138 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4139
4140 rd_val = displaced_read_reg (regs, from, rd);
4141 rn_val = displaced_read_reg (regs, from, rn);
4142 rm_val = displaced_read_reg (regs, from, rm);
4143 rs_val = displaced_read_reg (regs, from, rs);
4144 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4145 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4146 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4147 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
4148 dsc->rd = rd;
4149
4150 if (is_mov)
4151 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
4152 else
4153 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
4154
4155 dsc->cleanup = &cleanup_alu_shifted_reg;
4156
4157 return 0;
4158 }
4159
4160 /* Clean up load instructions. */
4161
4162 static void
4163 cleanup_load (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
4164 struct displaced_step_closure *dsc)
4165 {
4166 ULONGEST rt_val, rt_val2 = 0, rn_val;
4167 CORE_ADDR from = dsc->insn_addr;
4168
4169 rt_val = displaced_read_reg (regs, from, 0);
4170 if (dsc->u.ldst.xfersize == 8)
4171 rt_val2 = displaced_read_reg (regs, from, 1);
4172 rn_val = displaced_read_reg (regs, from, 2);
4173
4174 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4175 if (dsc->u.ldst.xfersize > 4)
4176 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4177 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4178 if (!dsc->u.ldst.immed)
4179 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4180
4181 /* Handle register writeback. */
4182 if (dsc->u.ldst.writeback)
4183 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4184 /* Put result in right place. */
4185 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
4186 if (dsc->u.ldst.xfersize == 8)
4187 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
4188 }
4189
4190 /* Clean up store instructions. */
4191
4192 static void
4193 cleanup_store (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
4194 struct displaced_step_closure *dsc)
4195 {
4196 CORE_ADDR from = dsc->insn_addr;
4197 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
4198
4199 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4200 if (dsc->u.ldst.xfersize > 4)
4201 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4202 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4203 if (!dsc->u.ldst.immed)
4204 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4205 if (!dsc->u.ldst.restore_r4)
4206 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
4207
4208 /* Writeback. */
4209 if (dsc->u.ldst.writeback)
4210 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4211 }
4212
4213 /* Copy "extra" load/store instructions. These are halfword/doubleword
4214 transfers, which have a different encoding to byte/word transfers. */
4215
4216 static int
 4217 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
4218 struct regcache *regs, struct displaced_step_closure *dsc)
4219 {
4220 unsigned int op1 = bits (insn, 20, 24);
4221 unsigned int op2 = bits (insn, 5, 6);
4222 unsigned int rt = bits (insn, 12, 15);
4223 unsigned int rn = bits (insn, 16, 19);
4224 unsigned int rm = bits (insn, 0, 3);
4225 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
4226 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
4227 int immed = (op1 & 0x4) != 0;
4228 int opcode;
4229 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
4230 CORE_ADDR from = dsc->insn_addr;
4231
4232 if (!insn_references_pc (insn, 0x000ff00ful))
4233 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
4234
4235 if (debug_displaced)
4236 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
 4237 "insn %.8lx\n", unprivileged ? "unprivileged " : "",
4238 (unsigned long) insn);
4239
4240 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
4241
4242 if (opcode < 0)
4243 internal_error (__FILE__, __LINE__,
4244 _("copy_extra_ld_st: instruction decode error"));
4245
4246 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4247 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4248 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4249 if (!immed)
4250 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4251
4252 rt_val = displaced_read_reg (regs, from, rt);
4253 if (bytesize[opcode] == 8)
4254 rt_val2 = displaced_read_reg (regs, from, rt + 1);
4255 rn_val = displaced_read_reg (regs, from, rn);
4256 if (!immed)
4257 rm_val = displaced_read_reg (regs, from, rm);
4258
4259 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4260 if (bytesize[opcode] == 8)
4261 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
4262 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4263 if (!immed)
4264 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4265
4266 dsc->rd = rt;
4267 dsc->u.ldst.xfersize = bytesize[opcode];
4268 dsc->u.ldst.rn = rn;
4269 dsc->u.ldst.immed = immed;
4270 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4271 dsc->u.ldst.restore_r4 = 0;
4272
4273 if (immed)
4274 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
4275 ->
4276 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
4277 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4278 else
4279 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
4280 ->
4281 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
4282 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4283
4284 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
4285
4286 return 0;
4287 }
4288
4289 /* Copy byte/word loads and stores. */
4290
4291 static int
4292 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
4293 struct regcache *regs,
4294 struct displaced_step_closure *dsc, int load, int byte,
4295 int usermode)
4296 {
4297 int immed = !bit (insn, 25);
4298 unsigned int rt = bits (insn, 12, 15);
4299 unsigned int rn = bits (insn, 16, 19);
4300 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
4301 ULONGEST rt_val, rn_val, rm_val = 0;
4302 CORE_ADDR from = dsc->insn_addr;
4303
4304 if (!insn_references_pc (insn, 0x000ff00ful))
4305 return copy_unmodified (gdbarch, insn, "load/store", dsc);
4306
4307 if (debug_displaced)
4308 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
4309 load ? (byte ? "ldrb" : "ldr")
4310 : (byte ? "strb" : "str"), usermode ? "t" : "",
4311 (unsigned long) insn);
4312
4313 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4314 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4315 if (!immed)
4316 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4317 if (!load)
4318 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
4319
4320 rt_val = displaced_read_reg (regs, from, rt);
4321 rn_val = displaced_read_reg (regs, from, rn);
4322 if (!immed)
4323 rm_val = displaced_read_reg (regs, from, rm);
4324
4325 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4326 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4327 if (!immed)
4328 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4329
4330 dsc->rd = rt;
4331 dsc->u.ldst.xfersize = byte ? 1 : 4;
4332 dsc->u.ldst.rn = rn;
4333 dsc->u.ldst.immed = immed;
4334 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4335
4336 /* To write PC we can do:
4337
4338 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
4339 scratch+4: ldr r4, temp
4340 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
4341 scratch+12: add r4, r4, #8 (r4 = offset)
4342 scratch+16: add r0, r0, r4
4343 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
 4344 scratch+24: <breakpoint>, scratch+28: <temp>
4345
4346 Otherwise we don't know what value to write for PC, since the offset is
4347 architecture-dependent (sometimes PC+8, sometimes PC+12). */
4348
4349 if (load || rt != 15)
4350 {
4351 dsc->u.ldst.restore_r4 = 0;
4352
4353 if (immed)
4354 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
4355 ->
4356 {ldr,str}[b]<cond> r0, [r2, #imm]. */
4357 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4358 else
4359 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
4360 ->
4361 {ldr,str}[b]<cond> r0, [r2, r3]. */
4362 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4363 }
4364 else
4365 {
4366 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
4367 dsc->u.ldst.restore_r4 = 1;
4368
4369 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
4370 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
4371 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
4372 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
4373 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
4374
4375 /* As above. */
4376 if (immed)
4377 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
4378 else
4379 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
4380
4381 dsc->modinsn[6] = 0x0; /* breakpoint location. */
4382 dsc->modinsn[7] = 0x0; /* scratch space. */
4383
4384 dsc->numinsns = 6;
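  /* NUMINSNS deliberately excludes the last two slots: arm_displaced_init_closure
     writes the breakpoint at scratch+24 itself, and scratch+28 is only
     written at run time by the "str pc, [pc, #20]" above.  */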
4385 }
4386
4387 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
4388
4389 return 0;
4390 }
4391
4392 /* Cleanup LDM instructions with fully-populated register list. This is an
4393 unfortunate corner case: it's impossible to implement correctly by modifying
4394 the instruction. The issue is as follows: we have an instruction,
4395
4396 ldm rN, {r0-r15}
4397
4398 which we must rewrite to avoid loading PC. A possible solution would be to
4399 do the load in two halves, something like (with suitable cleanup
4400 afterwards):
4401
4402 mov r8, rN
4403 ldm[id][ab] r8!, {r0-r7}
4404 str r7, <temp>
4405 ldm[id][ab] r8, {r7-r14}
4406 <bkpt>
4407
4408 but at present there's no suitable place for <temp>, since the scratch space
4409 is overwritten before the cleanup routine is called. For now, we simply
4410 emulate the instruction. */
4411
4412 static void
4413 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
4414 struct displaced_step_closure *dsc)
4415 {
4416 ULONGEST from = dsc->insn_addr;
4417 int inc = dsc->u.block.increment;
4418 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
4419 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
4420 uint32_t regmask = dsc->u.block.regmask;
4421 int regno = inc ? 0 : 15;
4422 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
4423 int exception_return = dsc->u.block.load && dsc->u.block.user
4424 && (regmask & 0x8000) != 0;
4425 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4426 int do_transfer = condition_true (dsc->u.block.cond, status);
4427 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4428
4429 if (!do_transfer)
4430 return;
4431
4432 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
4433 sensible we can do here. Complain loudly. */
4434 if (exception_return)
4435 error (_("Cannot single-step exception return"));
4436
4437 /* We don't handle any stores here for now. */
4438 gdb_assert (dsc->u.block.load != 0);
4439
4440 if (debug_displaced)
4441 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
4442 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
4443 dsc->u.block.increment ? "inc" : "dec",
4444 dsc->u.block.before ? "before" : "after");
4445
4446 while (regmask)
4447 {
4448 uint32_t memword;
4449
4450 if (inc)
4451 while (regno <= 15 && (regmask & (1 << regno)) == 0)
4452 regno++;
4453 else
4454 while (regno >= 0 && (regmask & (1 << regno)) == 0)
4455 regno--;
4456
4457 xfer_addr += bump_before;
4458
4459 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
4460 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
4461
4462 xfer_addr += bump_after;
4463
4464 regmask &= ~(1 << regno);
4465 }
4466
4467 if (dsc->u.block.writeback)
4468 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
4469 CANNOT_WRITE_PC);
4470 }
4471
4472 /* Clean up an STM which included the PC in the register list. */
4473
4474 static void
4475 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
4476 struct displaced_step_closure *dsc)
4477 {
4478 ULONGEST from = dsc->insn_addr;
4479 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4480 int store_executed = condition_true (dsc->u.block.cond, status);
4481 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
4482 CORE_ADDR stm_insn_addr;
4483 uint32_t pc_val;
4484 long offset;
4485 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4486
4487 /* If condition code fails, there's nothing else to do. */
4488 if (!store_executed)
4489 return;
4490
4491 if (dsc->u.block.increment)
4492 {
4493 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
4494
4495 if (dsc->u.block.before)
4496 pc_stored_at += 4;
4497 }
4498 else
4499 {
4500 pc_stored_at = dsc->u.block.xfer_addr;
4501
4502 if (dsc->u.block.before)
4503 pc_stored_at -= 4;
4504 }
4505
4506 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
4507 stm_insn_addr = dsc->scratch_base;
4508 offset = pc_val - stm_insn_addr;
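  /* OFFSET is now the distance between the PC value the displaced STM stored
     and the address of that STM, i.e. this CPU's PC store offset (typically
     8 or 12 bytes).  */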
4509
4510 if (debug_displaced)
4511 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
4512 "STM instruction\n", offset);
4513
4514 /* Rewrite the stored PC to the proper value for the non-displaced original
4515 instruction. */
4516 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
4517 dsc->insn_addr + offset);
4518 }
4519
4520 /* Clean up an LDM which includes the PC in the register list. We clumped all
4521 the registers in the transferred list into a contiguous range r0...rX (to
4522 avoid loading PC directly and losing control of the debugged program), so we
4523 must undo that here. */
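/* For example (illustrative): "ldm r6, {r1, r5, pc}" is executed out of line
   as "ldm r6, {r0, r1, r2}"; this cleanup then moves r2 into the PC, r1 into
   r5 and r0 into r1, and finally restores r0 and r2 from the values saved in
   DSC->tmp[].  */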
4524
4525 static void
4526 cleanup_block_load_pc (struct gdbarch *gdbarch ATTRIBUTE_UNUSED,
4527 struct regcache *regs,
4528 struct displaced_step_closure *dsc)
4529 {
4530 ULONGEST from = dsc->insn_addr;
4531 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4532 int load_executed = condition_true (dsc->u.block.cond, status), i;
4533 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
4534 unsigned int regs_loaded = bitcount (mask);
4535 unsigned int num_to_shuffle = regs_loaded, clobbered;
4536
4537 /* The method employed here will fail if the register list is fully populated
4538 (we need to avoid loading PC directly). */
4539 gdb_assert (num_to_shuffle < 16);
4540
4541 if (!load_executed)
4542 return;
4543
4544 clobbered = (1 << num_to_shuffle) - 1;
4545
4546 while (num_to_shuffle > 0)
4547 {
4548 if ((mask & (1 << write_reg)) != 0)
4549 {
4550 unsigned int read_reg = num_to_shuffle - 1;
4551
4552 if (read_reg != write_reg)
4553 {
4554 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
4555 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
4556 if (debug_displaced)
4557 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
4558 "loaded register r%d to r%d\n"), read_reg,
4559 write_reg);
4560 }
4561 else if (debug_displaced)
4562 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
4563 "r%d already in the right place\n"),
4564 write_reg);
4565
4566 clobbered &= ~(1 << write_reg);
4567
4568 num_to_shuffle--;
4569 }
4570
4571 write_reg--;
4572 }
4573
4574 /* Restore any registers we scribbled over. */
4575 for (write_reg = 0; clobbered != 0; write_reg++)
4576 {
4577 if ((clobbered & (1 << write_reg)) != 0)
4578 {
4579 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
4580 CANNOT_WRITE_PC);
4581 if (debug_displaced)
4582 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
4583 "clobbered register r%d\n"), write_reg);
4584 clobbered &= ~(1 << write_reg);
4585 }
4586 }
4587
4588 /* Perform register writeback manually. */
4589 if (dsc->u.block.writeback)
4590 {
4591 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
4592
4593 if (dsc->u.block.increment)
4594 new_rn_val += regs_loaded * 4;
4595 else
4596 new_rn_val -= regs_loaded * 4;
4597
4598 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
4599 CANNOT_WRITE_PC);
4600 }
4601 }
4602
4603 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
4604 in user-level code (in particular exception return, ldm rn, {...pc}^). */
4605
4606 static int
4607 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4608 struct displaced_step_closure *dsc)
4609 {
4610 int load = bit (insn, 20);
4611 int user = bit (insn, 22);
4612 int increment = bit (insn, 23);
4613 int before = bit (insn, 24);
4614 int writeback = bit (insn, 21);
4615 int rn = bits (insn, 16, 19);
4616 CORE_ADDR from = dsc->insn_addr;
4617
4618 /* Block transfers which don't mention PC can be run directly out-of-line. */
4619 if (rn != 15 && (insn & 0x8000) == 0)
4620 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
4621
4622 if (rn == 15)
4623 {
4624 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
4625 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
4626 }
4627
4628 if (debug_displaced)
4629 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
4630 "%.8lx\n", (unsigned long) insn);
4631
4632 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
4633 dsc->u.block.rn = rn;
4634
4635 dsc->u.block.load = load;
4636 dsc->u.block.user = user;
4637 dsc->u.block.increment = increment;
4638 dsc->u.block.before = before;
4639 dsc->u.block.writeback = writeback;
4640 dsc->u.block.cond = bits (insn, 28, 31);
4641
4642 dsc->u.block.regmask = insn & 0xffff;
4643
4644 if (load)
4645 {
4646 if ((insn & 0xffff) == 0xffff)
4647 {
4648 /* LDM with a fully-populated register list. This case is
4649 particularly tricky. Implement for now by fully emulating the
4650 instruction (which might not behave perfectly in all cases, but
4651 these instructions should be rare enough for that not to matter
4652 too much). */
4653 dsc->modinsn[0] = ARM_NOP;
4654
4655 dsc->cleanup = &cleanup_block_load_all;
4656 }
4657 else
4658 {
4659 /* LDM of a list of registers which includes PC. Implement by
4660 rewriting the list of registers to be transferred into a
4661 contiguous chunk r0...rX before doing the transfer, then shuffling
4662 registers into the correct places in the cleanup routine. */
4663 unsigned int regmask = insn & 0xffff;
 4664 unsigned int num_in_list = bitcount (regmask), new_regmask;
 4665 unsigned int i;
4666
4667 for (i = 0; i < num_in_list; i++)
4668 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4669
4670 /* Writeback makes things complicated. We need to avoid clobbering
4671 the base register with one of the registers in our modified
4672 register list, but just using a different register can't work in
4673 all cases, e.g.:
4674
4675 ldm r14!, {r0-r13,pc}
4676
4677 which would need to be rewritten as:
4678
4679 ldm rN!, {r0-r14}
4680
4681 but that can't work, because there's no free register for N.
4682
4683 Solve this by turning off the writeback bit, and emulating
4684 writeback manually in the cleanup routine. */
4685
4686 if (writeback)
4687 insn &= ~(1 << 21);
4688
4689 new_regmask = (1 << num_in_list) - 1;
4690
4691 if (debug_displaced)
4692 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
4693 "{..., pc}: original reg list %.4x, modified "
4694 "list %.4x\n"), rn, writeback ? "!" : "",
4695 (int) insn & 0xffff, new_regmask);
4696
4697 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
4698
4699 dsc->cleanup = &cleanup_block_load_pc;
4700 }
4701 }
4702 else
4703 {
4704 /* STM of a list of registers which includes PC. Run the instruction
4705 as-is, but out of line: this will store the wrong value for the PC,
4706 so we must manually fix up the memory in the cleanup routine.
4707 Doing things this way has the advantage that we can auto-detect
4708 the offset of the PC write (which is architecture-dependent) in
4709 the cleanup routine. */
4710 dsc->modinsn[0] = insn;
4711
4712 dsc->cleanup = &cleanup_block_store_pc;
4713 }
4714
4715 return 0;
4716 }
4717
4718 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
4719 for Linux, where some SVC instructions must be treated specially. */
4720
4721 static void
4722 cleanup_svc (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, struct regcache *regs,
4723 struct displaced_step_closure *dsc)
4724 {
4725 CORE_ADDR from = dsc->insn_addr;
4726 CORE_ADDR resume_addr = from + 4;
4727
4728 if (debug_displaced)
4729 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
4730 "%.8lx\n", (unsigned long) resume_addr);
4731
4732 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
4733 }
4734
4735 static int
4736 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
4737 struct regcache *regs, struct displaced_step_closure *dsc)
4738 {
4739 CORE_ADDR from = dsc->insn_addr;
4740
4741 /* Allow OS-specific code to override SVC handling. */
4742 if (dsc->u.svc.copy_svc_os)
4743 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
4744
4745 if (debug_displaced)
4746 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
4747 (unsigned long) insn);
4748
4749 /* Preparation: none.
4750 Insn: unmodified svc.
4751 Cleanup: pc <- insn_addr + 4. */
4752
4753 dsc->modinsn[0] = insn;
4754
4755 dsc->cleanup = &cleanup_svc;
4756 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
4757 instruction. */
4758 dsc->wrote_to_pc = 1;
4759
4760 return 0;
4761 }
4762
4763 /* Copy undefined instructions. */
4764
4765 static int
4766 copy_undef (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
4767 struct displaced_step_closure *dsc)
4768 {
4769 if (debug_displaced)
4770 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
4771 (unsigned long) insn);
4772
4773 dsc->modinsn[0] = insn;
4774
4775 return 0;
4776 }
4777
4778 /* Copy unpredictable instructions. */
4779
4780 static int
4781 copy_unpred (struct gdbarch *gdbarch ATTRIBUTE_UNUSED, uint32_t insn,
4782 struct displaced_step_closure *dsc)
4783 {
4784 if (debug_displaced)
4785 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
4786 "%.8lx\n", (unsigned long) insn);
4787
4788 dsc->modinsn[0] = insn;
4789
4790 return 0;
4791 }
4792
4793 /* The decode_* functions are instruction decoding helpers. They mostly follow
4794 the presentation in the ARM ARM. */
4795
4796 static int
4797 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
4798 struct regcache *regs,
4799 struct displaced_step_closure *dsc)
4800 {
4801 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
4802 unsigned int rn = bits (insn, 16, 19);
4803
4804 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
4805 return copy_unmodified (gdbarch, insn, "cps", dsc);
 4806 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0x1) == 0x1)
4807 return copy_unmodified (gdbarch, insn, "setend", dsc);
4808 else if ((op1 & 0x60) == 0x20)
4809 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
4810 else if ((op1 & 0x71) == 0x40)
4811 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
4812 else if ((op1 & 0x77) == 0x41)
4813 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
4814 else if ((op1 & 0x77) == 0x45)
4815 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
4816 else if ((op1 & 0x77) == 0x51)
4817 {
4818 if (rn != 0xf)
4819 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
4820 else
4821 return copy_unpred (gdbarch, insn, dsc);
4822 }
4823 else if ((op1 & 0x77) == 0x55)
4824 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
4825 else if (op1 == 0x57)
4826 switch (op2)
4827 {
4828 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
4829 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
4830 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
4831 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
4832 default: return copy_unpred (gdbarch, insn, dsc);
4833 }
4834 else if ((op1 & 0x63) == 0x43)
4835 return copy_unpred (gdbarch, insn, dsc);
4836 else if ((op2 & 0x1) == 0x0)
4837 switch (op1 & ~0x80)
4838 {
4839 case 0x61:
4840 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
4841 case 0x65:
4842 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
4843 case 0x71: case 0x75:
4844 /* pld/pldw reg. */
4845 return copy_preload_reg (gdbarch, insn, regs, dsc);
4846 case 0x63: case 0x67: case 0x73: case 0x77:
4847 return copy_unpred (gdbarch, insn, dsc);
4848 default:
4849 return copy_undef (gdbarch, insn, dsc);
4850 }
4851 else
4852 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
4853 }
4854
4855 static int
4856 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
4857 struct regcache *regs, struct displaced_step_closure *dsc)
4858 {
4859 if (bit (insn, 27) == 0)
4860 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
4861 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
4862 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
4863 {
4864 case 0x0: case 0x2:
4865 return copy_unmodified (gdbarch, insn, "srs", dsc);
4866
4867 case 0x1: case 0x3:
4868 return copy_unmodified (gdbarch, insn, "rfe", dsc);
4869
4870 case 0x4: case 0x5: case 0x6: case 0x7:
4871 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
4872
4873 case 0x8:
4874 switch ((insn & 0xe00000) >> 21)
4875 {
4876 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
4877 /* stc/stc2. */
4878 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4879
4880 case 0x2:
4881 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
4882
4883 default:
4884 return copy_undef (gdbarch, insn, dsc);
4885 }
4886
4887 case 0x9:
4888 {
4889 int rn_f = (bits (insn, 16, 19) == 0xf);
4890 switch ((insn & 0xe00000) >> 21)
4891 {
4892 case 0x1: case 0x3:
4893 /* ldc/ldc2 imm (undefined for rn == pc). */
4894 return rn_f ? copy_undef (gdbarch, insn, dsc)
4895 : copy_copro_load_store (gdbarch, insn, regs, dsc);
4896
4897 case 0x2:
4898 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
4899
4900 case 0x4: case 0x5: case 0x6: case 0x7:
4901 /* ldc/ldc2 lit (undefined for rn != pc). */
4902 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
4903 : copy_undef (gdbarch, insn, dsc);
4904
4905 default:
4906 return copy_undef (gdbarch, insn, dsc);
4907 }
4908 }
4909
4910 case 0xa:
4911 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
4912
4913 case 0xb:
4914 if (bits (insn, 16, 19) == 0xf)
4915 /* ldc/ldc2 lit. */
4916 return copy_copro_load_store (gdbarch, insn, regs, dsc);
4917 else
4918 return copy_undef (gdbarch, insn, dsc);
4919
4920 case 0xc:
4921 if (bit (insn, 4))
4922 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
4923 else
4924 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4925
4926 case 0xd:
4927 if (bit (insn, 4))
4928 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
4929 else
4930 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
4931
4932 default:
4933 return copy_undef (gdbarch, insn, dsc);
4934 }
4935 }
4936
4937 /* Decode miscellaneous instructions in dp/misc encoding space. */
4938
4939 static int
4940 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
4941 struct regcache *regs, struct displaced_step_closure *dsc)
4942 {
4943 unsigned int op2 = bits (insn, 4, 6);
4944 unsigned int op = bits (insn, 21, 22);
4945 unsigned int op1 = bits (insn, 16, 19);
4946
4947 switch (op2)
4948 {
4949 case 0x0:
4950 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
4951
4952 case 0x1:
4953 if (op == 0x1) /* bx. */
4954 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
4955 else if (op == 0x3)
4956 return copy_unmodified (gdbarch, insn, "clz", dsc);
4957 else
4958 return copy_undef (gdbarch, insn, dsc);
4959
4960 case 0x2:
4961 if (op == 0x1)
4962 /* Not really supported. */
4963 return copy_unmodified (gdbarch, insn, "bxj", dsc);
4964 else
4965 return copy_undef (gdbarch, insn, dsc);
4966
4967 case 0x3:
4968 if (op == 0x1)
4969 return copy_bx_blx_reg (gdbarch, insn, regs, dsc); /* blx register. */
4970 else
4971 return copy_undef (gdbarch, insn, dsc);
4972
4973 case 0x5:
4974 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
4975
4976 case 0x7:
4977 if (op == 0x1)
4978 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
4979 else if (op == 0x3)
4980 /* Not really supported. */
4981 return copy_unmodified (gdbarch, insn, "smc", dsc);
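      /* Any other OP value falls through to the default case below.  */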
4982
4983 default:
4984 return copy_undef (gdbarch, insn, dsc);
4985 }
4986 }
4987
4988 static int
4989 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4990 struct displaced_step_closure *dsc)
4991 {
4992 if (bit (insn, 25))
4993 switch (bits (insn, 20, 24))
4994 {
4995 case 0x10:
4996 return copy_unmodified (gdbarch, insn, "movw", dsc);
4997
4998 case 0x14:
4999 return copy_unmodified (gdbarch, insn, "movt", dsc);
5000
5001 case 0x12: case 0x16:
5002 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
5003
5004 default:
5005 return copy_alu_imm (gdbarch, insn, regs, dsc);
5006 }
5007 else
5008 {
5009 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
5010
5011 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
5012 return copy_alu_reg (gdbarch, insn, regs, dsc);
5013 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
5014 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
5015 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
5016 return decode_miscellaneous (gdbarch, insn, regs, dsc);
5017 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
5018 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
5019 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
5020 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
5021 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
5022 return copy_unmodified (gdbarch, insn, "synch", dsc);
5023 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
 5024 /* The 2nd argument means "unprivileged". */
5025 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
5026 dsc);
5027 }
5028
5029 /* Should be unreachable. */
5030 return 1;
5031 }
5032
5033 static int
5034 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
5035 struct regcache *regs,
5036 struct displaced_step_closure *dsc)
5037 {
5038 int a = bit (insn, 25), b = bit (insn, 4);
5039 uint32_t op1 = bits (insn, 20, 24);
5040 int rn_f = bits (insn, 16, 19) == 0xf;
5041
5042 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
5043 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
5044 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
5045 else if ((!a && (op1 & 0x17) == 0x02)
5046 || (a && (op1 & 0x17) == 0x02 && !b))
5047 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
5048 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
5049 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
5050 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
5051 else if ((!a && (op1 & 0x17) == 0x03)
5052 || (a && (op1 & 0x17) == 0x03 && !b))
5053 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
5054 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
5055 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
5056 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
5057 else if ((!a && (op1 & 0x17) == 0x06)
5058 || (a && (op1 & 0x17) == 0x06 && !b))
5059 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
5060 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
5061 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
5062 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
5063 else if ((!a && (op1 & 0x17) == 0x07)
5064 || (a && (op1 & 0x17) == 0x07 && !b))
5065 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
5066
5067 /* Should be unreachable. */
5068 return 1;
5069 }
5070
5071 static int
5072 decode_media (struct gdbarch *gdbarch, uint32_t insn,
5073 struct displaced_step_closure *dsc)
5074 {
5075 switch (bits (insn, 20, 24))
5076 {
5077 case 0x00: case 0x01: case 0x02: case 0x03:
5078 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
5079
5080 case 0x04: case 0x05: case 0x06: case 0x07:
5081 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
5082
5083 case 0x08: case 0x09: case 0x0a: case 0x0b:
5084 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
5085 return copy_unmodified (gdbarch, insn,
5086 "decode/pack/unpack/saturate/reverse", dsc);
5087
5088 case 0x18:
5089 if (bits (insn, 5, 7) == 0) /* op2. */
5090 {
5091 if (bits (insn, 12, 15) == 0xf)
5092 return copy_unmodified (gdbarch, insn, "usad8", dsc);
5093 else
5094 return copy_unmodified (gdbarch, insn, "usada8", dsc);
5095 }
5096 else
5097 return copy_undef (gdbarch, insn, dsc);
5098
5099 case 0x1a: case 0x1b:
5100 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5101 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
5102 else
5103 return copy_undef (gdbarch, insn, dsc);
5104
5105 case 0x1c: case 0x1d:
5106 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
5107 {
5108 if (bits (insn, 0, 3) == 0xf)
5109 return copy_unmodified (gdbarch, insn, "bfc", dsc);
5110 else
5111 return copy_unmodified (gdbarch, insn, "bfi", dsc);
5112 }
5113 else
5114 return copy_undef (gdbarch, insn, dsc);
5115
5116 case 0x1e: case 0x1f:
5117 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5118 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
5119 else
5120 return copy_undef (gdbarch, insn, dsc);
5121 }
5122
5123 /* Should be unreachable. */
5124 return 1;
5125 }
5126
5127 static int
5128 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
5129 struct regcache *regs, struct displaced_step_closure *dsc)
5130 {
5131 if (bit (insn, 25))
5132 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5133 else
5134 return copy_block_xfer (gdbarch, insn, regs, dsc);
5135 }
5136
5137 static int
5138 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
5139 struct regcache *regs, struct displaced_step_closure *dsc)
5140 {
5141 unsigned int opcode = bits (insn, 20, 24);
5142
5143 switch (opcode)
5144 {
5145 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
5146 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
5147
5148 case 0x08: case 0x0a: case 0x0c: case 0x0e:
5149 case 0x12: case 0x16:
5150 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
5151
5152 case 0x09: case 0x0b: case 0x0d: case 0x0f:
5153 case 0x13: case 0x17:
5154 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
5155
5156 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
5157 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
5158 /* Note: no writeback for these instructions. Bit 25 will always be
5159 zero though (via caller), so the following works OK. */
5160 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5161 }
5162
5163 /* Should be unreachable. */
5164 return 1;
5165 }
5166
5167 static int
5168 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5169 struct regcache *regs, struct displaced_step_closure *dsc)
5170 {
5171 unsigned int op1 = bits (insn, 20, 25);
5172 int op = bit (insn, 4);
5173 unsigned int coproc = bits (insn, 8, 11);
5174 unsigned int rn = bits (insn, 16, 19);
5175
5176 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
5177 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
5178 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
5179 && (coproc & 0xe) != 0xa)
5180 /* stc/stc2. */
5181 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5182 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
5183 && (coproc & 0xe) != 0xa)
5184 /* ldc/ldc2 imm/lit. */
5185 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5186 else if ((op1 & 0x3e) == 0x00)
5187 return copy_undef (gdbarch, insn, dsc);
5188 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
5189 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
5190 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
5191 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5192 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
5193 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5194 else if ((op1 & 0x30) == 0x20 && !op)
5195 {
5196 if ((coproc & 0xe) == 0xa)
5197 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
5198 else
5199 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5200 }
5201 else if ((op1 & 0x30) == 0x20 && op)
5202 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
5203 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
5204 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5205 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
5206 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5207 else if ((op1 & 0x30) == 0x30)
5208 return copy_svc (gdbarch, insn, to, regs, dsc);
5209 else
5210 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
5211 }
5212
5213 void
5214 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
5215 CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
5216 struct displaced_step_closure *dsc)
5217 {
5218 int err = 0;
5219
5220 if (!displaced_in_arm_mode (regs))
5221 error (_("Displaced stepping is only supported in ARM mode"));
5222
5223 /* Most displaced instructions use a 1-instruction scratch space, so set this
5224 here and override below if/when necessary. */
5225 dsc->numinsns = 1;
5226 dsc->insn_addr = from;
5227 dsc->scratch_base = to;
5228 dsc->cleanup = NULL;
5229 dsc->wrote_to_pc = 0;
5230
5231 if ((insn & 0xf0000000) == 0xf0000000)
5232 err = decode_unconditional (gdbarch, insn, regs, dsc);
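  /* The switch index below combines instruction bits 27:25 (shifted left by
     one) with bit 4.  */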
5233 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
5234 {
5235 case 0x0: case 0x1: case 0x2: case 0x3:
5236 err = decode_dp_misc (gdbarch, insn, regs, dsc);
5237 break;
5238
5239 case 0x4: case 0x5: case 0x6:
5240 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
5241 break;
5242
5243 case 0x7:
5244 err = decode_media (gdbarch, insn, dsc);
5245 break;
5246
5247 case 0x8: case 0x9: case 0xa: case 0xb:
5248 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
5249 break;
5250
5251 case 0xc: case 0xd: case 0xe: case 0xf:
5252 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
5253 break;
5254 }
5255
5256 if (err)
5257 internal_error (__FILE__, __LINE__,
5258 _("arm_process_displaced_insn: Instruction decode error"));
5259 }
5260
5261 /* Actually set up the scratch space for a displaced instruction. */
5262
5263 void
5264 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
5265 CORE_ADDR to, struct displaced_step_closure *dsc)
5266 {
5267 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5268 unsigned int i;
5269 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5270
5271 /* Poke modified instruction(s). */
5272 for (i = 0; i < dsc->numinsns; i++)
5273 {
5274 if (debug_displaced)
5275 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
5276 "%.8lx\n", (unsigned long) dsc->modinsn[i],
5277 (unsigned long) to + i * 4);
5278 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
5279 dsc->modinsn[i]);
5280 }
5281
5282 /* Put breakpoint afterwards. */
5283 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
5284 tdep->arm_breakpoint_size);
5285
5286 if (debug_displaced)
5287 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
5288 paddress (gdbarch, from), paddress (gdbarch, to));
5289 }
5290
5291 /* Entry point for copying an instruction into scratch space for displaced
5292 stepping. */
5293
5294 struct displaced_step_closure *
5295 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
5296 CORE_ADDR from, CORE_ADDR to,
5297 struct regcache *regs)
5298 {
5299 struct displaced_step_closure *dsc
5300 = xmalloc (sizeof (struct displaced_step_closure));
5301 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5302 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
5303
5304 if (debug_displaced)
5305 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
5306 "at %.8lx\n", (unsigned long) insn,
5307 (unsigned long) from);
5308
5309 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
5310 arm_displaced_init_closure (gdbarch, from, to, dsc);
5311
5312 return dsc;
5313 }
5314
5315 /* Entry point for cleaning things up after a displaced instruction has been
5316 single-stepped. */
5317
5318 void
5319 arm_displaced_step_fixup (struct gdbarch *gdbarch,
5320 struct displaced_step_closure *dsc,
5321 CORE_ADDR from, CORE_ADDR to,
5322 struct regcache *regs)
5323 {
5324 if (dsc->cleanup)
5325 dsc->cleanup (gdbarch, regs, dsc);
5326
5327 if (!dsc->wrote_to_pc)
5328 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
5329 }
5330
5331 #include "bfd-in2.h"
5332 #include "libcoff.h"
5333
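/* GDB's print_insn hook: disassemble the instruction at MEMADDR,
   arranging for the opcodes disassembler to decode Thumb instructions
   when MEMADDR is a Thumb address, and dispatching on the target byte
   order.  */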
5334 static int
5335 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
5336 {
5337 if (arm_pc_is_thumb (memaddr))
5338 {
5339 static asymbol *asym;
5340 static combined_entry_type ce;
5341 static struct coff_symbol_struct csym;
5342 static struct bfd fake_bfd;
5343 static bfd_target fake_target;
5344
5345 if (csym.native == NULL)
5346 {
5347 /* Create a fake symbol vector containing a Thumb symbol.
5348 This is solely so that the code in print_insn_little_arm()
5349 and print_insn_big_arm() in opcodes/arm-dis.c will detect
5350 the presence of a Thumb symbol and switch to decoding
5351 Thumb instructions. */
5352
5353 fake_target.flavour = bfd_target_coff_flavour;
5354 fake_bfd.xvec = &fake_target;
5355 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
5356 csym.native = &ce;
5357 csym.symbol.the_bfd = &fake_bfd;
5358 csym.symbol.name = "fake";
5359 asym = (asymbol *) & csym;
5360 }
5361
5362 memaddr = UNMAKE_THUMB_ADDR (memaddr);
5363 info->symbols = &asym;
5364 }
5365 else
5366 info->symbols = NULL;
5367
5368 if (info->endian == BFD_ENDIAN_BIG)
5369 return print_insn_big_arm (memaddr, info);
5370 else
5371 return print_insn_little_arm (memaddr, info);
5372 }
5373
5374 /* The following define instruction sequences that will cause ARM
5375 CPUs to take an undefined instruction trap. These are used to
5376 signal a breakpoint to GDB.
5377
5378 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
5379 modes. A different instruction is required for each mode. The ARM
5380 CPUs can also be big or little endian. Thus four different
5381 instructions are needed to support all cases.
5382
5383 Note: ARMv4 defines several new instructions that will take the
5384 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
5385 not in fact add the new instructions. The new undefined
5386 instructions in ARMv4 are all instructions that had no defined
5387 behaviour in earlier chips. There is no guarantee that they will
5388 raise an exception, but they may be treated as NOPs. In practice, it
5389 may only be safe to rely on instructions matching:
5390
5391 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
5392 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
5393 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
5394
5395 Even this may only be true if the condition predicate is true. The
5396 following use a condition predicate of ALWAYS so it is always TRUE.
5397
5398 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
5399 and NetBSD all use a software interrupt rather than an undefined
5400 instruction to force a trap. This can be handled by the
5401 abi-specific code during establishment of the gdbarch vector. */
5402
5403 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
5404 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
5405 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
5406 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
5407
5408 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
5409 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
5410 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
5411 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
5412
5413 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
5414 the program counter value to determine whether a 16-bit or 32-bit
5415 breakpoint should be used. It returns a pointer to a string of
5416 bytes that encode a breakpoint instruction, stores the length of
5417 the string to *lenptr, and adjusts the program counter (if
5418 necessary) to point to the actual memory location where the
5419 breakpoint should be inserted. */
5420
5421 static const unsigned char *
5422 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
5423 {
5424 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5425 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5426
5427 if (arm_pc_is_thumb (*pcptr))
5428 {
5429 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
5430
5431 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
5432 check whether we are replacing a 32-bit instruction. */
5433 if (tdep->thumb2_breakpoint != NULL)
5434 {
5435 gdb_byte buf[2];
5436 if (target_read_memory (*pcptr, buf, 2) == 0)
5437 {
5438 unsigned short inst1;
5439 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
5440 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
5441 {
5442 *lenptr = tdep->thumb2_breakpoint_size;
5443 return tdep->thumb2_breakpoint;
5444 }
5445 }
5446 }
5447
5448 *lenptr = tdep->thumb_breakpoint_size;
5449 return tdep->thumb_breakpoint;
5450 }
5451 else
5452 {
5453 *lenptr = tdep->arm_breakpoint_size;
5454 return tdep->arm_breakpoint;
5455 }
5456 }
5457
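/* Determine the remote protocol "kind" of breakpoint to use at *PCPTR.
   This is the breakpoint length, except that a 32-bit Thumb-2 breakpoint
   is reported as kind 3 so that a remote stub can distinguish it from a
   32-bit ARM breakpoint.  */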
5458 static void
5459 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
5460 int *kindptr)
5461 {
5462 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5463
5464 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
5465
5466 if (arm_pc_is_thumb (*pcptr) && *kindptr == 4)
5467 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
5468 that this is not confused with a 32-bit ARM breakpoint. */
5469 *kindptr = 3;
5470 }
5471
5472 /* Extract from an array REGBUF containing the (raw) register state a
5473 function return value of type TYPE, and copy that, in virtual
5474 format, into VALBUF. */
5475
5476 static void
5477 arm_extract_return_value (struct type *type, struct regcache *regs,
5478 gdb_byte *valbuf)
5479 {
5480 struct gdbarch *gdbarch = get_regcache_arch (regs);
5481 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5482
5483 if (TYPE_CODE_FLT == TYPE_CODE (type))
5484 {
5485 switch (gdbarch_tdep (gdbarch)->fp_model)
5486 {
5487 case ARM_FLOAT_FPA:
5488 {
5489 /* The value is in register F0 in internal format. We need to
5490 extract the raw value and then convert it to the desired
5491 internal type. */
5492 bfd_byte tmpbuf[FP_REGISTER_SIZE];
5493
5494 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
5495 convert_from_extended (floatformat_from_type (type), tmpbuf,
5496 valbuf, gdbarch_byte_order (gdbarch));
5497 }
5498 break;
5499
5500 case ARM_FLOAT_SOFT_FPA:
5501 case ARM_FLOAT_SOFT_VFP:
5502 /* ARM_FLOAT_VFP can arise if this is a variadic function so
5503 not using the VFP ABI code. */
5504 case ARM_FLOAT_VFP:
5505 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
5506 if (TYPE_LENGTH (type) > 4)
5507 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
5508 valbuf + INT_REGISTER_SIZE);
5509 break;
5510
5511 default:
5512 internal_error
5513 (__FILE__, __LINE__,
5514 _("arm_extract_return_value: Floating point model not supported"));
5515 break;
5516 }
5517 }
5518 else if (TYPE_CODE (type) == TYPE_CODE_INT
5519 || TYPE_CODE (type) == TYPE_CODE_CHAR
5520 || TYPE_CODE (type) == TYPE_CODE_BOOL
5521 || TYPE_CODE (type) == TYPE_CODE_PTR
5522 || TYPE_CODE (type) == TYPE_CODE_REF
5523 || TYPE_CODE (type) == TYPE_CODE_ENUM)
5524 {
5525 /* If the type is a plain integer, then the access is
5526 straightforward. Otherwise we have to play around a bit more. */
5527 int len = TYPE_LENGTH (type);
5528 int regno = ARM_A1_REGNUM;
5529 ULONGEST tmp;
5530
5531 while (len > 0)
5532 {
5533 /* By using store_unsigned_integer we avoid having to do
5534 anything special for small big-endian values. */
5535 regcache_cooked_read_unsigned (regs, regno++, &tmp);
5536 store_unsigned_integer (valbuf,
5537 (len > INT_REGISTER_SIZE
5538 ? INT_REGISTER_SIZE : len),
5539 byte_order, tmp);
5540 len -= INT_REGISTER_SIZE;
5541 valbuf += INT_REGISTER_SIZE;
5542 }
5543 }
5544 else
5545 {
5546 /* For a structure or union the behaviour is as if the value had
5547 been stored to word-aligned memory and then loaded into
5548 registers with 32-bit load instruction(s). */
5549 int len = TYPE_LENGTH (type);
5550 int regno = ARM_A1_REGNUM;
5551 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5552
5553 while (len > 0)
5554 {
5555 regcache_cooked_read (regs, regno++, tmpbuf);
5556 memcpy (valbuf, tmpbuf,
5557 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
5558 len -= INT_REGISTER_SIZE;
5559 valbuf += INT_REGISTER_SIZE;
5560 }
5561 }
5562 }
5563
5564
5565 /* Will a function return an aggregate type in memory or in a
5566 register? Return 0 if an aggregate type can be returned in a
5567 register, 1 if it must be returned in memory. */
5568
5569 static int
5570 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
5571 {
5572 int nRc;
5573 enum type_code code;
5574
5575 CHECK_TYPEDEF (type);
5576
5577 /* In the ARM ABI, "integer" like aggregate types are returned in
5578 registers. For an aggregate type to be integer like, its size
5579 must be less than or equal to INT_REGISTER_SIZE and the
5580 offset of each addressable subfield must be zero. Note that bit
5581 fields are not addressable, and all addressable subfields of
5582 unions always start at offset zero.
5583
5584 This function is based on the behaviour of GCC 2.95.1.
5585 See: gcc/arm.c: arm_return_in_memory() for details.
5586
5587 Note: Versions of GCC before 2.95.2 do not set up the
5588 parameters correctly for a function returning the following
5589 structure: struct { float f; }; this should be returned in memory,
5590 not in a register. Richard Earnshaw sent me a patch, but I do not
5591 know of any way to detect if a function like the above has been
5592 compiled with the correct calling convention. */
5593
5594 /* All aggregate types that won't fit in a register must be returned
5595 in memory. */
5596 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
5597 {
5598 return 1;
5599 }
5600
5601 /* The AAPCS says all aggregates not larger than a word are returned
5602 in a register. */
5603 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
5604 return 0;
5605
5606 /* The only aggregate types that can be returned in a register are
5607 structs and unions. Arrays must be returned in memory. */
5608 code = TYPE_CODE (type);
5609 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
5610 {
5611 return 1;
5612 }
5613
5614 /* Assume all other aggregate types can be returned in a register.
5615 Run a check for structures, unions and arrays. */
5616 nRc = 0;
5617
5618 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
5619 {
5620 int i;
5621 /* Need to check if this struct/union is "integer" like. For
5622 this to be true, its size must be less than or equal to
5623 INT_REGISTER_SIZE and the offset of each addressable
5624 subfield must be zero. Note that bit fields are not
5625 addressable, and unions always start at offset zero. If any
5626 of the subfields is a floating point type, the struct/union
5627 cannot be an integer type. */
5628
5629 /* For each field in the object, check:
5630 1) Is it FP? --> yes, nRc = 1;
5631 2) Is it addressable (bitpos != 0) and
5632 not packed (bitsize == 0)?
5633 --> yes, nRc = 1
5634 */
5635
5636 for (i = 0; i < TYPE_NFIELDS (type); i++)
5637 {
5638 enum type_code field_type_code;
5639 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, i)));
5640
5641 /* Is it a floating point type field? */
5642 if (field_type_code == TYPE_CODE_FLT)
5643 {
5644 nRc = 1;
5645 break;
5646 }
5647
5648 /* If bitpos != 0, then we have to care about it. */
5649 if (TYPE_FIELD_BITPOS (type, i) != 0)
5650 {
5651 /* Bitfields are not addressable. If the field bitsize is
5652 zero, then the field is not packed. Hence it cannot be
5653 a bitfield or any other packed type. */
5654 if (TYPE_FIELD_BITSIZE (type, i) == 0)
5655 {
5656 nRc = 1;
5657 break;
5658 }
5659 }
5660 }
5661 }
5662
5663 return nRc;
5664 }
5665
5666 /* Write into appropriate registers a function return value of type
5667 TYPE, given in virtual format. */
5668
5669 static void
5670 arm_store_return_value (struct type *type, struct regcache *regs,
5671 const gdb_byte *valbuf)
5672 {
5673 struct gdbarch *gdbarch = get_regcache_arch (regs);
5674 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5675
5676 if (TYPE_CODE (type) == TYPE_CODE_FLT)
5677 {
5678 char buf[MAX_REGISTER_SIZE];
5679
5680 switch (gdbarch_tdep (gdbarch)->fp_model)
5681 {
5682 case ARM_FLOAT_FPA:
5683
5684 convert_to_extended (floatformat_from_type (type), buf, valbuf,
5685 gdbarch_byte_order (gdbarch));
5686 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
5687 break;
5688
5689 case ARM_FLOAT_SOFT_FPA:
5690 case ARM_FLOAT_SOFT_VFP:
5691 /* ARM_FLOAT_VFP can arise if this is a variadic function so
5692 not using the VFP ABI code. */
5693 case ARM_FLOAT_VFP:
5694 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
5695 if (TYPE_LENGTH (type) > 4)
5696 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
5697 valbuf + INT_REGISTER_SIZE);
5698 break;
5699
5700 default:
5701 internal_error
5702 (__FILE__, __LINE__,
5703 _("arm_store_return_value: Floating point model not supported"));
5704 break;
5705 }
5706 }
5707 else if (TYPE_CODE (type) == TYPE_CODE_INT
5708 || TYPE_CODE (type) == TYPE_CODE_CHAR
5709 || TYPE_CODE (type) == TYPE_CODE_BOOL
5710 || TYPE_CODE (type) == TYPE_CODE_PTR
5711 || TYPE_CODE (type) == TYPE_CODE_REF
5712 || TYPE_CODE (type) == TYPE_CODE_ENUM)
5713 {
5714 if (TYPE_LENGTH (type) <= 4)
5715 {
5716 /* Values of one word or less are zero/sign-extended and
5717 returned in r0. */
5718 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5719 LONGEST val = unpack_long (type, valbuf);
5720
5721 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
5722 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
5723 }
5724 else
5725 {
5726 /* Integral values greater than one word are stored in consecutive
5727 registers starting with r0. This will always be a multiple of
5728 the register size. */
5729 int len = TYPE_LENGTH (type);
5730 int regno = ARM_A1_REGNUM;
5731
5732 while (len > 0)
5733 {
5734 regcache_cooked_write (regs, regno++, valbuf);
5735 len -= INT_REGISTER_SIZE;
5736 valbuf += INT_REGISTER_SIZE;
5737 }
5738 }
5739 }
5740 else
5741 {
5742 /* For a structure or union the behaviour is as if the value had
5743 been stored to word-aligned memory and then loaded into
5744 registers with 32-bit load instruction(s). */
5745 int len = TYPE_LENGTH (type);
5746 int regno = ARM_A1_REGNUM;
5747 bfd_byte tmpbuf[INT_REGISTER_SIZE];
5748
5749 while (len > 0)
5750 {
5751 memcpy (tmpbuf, valbuf,
5752 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
5753 regcache_cooked_write (regs, regno++, tmpbuf);
5754 len -= INT_REGISTER_SIZE;
5755 valbuf += INT_REGISTER_SIZE;
5756 }
5757 }
5758 }
5759
5760
5761 /* Handle function return values. */
5762
5763 static enum return_value_convention
5764 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
5765 struct type *valtype, struct regcache *regcache,
5766 gdb_byte *readbuf, const gdb_byte *writebuf)
5767 {
5768 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5769 enum arm_vfp_cprc_base_type vfp_base_type;
5770 int vfp_base_count;
5771
5772 if (arm_vfp_abi_for_function (gdbarch, func_type)
5773 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
5774 {
5775 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
5776 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
5777 int i;
5778 for (i = 0; i < vfp_base_count; i++)
5779 {
5780 if (reg_char == 'q')
5781 {
5782 if (writebuf)
5783 arm_neon_quad_write (gdbarch, regcache, i,
5784 writebuf + i * unit_length);
5785
5786 if (readbuf)
5787 arm_neon_quad_read (gdbarch, regcache, i,
5788 readbuf + i * unit_length);
5789 }
5790 else
5791 {
5792 char name_buf[4];
5793 int regnum;
5794
5795 sprintf (name_buf, "%c%d", reg_char, i);
5796 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
5797 strlen (name_buf));
5798 if (writebuf)
5799 regcache_cooked_write (regcache, regnum,
5800 writebuf + i * unit_length);
5801 if (readbuf)
5802 regcache_cooked_read (regcache, regnum,
5803 readbuf + i * unit_length);
5804 }
5805 }
5806 return RETURN_VALUE_REGISTER_CONVENTION;
5807 }
5808
5809 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
5810 || TYPE_CODE (valtype) == TYPE_CODE_UNION
5811 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
5812 {
5813 if (tdep->struct_return == pcc_struct_return
5814 || arm_return_in_memory (gdbarch, valtype))
5815 return RETURN_VALUE_STRUCT_CONVENTION;
5816 }
5817
5818 if (writebuf)
5819 arm_store_return_value (valtype, regcache, writebuf);
5820
5821 if (readbuf)
5822 arm_extract_return_value (valtype, regcache, readbuf);
5823
5824 return RETURN_VALUE_REGISTER_CONVENTION;
5825 }
5826
5827
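/* Implement the "get_longjmp_target" gdbarch method: fetch the PC that
   a longjmp will resume at from the jmp_buf whose address is in r0,
   using the ABI-specific jb_pc and jb_elt_size offsets.  Return non-zero
   on success.  */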
5828 static int
5829 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
5830 {
5831 struct gdbarch *gdbarch = get_frame_arch (frame);
5832 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5833 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5834 CORE_ADDR jb_addr;
5835 char buf[INT_REGISTER_SIZE];
5836
5837 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
5838
5839 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
5840 INT_REGISTER_SIZE))
5841 return 0;
5842
5843 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
5844 return 1;
5845 }
5846
5847 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
5848 return the target PC. Otherwise return 0. */
5849
5850 CORE_ADDR
5851 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
5852 {
5853 char *name;
5854 int namelen;
5855 CORE_ADDR start_addr;
5856
5857 /* Find the starting address and name of the function containing the PC. */
5858 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
5859 return 0;
5860
5861 /* If PC is in a Thumb call or return stub, return the address of the
5862 target PC, which is in a register. The thunk functions are called
5863 _call_via_xx, where xx is the register name. The possible names
5864 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
5865 functions, named __ARM_call_via_r[0-7]. */
5866 if (strncmp (name, "_call_via_", 10) == 0
5867 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
5868 {
5869 /* Use the name suffix to determine which register contains the
5870 target PC. */
5871 static char *table[15] =
5872 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
5873 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
5874 };
5875 int regno;
5876 int offset = strlen (name) - 2;
5877
5878 for (regno = 0; regno <= 14; regno++)
5879 if (strcmp (&name[offset], table[regno]) == 0)
5880 return get_frame_register_unsigned (frame, regno);
5881 }
5882
5883 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
5884 non-interworking calls to foo. We could decode the stubs
5885 to find the target but it's easier to use the symbol table. */
5886 namelen = strlen (name);
5887 if (name[0] == '_' && name[1] == '_'
5888 && ((namelen > 2 + strlen ("_from_thumb")
5889 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
5890 strlen ("_from_thumb")) == 0)
5891 || (namelen > 2 + strlen ("_from_arm")
5892 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
5893 strlen ("_from_arm")) == 0)))
5894 {
5895 char *target_name;
5896 int target_len = namelen - 2;
5897 struct minimal_symbol *minsym;
5898 struct objfile *objfile;
5899 struct obj_section *sec;
5900
5901 if (name[namelen - 1] == 'b')
5902 target_len -= strlen ("_from_thumb");
5903 else
5904 target_len -= strlen ("_from_arm");
5905
5906 target_name = alloca (target_len + 1);
5907 memcpy (target_name, name + 2, target_len);
5908 target_name[target_len] = '\0';
5909
5910 sec = find_pc_section (pc);
5911 objfile = (sec == NULL) ? NULL : sec->objfile;
5912 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
5913 if (minsym != NULL)
5914 return SYMBOL_VALUE_ADDRESS (minsym);
5915 else
5916 return 0;
5917 }
5918
5919 return 0; /* not a stub */
5920 }
5921
5922 static void
5923 set_arm_command (char *args, int from_tty)
5924 {
5925 printf_unfiltered (_("\
5926 \"set arm\" must be followed by an apporpriate subcommand.\n"));
5927 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
5928 }
5929
5930 static void
5931 show_arm_command (char *args, int from_tty)
5932 {
5933 cmd_show_list (showarmcmdlist, from_tty, "");
5934 }
5935
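/* Re-select the current architecture after one of the "set arm ..."
   settings has changed, so that the new value takes effect.  */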
5936 static void
5937 arm_update_current_architecture (void)
5938 {
5939 struct gdbarch_info info;
5940
5941 /* If the current architecture is not ARM, we have nothing to do. */
5942 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
5943 return;
5944
5945 /* Update the architecture. */
5946 gdbarch_info_init (&info);
5947
5948 if (!gdbarch_update_p (info))
5949 internal_error (__FILE__, __LINE__, "could not update architecture");
5950 }
5951
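/* Handle "set arm fpu": translate the user-supplied string into an
   arm_float_model value and refresh the architecture.  */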
5952 static void
5953 set_fp_model_sfunc (char *args, int from_tty,
5954 struct cmd_list_element *c)
5955 {
5956 enum arm_float_model fp_model;
5957
5958 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
5959 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
5960 {
5961 arm_fp_model = fp_model;
5962 break;
5963 }
5964
5965 if (fp_model == ARM_FLOAT_LAST)
5966 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
5967 current_fp_model);
5968
5969 arm_update_current_architecture ();
5970 }
5971
5972 static void
5973 show_fp_model (struct ui_file *file, int from_tty,
5974 struct cmd_list_element *c, const char *value)
5975 {
5976 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
5977
5978 if (arm_fp_model == ARM_FLOAT_AUTO
5979 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
5980 fprintf_filtered (file, _("\
5981 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
5982 fp_model_strings[tdep->fp_model]);
5983 else
5984 fprintf_filtered (file, _("\
5985 The current ARM floating point model is \"%s\".\n"),
5986 fp_model_strings[arm_fp_model]);
5987 }
5988
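/* Handle "set arm abi": translate the user-supplied string into an
   arm_abi_kind value and refresh the architecture.  */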
5989 static void
5990 arm_set_abi (char *args, int from_tty,
5991 struct cmd_list_element *c)
5992 {
5993 enum arm_abi_kind arm_abi;
5994
5995 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
5996 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
5997 {
5998 arm_abi_global = arm_abi;
5999 break;
6000 }
6001
6002 if (arm_abi == ARM_ABI_LAST)
6003 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
6004 arm_abi_string);
6005
6006 arm_update_current_architecture ();
6007 }
6008
6009 static void
6010 arm_show_abi (struct ui_file *file, int from_tty,
6011 struct cmd_list_element *c, const char *value)
6012 {
6013 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6014
6015 if (arm_abi_global == ARM_ABI_AUTO
6016 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6017 fprintf_filtered (file, _("\
6018 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
6019 arm_abi_strings[tdep->arm_abi]);
6020 else
6021 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
6022 arm_abi_string);
6023 }
6024
6025 static void
6026 arm_show_fallback_mode (struct ui_file *file, int from_tty,
6027 struct cmd_list_element *c, const char *value)
6028 {
6029 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6030
6031 fprintf_filtered (file, _("\
6032 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
6033 arm_fallback_mode_string);
6034 }
6035
6036 static void
6037 arm_show_force_mode (struct ui_file *file, int from_tty,
6038 struct cmd_list_element *c, const char *value)
6039 {
6040 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6041
6042 fprintf_filtered (file, _("\
6043 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
6044 arm_force_mode_string);
6045 }
6046
6047 /* If the user changes the register disassembly style used for info
6048 register and other commands, we have to also switch the style used
6049 in opcodes for disassembly output. This function is run in the "set
6050 arm disassembler" command, and does that. */
6051
6052 static void
6053 set_disassembly_style_sfunc (char *args, int from_tty,
6054 struct cmd_list_element *c)
6055 {
6056 set_disassembly_style ();
6057 }
6058 \f
6059 /* Return the ARM register name corresponding to register I. */
6060 static const char *
6061 arm_register_name (struct gdbarch *gdbarch, int i)
6062 {
6063 const int num_regs = gdbarch_num_regs (gdbarch);
6064
6065 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
6066 && i >= num_regs && i < num_regs + 32)
6067 {
6068 static const char *const vfp_pseudo_names[] = {
6069 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6070 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6071 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6072 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6073 };
6074
6075 return vfp_pseudo_names[i - num_regs];
6076 }
6077
6078 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
6079 && i >= num_regs + 32 && i < num_regs + 32 + 16)
6080 {
6081 static const char *const neon_pseudo_names[] = {
6082 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6083 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6084 };
6085
6086 return neon_pseudo_names[i - num_regs - 32];
6087 }
6088
6089 if (i >= ARRAY_SIZE (arm_register_names))
6090 /* These registers are only supported on targets which supply
6091 an XML description. */
6092 return "";
6093
6094 return arm_register_names[i];
6095 }
6096
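/* Tell the opcodes disassembler to use the register-name set selected
   by the "set arm disassembler" command.  */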
6097 static void
6098 set_disassembly_style (void)
6099 {
6100 int current;
6101
6102 /* Find the style that the user wants. */
6103 for (current = 0; current < num_disassembly_options; current++)
6104 if (disassembly_style == valid_disassembly_styles[current])
6105 break;
6106 gdb_assert (current < num_disassembly_options);
6107
6108 /* Synchronize the disassembler. */
6109 set_arm_regname_option (current);
6110 }
6111
6112 /* Test whether the coff symbol specific value corresponds to a Thumb
6113 function. */
6114
6115 static int
6116 coff_sym_is_thumb (int val)
6117 {
6118 return (val == C_THUMBEXT
6119 || val == C_THUMBSTAT
6120 || val == C_THUMBEXTFUNC
6121 || val == C_THUMBSTATFUNC
6122 || val == C_THUMBLABEL);
6123 }
6124
6125 /* arm_coff_make_msymbol_special()
6126 arm_elf_make_msymbol_special()
6127
6128 These functions test whether the COFF or ELF symbol corresponds to
6129 an address in thumb code, and set a "special" bit in a minimal
6130 symbol to indicate that it does. */
6131
6132 static void
6133 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
6134 {
6135 /* Thumb symbols are of type STT_LOPROC, (synonymous with
6136 STT_ARM_TFUNC). */
6137 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
6138 == STT_LOPROC)
6139 MSYMBOL_SET_SPECIAL (msym);
6140 }
6141
6142 static void
6143 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
6144 {
6145 if (coff_sym_is_thumb (val))
6146 MSYMBOL_SET_SPECIAL (msym);
6147 }
6148
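/* Free the per-section mapping symbol vectors attached to OBJFILE by
   arm_record_special_symbol below.  */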
6149 static void
6150 arm_objfile_data_free (struct objfile *objfile, void *arg)
6151 {
6152 struct arm_per_objfile *data = arg;
6153 unsigned int i;
6154
6155 for (i = 0; i < objfile->obfd->section_count; i++)
6156 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
6157 }
6158
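/* Record the ARM mapping symbol SYM ($a, $t or $d) in the per-section
   map for OBJFILE, keeping each section's map sorted by address.  */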
6159 static void
6160 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
6161 asymbol *sym)
6162 {
6163 const char *name = bfd_asymbol_name (sym);
6164 struct arm_per_objfile *data;
6165 VEC(arm_mapping_symbol_s) **map_p;
6166 struct arm_mapping_symbol new_map_sym;
6167
6168 gdb_assert (name[0] == '$');
6169 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
6170 return;
6171
6172 data = objfile_data (objfile, arm_objfile_data_key);
6173 if (data == NULL)
6174 {
6175 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
6176 struct arm_per_objfile);
6177 set_objfile_data (objfile, arm_objfile_data_key, data);
6178 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
6179 objfile->obfd->section_count,
6180 VEC(arm_mapping_symbol_s) *);
6181 }
6182 map_p = &data->section_maps[bfd_get_section (sym)->index];
6183
6184 new_map_sym.value = sym->value;
6185 new_map_sym.type = name[1];
6186
6187 /* Assume that most mapping symbols appear in order of increasing
6188 value. If they were randomly distributed, it would be faster to
6189 always push here and then sort at first use. */
6190 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
6191 {
6192 struct arm_mapping_symbol *prev_map_sym;
6193
6194 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
6195 if (prev_map_sym->value >= sym->value)
6196 {
6197 unsigned int idx;
6198 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
6199 arm_compare_mapping_symbols);
6200 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
6201 return;
6202 }
6203 }
6204
6205 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
6206 }
6207
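/* Implement the "write_pc" gdbarch method.  Besides writing the PC,
   keep the CPSR T bit in sync so that execution resumes in the
   instruction set matching the new PC.  */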
6208 static void
6209 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
6210 {
6211 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
6212
6213 /* If necessary, set the T bit. */
6214 if (arm_apcs_32)
6215 {
6216 ULONGEST val;
6217 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
6218 if (arm_pc_is_thumb (pc))
6219 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM, val | CPSR_T);
6220 else
6221 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6222 val & ~(ULONGEST) CPSR_T);
6223 }
6224 }
6225
6226 /* Read the contents of a NEON quad register, by reading from two
6227 double registers. This is used to implement the quad pseudo
6228 registers, and for argument passing in case the quad registers are
6229 missing; vectors are passed in quad registers when using the VFP
6230 ABI, even if a NEON unit is not present. REGNUM is the index of
6231 the quad register, in [0, 15]. */
6232
6233 static void
6234 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
6235 int regnum, gdb_byte *buf)
6236 {
6237 char name_buf[4];
6238 gdb_byte reg_buf[8];
6239 int offset, double_regnum;
6240
6241 sprintf (name_buf, "d%d", regnum << 1);
6242 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6243 strlen (name_buf));
6244
6245 /* d0 is always the least significant half of q0. */
6246 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6247 offset = 8;
6248 else
6249 offset = 0;
6250
6251 regcache_raw_read (regcache, double_regnum, reg_buf);
6252 memcpy (buf + offset, reg_buf, 8);
6253
6254 offset = 8 - offset;
6255 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
6256 memcpy (buf + offset, reg_buf, 8);
6257 }
6258
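/* Implement the "pseudo_register_read" gdbarch method for the VFP
   single-precision (s0-s31) and NEON quad (q0-q15) pseudo registers,
   which are backed by the raw double-precision d registers.  */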
6259 static void
6260 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
6261 int regnum, gdb_byte *buf)
6262 {
6263 const int num_regs = gdbarch_num_regs (gdbarch);
6264 char name_buf[4];
6265 gdb_byte reg_buf[8];
6266 int offset, double_regnum;
6267
6268 gdb_assert (regnum >= num_regs);
6269 regnum -= num_regs;
6270
6271 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6272 /* Quad-precision register. */
6273 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
6274 else
6275 {
6276 /* Single-precision register. */
6277 gdb_assert (regnum < 32);
6278
6279 /* s0 is always the least significant half of d0. */
6280 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6281 offset = (regnum & 1) ? 0 : 4;
6282 else
6283 offset = (regnum & 1) ? 4 : 0;
6284
6285 sprintf (name_buf, "d%d", regnum >> 1);
6286 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6287 strlen (name_buf));
6288
6289 regcache_raw_read (regcache, double_regnum, reg_buf);
6290 memcpy (buf, reg_buf + offset, 4);
6291 }
6292 }
6293
6294 /* Store the contents of BUF to a NEON quad register, by writing to
6295 two double registers. This is used to implement the quad pseudo
6296 registers, and for argument passing in case the quad registers are
6297 missing; vectors are passed in quad registers when using the VFP
6298 ABI, even if a NEON unit is not present. REGNUM is the index
6299 of the quad register, in [0, 15]. */
6300
6301 static void
6302 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
6303 int regnum, const gdb_byte *buf)
6304 {
6305 char name_buf[4];
6306 gdb_byte reg_buf[8];
6307 int offset, double_regnum;
6308
6309 sprintf (name_buf, "d%d", regnum << 1);
6310 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6311 strlen (name_buf));
6312
6313 /* d0 is always the least significant half of q0. */
6314 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6315 offset = 8;
6316 else
6317 offset = 0;
6318
6319 regcache_raw_write (regcache, double_regnum, buf + offset);
6320 offset = 8 - offset;
6321 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
6322 }
6323
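/* Implement the "pseudo_register_write" gdbarch method; the counterpart
   of arm_pseudo_read above.  */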
6324 static void
6325 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
6326 int regnum, const gdb_byte *buf)
6327 {
6328 const int num_regs = gdbarch_num_regs (gdbarch);
6329 char name_buf[4];
6330 gdb_byte reg_buf[8];
6331 int offset, double_regnum;
6332
6333 gdb_assert (regnum >= num_regs);
6334 regnum -= num_regs;
6335
6336 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6337 /* Quad-precision register. */
6338 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
6339 else
6340 {
6341 /* Single-precision register. */
6342 gdb_assert (regnum < 32);
6343
6344 /* s0 is always the least significant half of d0. */
6345 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6346 offset = (regnum & 1) ? 0 : 4;
6347 else
6348 offset = (regnum & 1) ? 4 : 0;
6349
6350 sprintf (name_buf, "d%d", regnum >> 1);
6351 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6352 strlen (name_buf));
6353
6354 regcache_raw_read (regcache, double_regnum, reg_buf);
6355 memcpy (reg_buf + offset, buf, 4);
6356 regcache_raw_write (regcache, double_regnum, reg_buf);
6357 }
6358 }
6359
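/* Return the value of the register whose number is given by BATON;
   used to implement the register aliases registered in
   arm_gdbarch_init.  */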
6360 static struct value *
6361 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
6362 {
6363 const int *reg_p = baton;
6364 return value_of_register (*reg_p, frame);
6365 }
6366 \f
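/* Sniff the OS ABI of an ARM ELF binary.  GNU tools mark their output
   with ELFOSABI_ARM, in which case the ABI tag note sections are
   checked as well.  */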
6367 static enum gdb_osabi
6368 arm_elf_osabi_sniffer (bfd *abfd)
6369 {
6370 unsigned int elfosabi;
6371 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
6372
6373 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
6374
6375 if (elfosabi == ELFOSABI_ARM)
6376 /* GNU tools use this value. Check note sections in this case,
6377 as well. */
6378 bfd_map_over_sections (abfd,
6379 generic_elf_osabi_sniff_abi_tag_sections,
6380 &osabi);
6381
6382 /* Anything else will be handled by the generic ELF sniffer. */
6383 return osabi;
6384 }
6385
6386 \f
6387 /* Initialize the current architecture based on INFO. If possible,
6388 re-use an architecture from ARCHES, which is a list of
6389 architectures already created during this debugging session.
6390
6391 Called e.g. at program startup, when reading a core file, and when
6392 reading a binary file. */
6393
6394 static struct gdbarch *
6395 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
6396 {
6397 struct gdbarch_tdep *tdep;
6398 struct gdbarch *gdbarch;
6399 struct gdbarch_list *best_arch;
6400 enum arm_abi_kind arm_abi = arm_abi_global;
6401 enum arm_float_model fp_model = arm_fp_model;
6402 struct tdesc_arch_data *tdesc_data = NULL;
6403 int i;
6404 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
6405 int have_neon = 0;
6406 int have_fpa_registers = 1;
6407
6408 /* Check any target description for validity. */
6409 if (tdesc_has_registers (info.target_desc))
6410 {
6411 /* For most registers we require GDB's default names; but also allow
6412 the numeric names for sp / lr / pc, as a convenience. */
6413 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
6414 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
6415 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
6416
6417 const struct tdesc_feature *feature;
6418 int valid_p;
6419
6420 feature = tdesc_find_feature (info.target_desc,
6421 "org.gnu.gdb.arm.core");
6422 if (feature == NULL)
6423 return NULL;
6424
6425 tdesc_data = tdesc_data_alloc ();
6426
6427 valid_p = 1;
6428 for (i = 0; i < ARM_SP_REGNUM; i++)
6429 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
6430 arm_register_names[i]);
6431 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6432 ARM_SP_REGNUM,
6433 arm_sp_names);
6434 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6435 ARM_LR_REGNUM,
6436 arm_lr_names);
6437 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
6438 ARM_PC_REGNUM,
6439 arm_pc_names);
6440 valid_p &= tdesc_numbered_register (feature, tdesc_data,
6441 ARM_PS_REGNUM, "cpsr");
6442
6443 if (!valid_p)
6444 {
6445 tdesc_data_cleanup (tdesc_data);
6446 return NULL;
6447 }
6448
6449 feature = tdesc_find_feature (info.target_desc,
6450 "org.gnu.gdb.arm.fpa");
6451 if (feature != NULL)
6452 {
6453 valid_p = 1;
6454 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
6455 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
6456 arm_register_names[i]);
6457 if (!valid_p)
6458 {
6459 tdesc_data_cleanup (tdesc_data);
6460 return NULL;
6461 }
6462 }
6463 else
6464 have_fpa_registers = 0;
6465
6466 feature = tdesc_find_feature (info.target_desc,
6467 "org.gnu.gdb.xscale.iwmmxt");
6468 if (feature != NULL)
6469 {
6470 static const char *const iwmmxt_names[] = {
6471 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
6472 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
6473 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
6474 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
6475 };
6476
6477 valid_p = 1;
6478 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
6479 valid_p
6480 &= tdesc_numbered_register (feature, tdesc_data, i,
6481 iwmmxt_names[i - ARM_WR0_REGNUM]);
6482
6483 /* Check for the control registers, but do not fail if they
6484 are missing. */
6485 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
6486 tdesc_numbered_register (feature, tdesc_data, i,
6487 iwmmxt_names[i - ARM_WR0_REGNUM]);
6488
6489 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
6490 valid_p
6491 &= tdesc_numbered_register (feature, tdesc_data, i,
6492 iwmmxt_names[i - ARM_WR0_REGNUM]);
6493
6494 if (!valid_p)
6495 {
6496 tdesc_data_cleanup (tdesc_data);
6497 return NULL;
6498 }
6499 }
6500
6501 /* If we have a VFP unit, check whether the single precision registers
6502 are present. If not, then we will synthesize them as pseudo
6503 registers. */
6504 feature = tdesc_find_feature (info.target_desc,
6505 "org.gnu.gdb.arm.vfp");
6506 if (feature != NULL)
6507 {
6508 static const char *const vfp_double_names[] = {
6509 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
6510 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
6511 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
6512 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
6513 };
6514
6515 /* Require the double precision registers. There must be either
6516 16 or 32. */
6517 valid_p = 1;
6518 for (i = 0; i < 32; i++)
6519 {
6520 valid_p &= tdesc_numbered_register (feature, tdesc_data,
6521 ARM_D0_REGNUM + i,
6522 vfp_double_names[i]);
6523 if (!valid_p)
6524 break;
6525 }
6526
6527 if (!valid_p && i != 16)
6528 {
6529 tdesc_data_cleanup (tdesc_data);
6530 return NULL;
6531 }
6532
6533 if (tdesc_unnumbered_register (feature, "s0") == 0)
6534 have_vfp_pseudos = 1;
6535
6536 have_vfp_registers = 1;
6537
6538 /* If we have VFP, also check for NEON. The architecture allows
6539 NEON without VFP (integer vector operations only), but GDB
6540 does not support that. */
6541 feature = tdesc_find_feature (info.target_desc,
6542 "org.gnu.gdb.arm.neon");
6543 if (feature != NULL)
6544 {
6545 /* NEON requires 32 double-precision registers. */
6546 if (i != 32)
6547 {
6548 tdesc_data_cleanup (tdesc_data);
6549 return NULL;
6550 }
6551
6552 /* If there are quad registers defined by the stub, use
6553 their type; otherwise (normally) provide them with
6554 the default type. */
6555 if (tdesc_unnumbered_register (feature, "q0") == 0)
6556 have_neon_pseudos = 1;
6557
6558 have_neon = 1;
6559 }
6560 }
6561 }
6562
6563 /* If we have an object to base this architecture on, try to determine
6564 its ABI. */
6565
6566 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
6567 {
6568 int ei_osabi, e_flags;
6569
6570 switch (bfd_get_flavour (info.abfd))
6571 {
6572 case bfd_target_aout_flavour:
6573 /* Assume it's an old APCS-style ABI. */
6574 arm_abi = ARM_ABI_APCS;
6575 break;
6576
6577 case bfd_target_coff_flavour:
6578 /* Assume it's an old APCS-style ABI. */
6579 /* XXX WinCE? */
6580 arm_abi = ARM_ABI_APCS;
6581 break;
6582
6583 case bfd_target_elf_flavour:
6584 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
6585 e_flags = elf_elfheader (info.abfd)->e_flags;
6586
6587 if (ei_osabi == ELFOSABI_ARM)
6588 {
6589 /* GNU tools used to use this value, but do not for EABI
6590 objects. There's nowhere to tag an EABI version
6591 anyway, so assume APCS. */
6592 arm_abi = ARM_ABI_APCS;
6593 }
6594 else if (ei_osabi == ELFOSABI_NONE)
6595 {
6596 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
6597
6598 switch (eabi_ver)
6599 {
6600 case EF_ARM_EABI_UNKNOWN:
6601 /* Assume GNU tools. */
6602 arm_abi = ARM_ABI_APCS;
6603 break;
6604
6605 case EF_ARM_EABI_VER4:
6606 case EF_ARM_EABI_VER5:
6607 arm_abi = ARM_ABI_AAPCS;
6608 /* EABI binaries default to VFP float ordering.
6609 They may also contain build attributes that can
6610 be used to identify if the VFP argument-passing
6611 ABI is in use. */
6612 if (fp_model == ARM_FLOAT_AUTO)
6613 {
6614 #ifdef HAVE_ELF
6615 switch (bfd_elf_get_obj_attr_int (info.abfd,
6616 OBJ_ATTR_PROC,
6617 Tag_ABI_VFP_args))
6618 {
6619 case 0:
6620 /* "The user intended FP parameter/result
6621 passing to conform to AAPCS, base
6622 variant". */
6623 fp_model = ARM_FLOAT_SOFT_VFP;
6624 break;
6625 case 1:
6626 /* "The user intended FP parameter/result
6627 passing to conform to AAPCS, VFP
6628 variant". */
6629 fp_model = ARM_FLOAT_VFP;
6630 break;
6631 case 2:
6632 /* "The user intended FP parameter/result
6633 passing to conform to tool chain-specific
6634 conventions" - we don't know any such
6635 conventions, so leave it as "auto". */
6636 break;
6637 default:
6638 /* Attribute value not mentioned in the
6639 October 2008 ABI, so leave it as
6640 "auto". */
6641 break;
6642 }
6643 #else
6644 fp_model = ARM_FLOAT_SOFT_VFP;
6645 #endif
6646 }
6647 break;
6648
6649 default:
6650 /* Leave it as "auto". */
6651 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
6652 break;
6653 }
6654 }
6655
6656 if (fp_model == ARM_FLOAT_AUTO)
6657 {
6658 int e_flags = elf_elfheader (info.abfd)->e_flags;
6659
6660 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
6661 {
6662 case 0:
6663 /* Leave it as "auto". Strictly speaking this case
6664 means FPA, but almost nobody uses that now, and
6665 many toolchains fail to set the appropriate bits
6666 for the floating-point model they use. */
6667 break;
6668 case EF_ARM_SOFT_FLOAT:
6669 fp_model = ARM_FLOAT_SOFT_FPA;
6670 break;
6671 case EF_ARM_VFP_FLOAT:
6672 fp_model = ARM_FLOAT_VFP;
6673 break;
6674 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
6675 fp_model = ARM_FLOAT_SOFT_VFP;
6676 break;
6677 }
6678 }
6679
6680 if (e_flags & EF_ARM_BE8)
6681 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
6682
6683 break;
6684
6685 default:
6686 /* Leave it as "auto". */
6687 break;
6688 }
6689 }
6690
6691 /* If there is already a candidate, use it. */
6692 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
6693 best_arch != NULL;
6694 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
6695 {
6696 if (arm_abi != ARM_ABI_AUTO
6697 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
6698 continue;
6699
6700 if (fp_model != ARM_FLOAT_AUTO
6701 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
6702 continue;
6703
6704 /* There are various other properties in tdep that we do not
6705 need to check here: those derived from a target description,
6706 since gdbarches with a different target description are
6707 automatically disqualified. */
6708
6709 /* Found a match. */
6710 break;
6711 }
6712
6713 if (best_arch != NULL)
6714 {
6715 if (tdesc_data != NULL)
6716 tdesc_data_cleanup (tdesc_data);
6717 return best_arch->gdbarch;
6718 }
6719
6720 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
6721 gdbarch = gdbarch_alloc (&info, tdep);
6722
6723 /* Record additional information about the architecture we are defining.
6724 These are gdbarch discriminators, like the OSABI. */
6725 tdep->arm_abi = arm_abi;
6726 tdep->fp_model = fp_model;
6727 tdep->have_fpa_registers = have_fpa_registers;
6728 tdep->have_vfp_registers = have_vfp_registers;
6729 tdep->have_vfp_pseudos = have_vfp_pseudos;
6730 tdep->have_neon_pseudos = have_neon_pseudos;
6731 tdep->have_neon = have_neon;
6732
6733 /* Breakpoints. */
6734 switch (info.byte_order_for_code)
6735 {
6736 case BFD_ENDIAN_BIG:
6737 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
6738 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
6739 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
6740 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
6741
6742 break;
6743
6744 case BFD_ENDIAN_LITTLE:
6745 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
6746 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
6747 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
6748 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
6749
6750 break;
6751
6752 default:
6753 internal_error (__FILE__, __LINE__,
6754 _("arm_gdbarch_init: bad byte order for float format"));
6755 }
6756
6757 /* On ARM targets char defaults to unsigned. */
6758 set_gdbarch_char_signed (gdbarch, 0);
6759
6760 /* Note: for displaced stepping, this includes the breakpoint, and one word
6761 of additional scratch space. This setting isn't used for anything beside
6762 displaced stepping at present. */
6763 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
6764
6765 /* This should be low enough for everything. */
6766 tdep->lowest_pc = 0x20;
6767 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
6768
6769 /* The default, for both APCS and AAPCS, is to return small
6770 structures in registers. */
6771 tdep->struct_return = reg_struct_return;
6772
6773 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
6774 set_gdbarch_frame_align (gdbarch, arm_frame_align);
6775
6776 set_gdbarch_write_pc (gdbarch, arm_write_pc);
6777
6778 /* Frame handling. */
6779 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
6780 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
6781 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
6782
6783 frame_base_set_default (gdbarch, &arm_normal_base);
6784
6785 /* Address manipulation. */
6786 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
6787 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
6788
6789 /* Advance PC across function entry code. */
6790 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
6791
6792 /* Skip trampolines. */
6793 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
6794
6795 /* The stack grows downward. */
6796 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
6797
6798 /* Breakpoint manipulation. */
6799 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
6800 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
6801 arm_remote_breakpoint_from_pc);
6802
6803 /* Information about registers, etc. */
6804 set_gdbarch_deprecated_fp_regnum (gdbarch, ARM_FP_REGNUM); /* ??? */
6805 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
6806 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
6807 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
6808 set_gdbarch_register_type (gdbarch, arm_register_type);
6809
6810 /* This "info float" is FPA-specific. Use the generic version if we
6811 do not have FPA. */
6812 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
6813 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
6814
6815 /* Internal <-> external register number maps. */
6816 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
6817 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
6818
6819 set_gdbarch_register_name (gdbarch, arm_register_name);
6820
6821 /* Returning results. */
6822 set_gdbarch_return_value (gdbarch, arm_return_value);
6823
6824 /* Disassembly. */
6825 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
6826
6827 /* Minsymbol frobbing. */
6828 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
6829 set_gdbarch_coff_make_msymbol_special (gdbarch,
6830 arm_coff_make_msymbol_special);
6831 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
6832
6833 /* Thumb-2 IT block support. */
6834 set_gdbarch_adjust_breakpoint_address (gdbarch,
6835 arm_adjust_breakpoint_address);
6836
6837 /* Virtual tables. */
6838 set_gdbarch_vbit_in_delta (gdbarch, 1);
6839
6840 /* Hook in the ABI-specific overrides, if they have been registered. */
6841 gdbarch_init_osabi (info, gdbarch);
6842
6843 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
6844
6845 /* Add some default predicates. */
6846 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
6847 dwarf2_append_unwinders (gdbarch);
6848 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
6849
6850 /* Now we have tuned the configuration, set a few final things,
6851 based on what the OS ABI has told us. */
6852
6853 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
6854 binaries are always marked. */
6855 if (tdep->arm_abi == ARM_ABI_AUTO)
6856 tdep->arm_abi = ARM_ABI_APCS;
6857
6858 /* We used to default to FPA for generic ARM, but almost nobody
6859 uses that now, and we now provide a way for the user to force
6860 the model. So default to the most useful variant. */
6861 if (tdep->fp_model == ARM_FLOAT_AUTO)
6862 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
6863
6864 if (tdep->jb_pc >= 0)
6865 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
6866
6867 /* Floating point sizes and format. */
6868 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
6869 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
6870 {
6871 set_gdbarch_double_format
6872 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
6873 set_gdbarch_long_double_format
6874 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
6875 }
6876 else
6877 {
6878 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
6879 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
6880 }
6881
6882 if (have_vfp_pseudos)
6883 {
6884 /* NOTE: These are the only pseudo registers used by
6885 the ARM target at the moment. If more are added, a
6886 little more care in numbering will be needed. */
6887
6888 int num_pseudos = 32;
6889 if (have_neon_pseudos)
6890 num_pseudos += 16;
6891 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
6892 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
6893 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
6894 }
6895
6896 if (tdesc_data)
6897 {
6898 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
6899
6900 tdesc_use_registers (gdbarch, info.target_desc, tdesc_data);
6901
6902 /* Override tdesc_register_type to adjust the types of VFP
6903 registers for NEON. */
6904 set_gdbarch_register_type (gdbarch, arm_register_type);
6905 }
6906
6907 /* Add standard register aliases. We add aliases even for those
6908 names which are used by the current architecture - it's simpler,
6909 and does no harm, since nothing ever lists user registers. */
6910 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
6911 user_reg_add (gdbarch, arm_register_aliases[i].name,
6912 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
6913
6914 return gdbarch;
6915 }
6916
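/* Dump the ARM-specific portion of the architecture for "maintenance
   print architecture".  */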
6917 static void
6918 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
6919 {
6920 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6921
6922 if (tdep == NULL)
6923 return;
6924
6925 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
6926 (unsigned long) tdep->lowest_pc);
6927 }
6928
6929 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
6930
6931 void
6932 _initialize_arm_tdep (void)
6933 {
6934 struct ui_file *stb;
6935 long length;
6936 struct cmd_list_element *new_set, *new_show;
6937 const char *setname;
6938 const char *setdesc;
6939 const char *const *regnames;
6940 int numregs, i, j;
6941 static char *helptext;
6942 char regdesc[1024], *rdptr = regdesc;
6943 size_t rest = sizeof (regdesc);
6944
6945 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
6946
6947 arm_objfile_data_key
6948 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
6949
6950 /* Register an ELF OS ABI sniffer for ARM binaries. */
6951 gdbarch_register_osabi_sniffer (bfd_arch_arm,
6952 bfd_target_elf_flavour,
6953 arm_elf_osabi_sniffer);
6954
6955 /* Get the number of possible sets of register names defined in opcodes. */
6956 num_disassembly_options = get_arm_regname_num_options ();
6957
6958 /* Add root prefix command for all "set arm"/"show arm" commands. */
6959 add_prefix_cmd ("arm", no_class, set_arm_command,
6960 _("Various ARM-specific commands."),
6961 &setarmcmdlist, "set arm ", 0, &setlist);
6962
6963 add_prefix_cmd ("arm", no_class, show_arm_command,
6964 _("Various ARM-specific commands."),
6965 &showarmcmdlist, "show arm ", 0, &showlist);
6966
6967 /* Sync the opcode insn printer with our register viewer. */
6968 parse_arm_disassembler_option ("reg-names-std");
6969
6970 /* Initialize the array that will be passed to
6971 add_setshow_enum_cmd(). */
6972 valid_disassembly_styles
6973 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
6974 for (i = 0; i < num_disassembly_options; i++)
6975 {
6976 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
6977 valid_disassembly_styles[i] = setname;
6978 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
6979 rdptr += length;
6980 rest -= length;
6981 /* When we find the default names, tell the disassembler to use
6982 them. */
6983 if (!strcmp (setname, "std"))
6984 {
6985 disassembly_style = setname;
6986 set_arm_regname_option (i);
6987 }
6988 }
6989 /* Mark the end of valid options. */
6990 valid_disassembly_styles[num_disassembly_options] = NULL;
6991
6992 /* Create the help text. */
6993 stb = mem_fileopen ();
6994 fprintf_unfiltered (stb, "%s%s%s",
6995 _("The valid values are:\n"),
6996 regdesc,
6997 _("The default is \"std\"."));
6998 helptext = ui_file_xstrdup (stb, NULL);
6999 ui_file_delete (stb);
7000
7001 add_setshow_enum_cmd("disassembler", no_class,
7002 valid_disassembly_styles, &disassembly_style,
7003 _("Set the disassembly style."),
7004 _("Show the disassembly style."),
7005 helptext,
7006 set_disassembly_style_sfunc,
7007 NULL, /* FIXME: i18n: The disassembly style is \"%s\". */
7008 &setarmcmdlist, &showarmcmdlist);
7009
7010 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
7011 _("Set usage of ARM 32-bit mode."),
7012 _("Show usage of ARM 32-bit mode."),
7013 _("When off, a 26-bit PC will be used."),
7014 NULL,
7015 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
7016 &setarmcmdlist, &showarmcmdlist);
7017
7018 /* Add a command to allow the user to force the FPU model. */
7019 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
7020 _("Set the floating point type."),
7021 _("Show the floating point type."),
7022 _("auto - Determine the FP typefrom the OS-ABI.\n\
7023 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7024 fpa - FPA co-processor (GCC compiled).\n\
7025 softvfp - Software FP with pure-endian doubles.\n\
7026 vfp - VFP co-processor."),
7027 set_fp_model_sfunc, show_fp_model,
7028 &setarmcmdlist, &showarmcmdlist);
7029
7030 /* Add a command to allow the user to force the ABI. */
7031 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
7032 _("Set the ABI."),
7033 _("Show the ABI."),
7034 NULL, arm_set_abi, arm_show_abi,
7035 &setarmcmdlist, &showarmcmdlist);
7036
7037 /* Add two commands to allow the user to force the assumed
7038 execution mode. */
7039 add_setshow_enum_cmd ("fallback-mode", class_support,
7040 arm_mode_strings, &arm_fallback_mode_string,
7041 _("Set the mode assumed when symbols are unavailable."),
7042 _("Show the mode assumed when symbols are unavailable."),
7043 NULL, NULL, arm_show_fallback_mode,
7044 &setarmcmdlist, &showarmcmdlist);
7045 add_setshow_enum_cmd ("force-mode", class_support,
7046 arm_mode_strings, &arm_force_mode_string,
7047 _("Set the mode assumed even when symbols are available."),
7048 _("Show the mode assumed even when symbols are available."),
7049 NULL, NULL, arm_show_force_mode,
7050 &setarmcmdlist, &showarmcmdlist);
7051
7052 /* Debugging flag. */
7053 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
7054 _("Set ARM debugging."),
7055 _("Show ARM debugging."),
7056 _("When on, arm-specific debugging is enabled."),
7057 NULL,
7058 NULL, /* FIXME: i18n: "ARM debugging is %s." */
7059 &setdebuglist, &showdebuglist);
7060 }