1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper () */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
45
46 #include "arm-tdep.h"
47 #include "gdb/sim-arm.h"
48
49 #include "elf-bfd.h"
50 #include "coff/internal.h"
51 #include "elf/arm.h"
52
53 #include "gdb_assert.h"
54 #include "vec.h"
55
56 #include "features/arm-with-m.c"
57
58 static int arm_debug;
59
60 /* Macros for setting and testing a bit in a minimal symbol that marks
61 it as a Thumb function. The MSB of the minimal symbol's "info" field
62 is used for this purpose.
63
64 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
65 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
66
67 #define MSYMBOL_SET_SPECIAL(msym) \
68 MSYMBOL_TARGET_FLAG_1 (msym) = 1
69
70 #define MSYMBOL_IS_SPECIAL(msym) \
71 MSYMBOL_TARGET_FLAG_1 (msym)
72
73 /* Per-objfile data used for mapping symbols. */
74 static const struct objfile_data *arm_objfile_data_key;
75
76 struct arm_mapping_symbol
77 {
78 bfd_vma value;
79 char type;
80 };
81 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
82 DEF_VEC_O(arm_mapping_symbol_s);
83
84 struct arm_per_objfile
85 {
86 VEC(arm_mapping_symbol_s) **section_maps;
87 };
88
89 /* The list of available "set arm ..." and "show arm ..." commands. */
90 static struct cmd_list_element *setarmcmdlist = NULL;
91 static struct cmd_list_element *showarmcmdlist = NULL;
92
93 /* The type of floating-point to use. Keep this in sync with enum
94 arm_float_model, and the help string in _initialize_arm_tdep. */
95 static const char *fp_model_strings[] =
96 {
97 "auto",
98 "softfpa",
99 "fpa",
100 "softvfp",
101 "vfp",
102 NULL
103 };
104
105 /* A variable that can be configured by the user. */
106 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
107 static const char *current_fp_model = "auto";
108
109 /* The ABI to use. Keep this in sync with arm_abi_kind. */
110 static const char *arm_abi_strings[] =
111 {
112 "auto",
113 "APCS",
114 "AAPCS",
115 NULL
116 };
117
118 /* A variable that can be configured by the user. */
119 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
120 static const char *arm_abi_string = "auto";
121
122 /* The execution mode to assume. */
123 static const char *arm_mode_strings[] =
124 {
125 "auto",
126 "arm",
127 "thumb",
128 NULL
129 };
130
131 static const char *arm_fallback_mode_string = "auto";
132 static const char *arm_force_mode_string = "auto";
133
134 /* Number of different reg name sets (options). */
135 static int num_disassembly_options;
136
137 /* The standard register names, and all the valid aliases for them. */
138 static const struct
139 {
140 const char *name;
141 int regnum;
142 } arm_register_aliases[] = {
143 /* Basic register numbers. */
144 { "r0", 0 },
145 { "r1", 1 },
146 { "r2", 2 },
147 { "r3", 3 },
148 { "r4", 4 },
149 { "r5", 5 },
150 { "r6", 6 },
151 { "r7", 7 },
152 { "r8", 8 },
153 { "r9", 9 },
154 { "r10", 10 },
155 { "r11", 11 },
156 { "r12", 12 },
157 { "r13", 13 },
158 { "r14", 14 },
159 { "r15", 15 },
160 /* Synonyms (argument and variable registers). */
161 { "a1", 0 },
162 { "a2", 1 },
163 { "a3", 2 },
164 { "a4", 3 },
165 { "v1", 4 },
166 { "v2", 5 },
167 { "v3", 6 },
168 { "v4", 7 },
169 { "v5", 8 },
170 { "v6", 9 },
171 { "v7", 10 },
172 { "v8", 11 },
173 /* Other platform-specific names for r9. */
174 { "sb", 9 },
175 { "tr", 9 },
176 /* Special names. */
177 { "ip", 12 },
178 { "sp", 13 },
179 { "lr", 14 },
180 { "pc", 15 },
181 /* Names used by GCC (not listed in the ARM EABI). */
182 { "sl", 10 },
183 { "fp", 11 },
184 /* A special name from the older ATPCS. */
185 { "wr", 7 },
186 };
187
188 static const char *const arm_register_names[] =
189 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
190 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
191 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
192 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
193 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
194 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
195 "fps", "cpsr" }; /* 24 25 */
196
197 /* Valid register name styles. */
198 static const char **valid_disassembly_styles;
199
200 /* Disassembly style to use. Default to "std" register names. */
201 static const char *disassembly_style;
202
203 /* This is used to keep the bfd arch_info in sync with the disassembly
204 style. */
205 static void set_disassembly_style_sfunc (char *, int,
206 struct cmd_list_element *);
207 static void set_disassembly_style (void);
208
209 static void convert_from_extended (const struct floatformat *, const void *,
210 void *, int);
211 static void convert_to_extended (const struct floatformat *, void *,
212 const void *, int);
213
214 static void arm_neon_quad_read (struct gdbarch *gdbarch,
215 struct regcache *regcache,
216 int regnum, gdb_byte *buf);
217 static void arm_neon_quad_write (struct gdbarch *gdbarch,
218 struct regcache *regcache,
219 int regnum, const gdb_byte *buf);
220
221 struct arm_prologue_cache
222 {
223 /* The stack pointer at the time this frame was created; i.e. the
224 caller's stack pointer when this function was called. It is used
225 to identify this frame. */
226 CORE_ADDR prev_sp;
227
228 /* The frame base for this frame is just prev_sp - frame size.
229 FRAMESIZE is the distance from the frame pointer to the
230 initial stack pointer. */
231
232 int framesize;
233
234 /* The register used to hold the frame pointer for this frame. */
235 int framereg;
236
237 /* Saved register offsets. */
238 struct trad_frame_saved_reg *saved_regs;
239 };
240
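/* Editor's note: the block below is an illustrative sketch, not part
   of the original file. It restates the invariant described in the
   comments above: the frame base is PREV_SP minus FRAMESIZE. The
   helper name is made up, and ATTRIBUTE_UNUSED is assumed to be
   available through the existing includes (ansidecl.h via defs.h). */

static CORE_ADDR ATTRIBUTE_UNUSED
example_arm_frame_base (const struct arm_prologue_cache *cache)
{
  /* FRAMESIZE is the distance from the frame register's value to the
     caller's stack pointer, so the frame base follows directly. */
  return cache->prev_sp - cache->framesize;
}
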
241 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
242 CORE_ADDR prologue_start,
243 CORE_ADDR prologue_end,
244 struct arm_prologue_cache *cache);
245
246 /* Architecture version for displaced stepping. This affects the behaviour of
247 certain instructions, and really should not be hard-wired. */
248
249 #define DISPLACED_STEPPING_ARCH_VERSION 5
250
251 /* Addresses for calling Thumb functions have bit 0 set.
252 Here are some macros to test, set, or clear bit 0 of addresses. */
253 #define IS_THUMB_ADDR(addr) ((addr) & 1)
254 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
255 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
256
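/* Editor's note: illustrative sketch only, not part of the original
   file. It shows the intended use of the Thumb address-bit macros
   above; the helper name is made up. */

static CORE_ADDR ATTRIBUTE_UNUSED
example_thumb_addr_round_trip (CORE_ADDR func_addr)
{
  /* A call destination for a Thumb function carries bit 0 set...  */
  CORE_ADDR call_addr = MAKE_THUMB_ADDR (func_addr);

  gdb_assert (IS_THUMB_ADDR (call_addr));

  /* ...while the address actually executed from has bit 0 clear.  */
  return UNMAKE_THUMB_ADDR (call_addr);
}
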
257 /* Set to true if the 32-bit mode is in use. */
258
259 int arm_apcs_32 = 1;
260
261 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
262
263 static int
264 arm_psr_thumb_bit (struct gdbarch *gdbarch)
265 {
266 if (gdbarch_tdep (gdbarch)->is_m)
267 return XPSR_T;
268 else
269 return CPSR_T;
270 }
271
272 /* Determine if FRAME is executing in Thumb mode. */
273
274 int
275 arm_frame_is_thumb (struct frame_info *frame)
276 {
277 CORE_ADDR cpsr;
278 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
279
280 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
281 directly (from a signal frame or dummy frame) or by interpreting
282 the saved LR (from a prologue or DWARF frame). So consult it and
283 trust the unwinders. */
284 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
285
286 return (cpsr & t_bit) != 0;
287 }
288
289 /* Callback for VEC_lower_bound. */
290
291 static inline int
292 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
293 const struct arm_mapping_symbol *rhs)
294 {
295 return lhs->value < rhs->value;
296 }
297
298 /* Search for the mapping symbol covering MEMADDR. If one is found,
299 return its type. Otherwise, return 0. If START is non-NULL,
300 set *START to the location of the mapping symbol. */
301
302 static char
303 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
304 {
305 struct obj_section *sec;
306
307 /* If there are mapping symbols, consult them. */
308 sec = find_pc_section (memaddr);
309 if (sec != NULL)
310 {
311 struct arm_per_objfile *data;
312 VEC(arm_mapping_symbol_s) *map;
313 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
314 0 };
315 unsigned int idx;
316
317 data = objfile_data (sec->objfile, arm_objfile_data_key);
318 if (data != NULL)
319 {
320 map = data->section_maps[sec->the_bfd_section->index];
321 if (!VEC_empty (arm_mapping_symbol_s, map))
322 {
323 struct arm_mapping_symbol *map_sym;
324
325 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
326 arm_compare_mapping_symbols);
327
328 /* VEC_lower_bound finds the earliest ordered insertion
329 point. If the following symbol starts at this exact
330 address, we use that; otherwise, the preceding
331 mapping symbol covers this address. */
332 if (idx < VEC_length (arm_mapping_symbol_s, map))
333 {
334 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
335 if (map_sym->value == map_key.value)
336 {
337 if (start)
338 *start = map_sym->value + obj_section_addr (sec);
339 return map_sym->type;
340 }
341 }
342
343 if (idx > 0)
344 {
345 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
346 if (start)
347 *start = map_sym->value + obj_section_addr (sec);
348 return map_sym->type;
349 }
350 }
351 }
352 }
353
354 return 0;
355 }
356
357 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
358 CORE_ADDR pc, int insert_bkpt);
359
360 /* Determine if the program counter specified in MEMADDR is in a Thumb
361 function. This function should be called for addresses unrelated to
362 any executing frame; otherwise, prefer arm_frame_is_thumb. */
363
364 static int
365 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
366 {
367 struct obj_section *sec;
368 struct minimal_symbol *sym;
369 char type;
370
371 /* If bit 0 of the address is set, assume this is a Thumb address. */
372 if (IS_THUMB_ADDR (memaddr))
373 return 1;
374
375 /* If the user wants to override the symbol table, let them. */
376 if (strcmp (arm_force_mode_string, "arm") == 0)
377 return 0;
378 if (strcmp (arm_force_mode_string, "thumb") == 0)
379 return 1;
380
381 /* ARM v6-M and v7-M are always in Thumb mode. */
382 if (gdbarch_tdep (gdbarch)->is_m)
383 return 1;
384
385 /* If there are mapping symbols, consult them. */
386 type = arm_find_mapping_symbol (memaddr, NULL);
387 if (type)
388 return type == 't';
389
390 /* Thumb functions have a "special" bit set in minimal symbols. */
391 sym = lookup_minimal_symbol_by_pc (memaddr);
392 if (sym)
393 return (MSYMBOL_IS_SPECIAL (sym));
394
395 /* If the user wants to override the fallback mode, let them. */
396 if (strcmp (arm_fallback_mode_string, "arm") == 0)
397 return 0;
398 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
399 return 1;
400
401 /* If we couldn't find any symbol, but we're talking to a running
402 target, then trust the current value of $cpsr. This lets
403 "display/i $pc" always show the correct mode (though if there is
404 a symbol table we will not reach here, so it still may not be
405 displayed in the mode in which it will be executed).
406
407 As a further heuristic, if we detect that we are single-stepping, we
408 check which state executing the current instruction will leave us
409 in. */
410 if (target_has_registers)
411 {
412 struct frame_info *current_frame = get_current_frame ();
413 CORE_ADDR current_pc = get_frame_pc (current_frame);
414 int is_thumb = arm_frame_is_thumb (current_frame);
415 CORE_ADDR next_pc;
416 if (memaddr == current_pc)
417 return is_thumb;
418 else
419 {
420 struct gdbarch *gdbarch = get_frame_arch (current_frame);
421 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
422 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
423 return IS_THUMB_ADDR (next_pc);
424 else
425 return is_thumb;
426 }
427 }
428
429 /* Otherwise we're out of luck; we assume ARM. */
430 return 0;
431 }
432
433 /* Remove useless bits from addresses in a running program. */
434 static CORE_ADDR
435 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
436 {
437 if (arm_apcs_32)
438 return UNMAKE_THUMB_ADDR (val);
439 else
440 return (val & 0x03fffffc);
441 }
442
443 /* When reading symbols, we need to zap the low bit of the address,
444 which may be set to 1 for Thumb functions. */
445 static CORE_ADDR
446 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
447 {
448 return val & ~1;
449 }
450
451 /* Return 1 if PC is the start of a compiler helper function which
452 can be safely ignored during prologue skipping. */
453 static int
454 skip_prologue_function (CORE_ADDR pc)
455 {
456 struct minimal_symbol *msym;
457 const char *name;
458
459 msym = lookup_minimal_symbol_by_pc (pc);
460 if (msym == NULL || SYMBOL_VALUE_ADDRESS (msym) != pc)
461 return 0;
462
463 name = SYMBOL_LINKAGE_NAME (msym);
464 if (name == NULL)
465 return 0;
466
467 /* The GNU linker's Thumb call stub to foo is named
468 __foo_from_thumb. */
469 if (strstr (name, "_from_thumb") != NULL)
470 name += 2;
471
472 /* On soft-float targets, __truncdfsf2 is called to convert promoted
473 arguments to their argument types in non-prototyped
474 functions. */
475 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
476 return 1;
477 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
478 return 1;
479
480 /* Internal functions related to thread-local storage. */
481 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
482 return 1;
483 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
484 return 1;
485
486 return 0;
487 }
488
489 /* Support routines for instruction parsing. */
490 #define submask(x) ((1L << ((x) + 1)) - 1)
491 #define bit(obj,st) (((obj) >> (st)) & 1)
492 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
493 #define sbits(obj,st,fn) \
494 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
495 #define BranchDest(addr,instr) \
496 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
497
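/* Editor's note: illustrative sketch only, not part of the original
   file. It runs the helpers above over one ARM "bl" encoding; the
   function name and sample address are made up. */

static CORE_ADDR ATTRIBUTE_UNUSED
example_decode_bl_dest (void)
{
  CORE_ADDR pc = 0x8000;
  unsigned long insn = 0xeb000001;  /* bl <pc + 0xc> */

  gdb_assert (bits (insn, 24, 27) == 0xb);  /* BL opcode field.  */
  gdb_assert (sbits (insn, 0, 23) == 1);    /* Signed 24-bit word offset.  */

  /* BranchDest adds the 8-byte ARM pipeline offset and scales the
     word offset by 4, giving 0x800c here.  */
  return BranchDest (pc, insn);
}
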
498 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
499
500 static unsigned int
501 thumb_expand_immediate (unsigned int imm)
502 {
503 unsigned int count = imm >> 7;
504
505 if (count < 8)
506 switch (count / 2)
507 {
508 case 0:
509 return imm & 0xff;
510 case 1:
511 return (imm & 0xff) | ((imm & 0xff) << 16);
512 case 2:
513 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
514 case 3:
515 return (imm & 0xff) | ((imm & 0xff) << 8)
516 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
517 }
518
519 return (0x80 | (imm & 0x7f)) << (32 - count);
520 }
521
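/* Editor's note: illustrative sketch only, not part of the original
   file. Sample expansions of the 12-bit i:imm3:imm8 field handled by
   thumb_expand_immediate above; the function name is made up. */

static void ATTRIBUTE_UNUSED
example_thumb_expand_immediate (void)
{
  gdb_assert (thumb_expand_immediate (0x0ab) == 0x000000ab); /* Plain byte.  */
  gdb_assert (thumb_expand_immediate (0x2ab) == 0xab00ab00); /* Byte pattern.  */
  gdb_assert (thumb_expand_immediate (0x4ab) == 0x55800000); /* 0xAB ror 9.  */
}
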
522 /* Return 1 if the 16-bit Thumb instruction INST might change
523 control flow, 0 otherwise. */
524
525 static int
526 thumb_instruction_changes_pc (unsigned short inst)
527 {
528 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
529 return 1;
530
531 if ((inst & 0xf000) == 0xd000) /* conditional branch */
532 return 1;
533
534 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
535 return 1;
536
537 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
538 return 1;
539
540 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
541 return 1;
542
543 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
544 return 1;
545
546 return 0;
547 }
548
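/* Editor's note: illustrative sketch only, not part of the original
   file. Two sample 16-bit Thumb encodings run through the classifier
   above; the function name is made up. */

static void ATTRIBUTE_UNUSED
example_thumb_changes_pc (void)
{
  gdb_assert (thumb_instruction_changes_pc (0xbd10));   /* pop {r4, pc} */
  gdb_assert (!thumb_instruction_changes_pc (0xb510));  /* push {r4, lr} */
}
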
549 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
550 might change control flow, 0 otherwise. */
551
552 static int
553 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
554 {
555 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
556 {
557 /* Branches and miscellaneous control instructions. */
558
559 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
560 {
561 /* B, BL, BLX. */
562 return 1;
563 }
564 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
565 {
566 /* SUBS PC, LR, #imm8. */
567 return 1;
568 }
569 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
570 {
571 /* Conditional branch. */
572 return 1;
573 }
574
575 return 0;
576 }
577
578 if ((inst1 & 0xfe50) == 0xe810)
579 {
580 /* Load multiple or RFE. */
581
582 if (bit (inst1, 7) && !bit (inst1, 8))
583 {
584 /* LDMIA or POP */
585 if (bit (inst2, 15))
586 return 1;
587 }
588 else if (!bit (inst1, 7) && bit (inst1, 8))
589 {
590 /* LDMDB */
591 if (bit (inst2, 15))
592 return 1;
593 }
594 else if (bit (inst1, 7) && bit (inst1, 8))
595 {
596 /* RFEIA */
597 return 1;
598 }
599 else if (!bit (inst1, 7) && !bit (inst1, 8))
600 {
601 /* RFEDB */
602 return 1;
603 }
604
605 return 0;
606 }
607
608 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
609 {
610 /* MOV PC or MOVS PC. */
611 return 1;
612 }
613
614 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
615 {
616 /* LDR PC. */
617 if (bits (inst1, 0, 3) == 15)
618 return 1;
619 if (bit (inst1, 7))
620 return 1;
621 if (bit (inst2, 11))
622 return 1;
623 if ((inst2 & 0x0fc0) == 0x0000)
624 return 1;
625
626 return 0;
627 }
628
629 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
630 {
631 /* TBB. */
632 return 1;
633 }
634
635 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
636 {
637 /* TBH. */
638 return 1;
639 }
640
641 return 0;
642 }
643
644 /* Analyze a Thumb prologue, looking for a recognizable stack frame
645 and frame pointer. Scan until we encounter a store that could
646 clobber the stack frame unexpectedly, or an unknown instruction.
647 Return the last address which is definitely safe to skip for an
648 initial breakpoint. */
649
650 static CORE_ADDR
651 thumb_analyze_prologue (struct gdbarch *gdbarch,
652 CORE_ADDR start, CORE_ADDR limit,
653 struct arm_prologue_cache *cache)
654 {
655 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
656 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
657 int i;
658 pv_t regs[16];
659 struct pv_area *stack;
660 struct cleanup *back_to;
661 CORE_ADDR offset;
662 CORE_ADDR unrecognized_pc = 0;
663
664 for (i = 0; i < 16; i++)
665 regs[i] = pv_register (i, 0);
666 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
667 back_to = make_cleanup_free_pv_area (stack);
668
669 while (start < limit)
670 {
671 unsigned short insn;
672
673 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
674
675 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
676 {
677 int regno;
678 int mask;
679
680 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
681 break;
682
683 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
684 whether to save LR (R14). */
685 mask = (insn & 0xff) | ((insn & 0x100) << 6);
686
687 /* Calculate offsets of saved R0-R7 and LR. */
688 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
689 if (mask & (1 << regno))
690 {
691 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
692 -4);
693 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
694 }
695 }
696 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
697 sub sp, #simm */
698 {
699 offset = (insn & 0x7f) << 2; /* get scaled offset */
700 if (insn & 0x80) /* Check for SUB. */
701 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
702 -offset);
703 else
704 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
705 offset);
706 }
707 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
708 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
709 (insn & 0xff) << 2);
710 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
711 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
712 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
713 bits (insn, 6, 8));
714 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
715 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
716 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
717 bits (insn, 0, 7));
718 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
719 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
720 && pv_is_constant (regs[bits (insn, 3, 5)]))
721 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
722 regs[bits (insn, 6, 8)]);
723 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
724 && pv_is_constant (regs[bits (insn, 3, 6)]))
725 {
726 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
727 int rm = bits (insn, 3, 6);
728 regs[rd] = pv_add (regs[rd], regs[rm]);
729 }
730 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
731 {
732 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
733 int src_reg = (insn & 0x78) >> 3;
734 regs[dst_reg] = regs[src_reg];
735 }
736 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
737 {
738 /* Handle stores to the stack. Normally pushes are used,
739 but with GCC -mtpcs-frame, there may be other stores
740 in the prologue to create the frame. */
741 int regno = (insn >> 8) & 0x7;
742 pv_t addr;
743
744 offset = (insn & 0xff) << 2;
745 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
746
747 if (pv_area_store_would_trash (stack, addr))
748 break;
749
750 pv_area_store (stack, addr, 4, regs[regno]);
751 }
752 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
753 {
754 int rd = bits (insn, 0, 2);
755 int rn = bits (insn, 3, 5);
756 pv_t addr;
757
758 offset = bits (insn, 6, 10) << 2;
759 addr = pv_add_constant (regs[rn], offset);
760
761 if (pv_area_store_would_trash (stack, addr))
762 break;
763
764 pv_area_store (stack, addr, 4, regs[rd]);
765 }
766 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
767 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
768 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
769 /* Ignore stores of argument registers to the stack. */
770 ;
771 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
772 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
773 /* Ignore block loads from the stack, potentially copying
774 parameters from memory. */
775 ;
776 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
777 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
778 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
779 /* Similarly ignore single loads from the stack. */
780 ;
781 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
782 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
783 /* Skip register copies, i.e. saves to another register
784 instead of the stack. */
785 ;
786 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
787 /* Recognize constant loads; even with small stacks these are necessary
788 on Thumb. */
789 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
790 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
791 {
792 /* Constant pool loads, for the same reason. */
793 unsigned int constant;
794 CORE_ADDR loc;
795
796 loc = start + 4 + bits (insn, 0, 7) * 4;
797 constant = read_memory_unsigned_integer (loc, 4, byte_order);
798 regs[bits (insn, 8, 10)] = pv_constant (constant);
799 }
800 else if ((insn & 0xe000) == 0xe000)
801 {
802 unsigned short inst2;
803
804 inst2 = read_memory_unsigned_integer (start + 2, 2,
805 byte_order_for_code);
806
807 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
808 {
809 /* BL, BLX. Allow some special function calls when
810 skipping the prologue; GCC generates these before
811 storing arguments to the stack. */
812 CORE_ADDR nextpc;
813 int j1, j2, imm1, imm2;
814
815 imm1 = sbits (insn, 0, 10);
816 imm2 = bits (inst2, 0, 10);
817 j1 = bit (inst2, 13);
818 j2 = bit (inst2, 11);
819
820 offset = ((imm1 << 12) + (imm2 << 1));
821 offset ^= ((!j2) << 22) | ((!j1) << 23);
822
823 nextpc = start + 4 + offset;
824 /* For BLX make sure to clear the low bits. */
825 if (bit (inst2, 12) == 0)
826 nextpc = nextpc & 0xfffffffc;
827
828 if (!skip_prologue_function (nextpc))
829 break;
830 }
831
832 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!}, { registers } */
833 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
834 {
835 pv_t addr = regs[bits (insn, 0, 3)];
836 int regno;
837
838 if (pv_area_store_would_trash (stack, addr))
839 break;
840
841 /* Calculate offsets of saved registers. */
842 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
843 if (inst2 & (1 << regno))
844 {
845 addr = pv_add_constant (addr, -4);
846 pv_area_store (stack, addr, 4, regs[regno]);
847 }
848
849 if (insn & 0x0020)
850 regs[bits (insn, 0, 3)] = addr;
851 }
852
853 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2, [Rn, #+/-imm]{!} */
854 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
855 {
856 int regno1 = bits (inst2, 12, 15);
857 int regno2 = bits (inst2, 8, 11);
858 pv_t addr = regs[bits (insn, 0, 3)];
859
860 offset = inst2 & 0xff;
861 if (insn & 0x0080)
862 addr = pv_add_constant (addr, offset);
863 else
864 addr = pv_add_constant (addr, -offset);
865
866 if (pv_area_store_would_trash (stack, addr))
867 break;
868
869 pv_area_store (stack, addr, 4, regs[regno1]);
870 pv_area_store (stack, pv_add_constant (addr, 4),
871 4, regs[regno2]);
872
873 if (insn & 0x0020)
874 regs[bits (insn, 0, 3)] = addr;
875 }
876
877 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
878 && (inst2 & 0x0c00) == 0x0c00
879 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
880 {
881 int regno = bits (inst2, 12, 15);
882 pv_t addr = regs[bits (insn, 0, 3)];
883
884 offset = inst2 & 0xff;
885 if (inst2 & 0x0200)
886 addr = pv_add_constant (addr, offset);
887 else
888 addr = pv_add_constant (addr, -offset);
889
890 if (pv_area_store_would_trash (stack, addr))
891 break;
892
893 pv_area_store (stack, addr, 4, regs[regno]);
894
895 if (inst2 & 0x0100)
896 regs[bits (insn, 0, 3)] = addr;
897 }
898
899 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
900 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
901 {
902 int regno = bits (inst2, 12, 15);
903 pv_t addr;
904
905 offset = inst2 & 0xfff;
906 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
907
908 if (pv_area_store_would_trash (stack, addr))
909 break;
910
911 pv_area_store (stack, addr, 4, regs[regno]);
912 }
913
914 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
915 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
916 /* Ignore stores of argument registers to the stack. */
917 ;
918
919 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
920 && (inst2 & 0x0d00) == 0x0c00
921 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
922 /* Ignore stores of argument registers to the stack. */
923 ;
924
925 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!], { registers } */
926 && (inst2 & 0x8000) == 0x0000
927 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
928 /* Ignore block loads from the stack, potentially copying
929 parameters from memory. */
930 ;
931
932 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2, [Rn, #+/-imm] */
933 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
934 /* Similarly ignore dual loads from the stack. */
935 ;
936
937 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
938 && (inst2 & 0x0d00) == 0x0c00
939 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
940 /* Similarly ignore single loads from the stack. */
941 ;
942
943 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
944 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
945 /* Similarly ignore single loads from the stack. */
946 ;
947
948 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
949 && (inst2 & 0x8000) == 0x0000)
950 {
951 unsigned int imm = ((bits (insn, 10, 10) << 11)
952 | (bits (inst2, 12, 14) << 8)
953 | bits (inst2, 0, 7));
954
955 regs[bits (inst2, 8, 11)]
956 = pv_add_constant (regs[bits (insn, 0, 3)],
957 thumb_expand_immediate (imm));
958 }
959
960 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
961 && (inst2 & 0x8000) == 0x0000)
962 {
963 unsigned int imm = ((bits (insn, 10, 10) << 11)
964 | (bits (inst2, 12, 14) << 8)
965 | bits (inst2, 0, 7));
966
967 regs[bits (inst2, 8, 11)]
968 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
969 }
970
971 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
972 && (inst2 & 0x8000) == 0x0000)
973 {
974 unsigned int imm = ((bits (insn, 10, 10) << 11)
975 | (bits (inst2, 12, 14) << 8)
976 | bits (inst2, 0, 7));
977
978 regs[bits (inst2, 8, 11)]
979 = pv_add_constant (regs[bits (insn, 0, 3)],
980 - (CORE_ADDR) thumb_expand_immediate (imm));
981 }
982
983 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
984 && (inst2 & 0x8000) == 0x0000)
985 {
986 unsigned int imm = ((bits (insn, 10, 10) << 11)
987 | (bits (inst2, 12, 14) << 8)
988 | bits (inst2, 0, 7));
989
990 regs[bits (inst2, 8, 11)]
991 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
992 }
993
994 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
995 {
996 unsigned int imm = ((bits (insn, 10, 10) << 11)
997 | (bits (inst2, 12, 14) << 8)
998 | bits (inst2, 0, 7));
999
1000 regs[bits (inst2, 8, 11)]
1001 = pv_constant (thumb_expand_immediate (imm));
1002 }
1003
1004 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1005 {
1006 unsigned int imm = ((bits (insn, 0, 3) << 12)
1007 | (bits (insn, 10, 10) << 11)
1008 | (bits (inst2, 12, 14) << 8)
1009 | bits (inst2, 0, 7));
1010
1011 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1012 }
1013
1014 else if (insn == 0xea5f /* mov.w Rd,Rm */
1015 && (inst2 & 0xf0f0) == 0)
1016 {
1017 int dst_reg = (inst2 & 0x0f00) >> 8;
1018 int src_reg = inst2 & 0xf;
1019 regs[dst_reg] = regs[src_reg];
1020 }
1021
1022 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1023 {
1024 /* Constant pool loads. */
1025 unsigned int constant;
1026 CORE_ADDR loc;
1027
1028 offset = bits (insn, 0, 11);
1029 if (insn & 0x0080)
1030 loc = start + 4 + offset;
1031 else
1032 loc = start + 4 - offset;
1033
1034 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1035 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1036 }
1037
1038 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1039 {
1040 /* Constant pool loads. */
1041 unsigned int constant;
1042 CORE_ADDR loc;
1043
1044 offset = bits (insn, 0, 7) << 2;
1045 if (insn & 0x0080)
1046 loc = start + 4 + offset;
1047 else
1048 loc = start + 4 - offset;
1049
1050 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1051 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1052
1053 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1054 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1055 }
1056
1057 else if (thumb2_instruction_changes_pc (insn, inst2))
1058 {
1059 /* Don't scan past anything that might change control flow. */
1060 break;
1061 }
1062 else
1063 {
1064 /* The optimizer might shove anything into the prologue,
1065 so we just skip what we don't recognize. */
1066 unrecognized_pc = start;
1067 }
1068
1069 start += 2;
1070 }
1071 else if (thumb_instruction_changes_pc (insn))
1072 {
1073 /* Don't scan past anything that might change control flow. */
1074 break;
1075 }
1076 else
1077 {
1078 /* The optimizer might shove anything into the prologue,
1079 so we just skip what we don't recognize. */
1080 unrecognized_pc = start;
1081 }
1082
1083 start += 2;
1084 }
1085
1086 if (arm_debug)
1087 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1088 paddress (gdbarch, start));
1089
1090 if (unrecognized_pc == 0)
1091 unrecognized_pc = start;
1092
1093 if (cache == NULL)
1094 {
1095 do_cleanups (back_to);
1096 return unrecognized_pc;
1097 }
1098
1099 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1100 {
1101 /* Frame pointer is fp. Frame size is constant. */
1102 cache->framereg = ARM_FP_REGNUM;
1103 cache->framesize = -regs[ARM_FP_REGNUM].k;
1104 }
1105 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1106 {
1107 /* Frame pointer is r7. Frame size is constant. */
1108 cache->framereg = THUMB_FP_REGNUM;
1109 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1110 }
1111 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1112 {
1113 /* Try the stack pointer... this is a bit desperate. */
1114 cache->framereg = ARM_SP_REGNUM;
1115 cache->framesize = -regs[ARM_SP_REGNUM].k;
1116 }
1117 else
1118 {
1119 /* We're just out of luck. We don't know where the frame is. */
1120 cache->framereg = -1;
1121 cache->framesize = 0;
1122 }
1123
1124 for (i = 0; i < 16; i++)
1125 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1126 cache->saved_regs[i].addr = offset;
1127
1128 do_cleanups (back_to);
1129 return unrecognized_pc;
1130 }
1131
1132 /* Advance the PC across any function entry prologue instructions to
1133 reach some "real" code.
1134
1135 The APCS (ARM Procedure Call Standard) defines the following
1136 prologue:
1137
1138 mov ip, sp
1139 [stmfd sp!, {a1,a2,a3,a4}]
1140 stmfd sp!, {...,fp,ip,lr,pc}
1141 [stfe f7, [sp, #-12]!]
1142 [stfe f6, [sp, #-12]!]
1143 [stfe f5, [sp, #-12]!]
1144 [stfe f4, [sp, #-12]!]
1145 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
1146
1147 static CORE_ADDR
1148 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1149 {
1150 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1151 unsigned long inst;
1152 CORE_ADDR skip_pc;
1153 CORE_ADDR func_addr, limit_pc;
1154 struct symtab_and_line sal;
1155
1156 /* See if we can determine the end of the prologue via the symbol table.
1157 If so, then return either PC, or the PC after the prologue, whichever
1158 is greater. */
1159 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1160 {
1161 CORE_ADDR post_prologue_pc
1162 = skip_prologue_using_sal (gdbarch, func_addr);
1163 struct symtab *s = find_pc_symtab (func_addr);
1164
1165 /* GCC always emits a line note before the prologue and another
1166 one after, even if the two are at the same address or on the
1167 same line. Take advantage of this so that we do not need to
1168 know every instruction that might appear in the prologue. We
1169 will have producer information for most binaries; if it is
1170 missing (e.g. for -gstabs), assume the GNU tools. */
1171 if (post_prologue_pc
1172 && (s == NULL
1173 || s->producer == NULL
1174 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1175 return post_prologue_pc;
1176
1177 if (post_prologue_pc != 0)
1178 {
1179 CORE_ADDR analyzed_limit;
1180
1181 /* For non-GCC compilers, make sure the entire line is an
1182 acceptable prologue; GDB will round this function's
1183 return value up to the end of the following line so we
1184 can not skip just part of a line (and we do not want to).
1185
1186 RealView does not treat the prologue specially, but does
1187 associate prologue code with the opening brace; so this
1188 lets us skip the first line if we think it is the opening
1189 brace. */
1190 if (arm_pc_is_thumb (gdbarch, func_addr))
1191 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1192 post_prologue_pc, NULL);
1193 else
1194 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1195 post_prologue_pc, NULL);
1196
1197 if (analyzed_limit != post_prologue_pc)
1198 return func_addr;
1199
1200 return post_prologue_pc;
1201 }
1202 }
1203
1204 /* Can't determine prologue from the symbol table, need to examine
1205 instructions. */
1206
1207 /* Find an upper limit on the function prologue using the debug
1208 information. If the debug information could not be used to provide
1209 that bound, then use an arbitrary large number as the upper bound. */
1210 /* Like arm_scan_prologue, stop no later than pc + 64. */
1211 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1212 if (limit_pc == 0)
1213 limit_pc = pc + 64; /* Magic. */
1214
1215
1216 /* Check if this is Thumb code. */
1217 if (arm_pc_is_thumb (gdbarch, pc))
1218 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1219
1220 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1221 {
1222 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1223
1224 /* "mov ip, sp" is no longer a required part of the prologue. */
1225 if (inst == 0xe1a0c00d) /* mov ip, sp */
1226 continue;
1227
1228 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1229 continue;
1230
1231 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1232 continue;
1233
1234 /* Some prologues begin with "str lr, [sp, #-4]!". */
1235 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1236 continue;
1237
1238 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1239 continue;
1240
1241 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1242 continue;
1243
1244 /* Any insns after this point may float into the code, if it makes
1245 for better instruction scheduling, so we skip them only if we
1246 find them, but still consider the function to be frame-ful. */
1247
1248 /* We may have either one sfmfd instruction here, or several stfe
1249 insns, depending on the version of floating point code we
1250 support. */
1251 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1252 continue;
1253
1254 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1255 continue;
1256
1257 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1258 continue;
1259
1260 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1261 continue;
1262
1263 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1264 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1265 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1266 continue;
1267
1268 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1269 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1270 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1271 continue;
1272
1273 /* Unrecognized instruction; stop scanning. */
1274 break;
1275 }
1276
1277 return skip_pc; /* End of prologue */
1278 }
1279
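/* Editor's note: illustrative sketch only, not part of the original
   file. The canonical APCS prologue described in the comment before
   arm_skip_prologue, written out as the raw encodings the scan above
   recognizes; the array name is made up. */

static const unsigned long example_apcs_prologue[] ATTRIBUTE_UNUSED =
{
  0xe1a0c00d,   /* mov ip, sp */
  0xe92dd800,   /* stmfd sp!, {fp, ip, lr, pc} */
  0xe24cb004,   /* sub fp, ip, #4 */
};
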
1280 /* *INDENT-OFF* */
1281 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1282 This function decodes a Thumb function prologue to determine:
1283 1) the size of the stack frame
1284 2) which registers are saved on it
1285 3) the offsets of saved regs
1286 4) the offset from the stack pointer to the frame pointer
1287
1288 A typical Thumb function prologue would create this stack frame
1289 (offsets relative to FP)
1290 old SP -> 24 stack parameters
1291 20 LR
1292 16 R7
1293 R7 -> 0 local variables (16 bytes)
1294 SP -> -12 additional stack space (12 bytes)
1295 The frame size would thus be 36 bytes, and the frame offset would be
1296 12 bytes. The frame register is R7.
1297
1298 The comments for thumb_skip_prolog() describe the algorithm we use
1299 to detect the end of the prologue. */
1300 /* *INDENT-ON* */
1301
1302 static void
1303 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1304 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1305 {
1306 CORE_ADDR prologue_start;
1307 CORE_ADDR prologue_end;
1308 CORE_ADDR current_pc;
1309
1310 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1311 &prologue_end))
1312 {
1313 /* See comment in arm_scan_prologue for an explanation of
1314 this heuristic. */
1315 if (prologue_end > prologue_start + 64)
1316 {
1317 prologue_end = prologue_start + 64;
1318 }
1319 }
1320 else
1321 /* We're in the boondocks: we have no idea where the start of the
1322 function is. */
1323 return;
1324
1325 prologue_end = min (prologue_end, prev_pc);
1326
1327 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1328 }
1329
1330 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1331
1332 static int
1333 arm_instruction_changes_pc (uint32_t this_instr)
1334 {
1335 if (bits (this_instr, 28, 31) == INST_NV)
1336 /* Unconditional instructions. */
1337 switch (bits (this_instr, 24, 27))
1338 {
1339 case 0xa:
1340 case 0xb:
1341 /* Branch with Link and change to Thumb. */
1342 return 1;
1343 case 0xc:
1344 case 0xd:
1345 case 0xe:
1346 /* Coprocessor register transfer. */
1347 if (bits (this_instr, 12, 15) == 15)
1348 error (_("Invalid update to pc in instruction"));
1349 return 0;
1350 default:
1351 return 0;
1352 }
1353 else
1354 switch (bits (this_instr, 25, 27))
1355 {
1356 case 0x0:
1357 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1358 {
1359 /* Multiplies and extra load/stores. */
1360 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1361 /* Neither multiplies nor extra load/stores are allowed
1362 to modify PC. */
1363 return 0;
1364
1365 /* Otherwise, miscellaneous instructions. */
1366
1367 /* BX <reg>, BXJ <reg>, BLX <reg> */
1368 if (bits (this_instr, 4, 27) == 0x12fff1
1369 || bits (this_instr, 4, 27) == 0x12fff2
1370 || bits (this_instr, 4, 27) == 0x12fff3)
1371 return 1;
1372
1373 /* Other miscellaneous instructions are unpredictable if they
1374 modify PC. */
1375 return 0;
1376 }
1377 /* Data processing instruction. Fall through. */
1378
1379 case 0x1:
1380 if (bits (this_instr, 12, 15) == 15)
1381 return 1;
1382 else
1383 return 0;
1384
1385 case 0x2:
1386 case 0x3:
1387 /* Media instructions and architecturally undefined instructions. */
1388 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1389 return 0;
1390
1391 /* Stores. */
1392 if (bit (this_instr, 20) == 0)
1393 return 0;
1394
1395 /* Loads. */
1396 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1397 return 1;
1398 else
1399 return 0;
1400
1401 case 0x4:
1402 /* Load/store multiple. */
1403 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1404 return 1;
1405 else
1406 return 0;
1407
1408 case 0x5:
1409 /* Branch and branch with link. */
1410 return 1;
1411
1412 case 0x6:
1413 case 0x7:
1414 /* Coprocessor transfers or SWIs can not affect PC. */
1415 return 0;
1416
1417 default:
1418 internal_error (__FILE__, __LINE__, "bad value in switch");
1419 }
1420 }
1421
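/* Editor's note: illustrative sketch only, not part of the original
   file. Two sample ARM encodings run through the classifier above;
   the function name is made up. */

static void ATTRIBUTE_UNUSED
example_arm_changes_pc (void)
{
  gdb_assert (arm_instruction_changes_pc (0xe590f000));   /* ldr pc, [r0] */
  gdb_assert (!arm_instruction_changes_pc (0xe1a00001));  /* mov r0, r1 */
}
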
1422 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1423 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1424 fill it in. Return the first address not recognized as a prologue
1425 instruction.
1426
1427 We recognize all the instructions typically found in ARM prologues,
1428 plus harmless instructions which can be skipped (either for analysis
1429 purposes, or a more restrictive set that can be skipped when finding
1430 the end of the prologue). */
1431
1432 static CORE_ADDR
1433 arm_analyze_prologue (struct gdbarch *gdbarch,
1434 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1435 struct arm_prologue_cache *cache)
1436 {
1437 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1438 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1439 int regno;
1440 CORE_ADDR offset, current_pc;
1441 pv_t regs[ARM_FPS_REGNUM];
1442 struct pv_area *stack;
1443 struct cleanup *back_to;
1444 int framereg, framesize;
1445 CORE_ADDR unrecognized_pc = 0;
1446
1447 /* Search the prologue looking for instructions that set up the
1448 frame pointer, adjust the stack pointer, and save registers.
1449
1450 Be careful, however: if it doesn't look like a prologue,
1451 don't try to scan it. If, for instance, a frameless function
1452 begins with stmfd sp!, then we will tell ourselves there is
1453 a frame, which will confuse stack traceback, as well as "finish"
1454 and other operations that rely on knowledge of the stack
1455 traceback. */
1456
1457 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1458 regs[regno] = pv_register (regno, 0);
1459 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1460 back_to = make_cleanup_free_pv_area (stack);
1461
1462 for (current_pc = prologue_start;
1463 current_pc < prologue_end;
1464 current_pc += 4)
1465 {
1466 unsigned int insn
1467 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1468
1469 if (insn == 0xe1a0c00d) /* mov ip, sp */
1470 {
1471 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1472 continue;
1473 }
1474 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1475 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1476 {
1477 unsigned imm = insn & 0xff; /* immediate value */
1478 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1479 int rd = bits (insn, 12, 15);
1480 imm = (imm >> rot) | (imm << (32 - rot));
1481 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1482 continue;
1483 }
1484 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1485 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1486 {
1487 unsigned imm = insn & 0xff; /* immediate value */
1488 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1489 int rd = bits (insn, 12, 15);
1490 imm = (imm >> rot) | (imm << (32 - rot));
1491 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1492 continue;
1493 }
1494 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd, [sp, #-4]! */
1495 {
1496 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1497 break;
1498 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1499 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1500 regs[bits (insn, 12, 15)]);
1501 continue;
1502 }
1503 else if ((insn & 0xffff0000) == 0xe92d0000)
1504 /* stmfd sp!, {..., fp, ip, lr, pc}
1505 or
1506 stmfd sp!, {a1, a2, a3, a4} */
1507 {
1508 int mask = insn & 0xffff;
1509
1510 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1511 break;
1512
1513 /* Calculate offsets of saved registers. */
1514 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1515 if (mask & (1 << regno))
1516 {
1517 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1518 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1519 }
1520 }
1521 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1522 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1523 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1524 {
1525 /* No need to add this to saved_regs -- it's just an arg reg. */
1526 continue;
1527 }
1528 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1529 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1530 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1531 {
1532 /* No need to add this to saved_regs -- it's just an arg reg. */
1533 continue;
1534 }
1535 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn, { registers } */
1536 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1537 {
1538 /* No need to add this to saved_regs -- it's just arg regs. */
1539 continue;
1540 }
1541 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1542 {
1543 unsigned imm = insn & 0xff; /* immediate value */
1544 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1545 imm = (imm >> rot) | (imm << (32 - rot));
1546 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1547 }
1548 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1549 {
1550 unsigned imm = insn & 0xff; /* immediate value */
1551 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1552 imm = (imm >> rot) | (imm << (32 - rot));
1553 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1554 }
1555 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
1556 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1557 {
1558 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1559 break;
1560
1561 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1562 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1563 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1564 }
1565 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
1566 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1567 {
1568 int n_saved_fp_regs;
1569 unsigned int fp_start_reg, fp_bound_reg;
1570
1571 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1572 break;
1573
1574 if ((insn & 0x800) == 0x800) /* N0 is set */
1575 {
1576 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1577 n_saved_fp_regs = 3;
1578 else
1579 n_saved_fp_regs = 1;
1580 }
1581 else
1582 {
1583 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1584 n_saved_fp_regs = 2;
1585 else
1586 n_saved_fp_regs = 4;
1587 }
1588
1589 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1590 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1591 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1592 {
1593 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1594 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1595 regs[fp_start_reg++]);
1596 }
1597 }
1598 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1599 {
1600 /* Allow some special function calls when skipping the
1601 prologue; GCC generates these before storing arguments to
1602 the stack. */
1603 CORE_ADDR dest = BranchDest (current_pc, insn);
1604
1605 if (skip_prologue_function (dest))
1606 continue;
1607 else
1608 break;
1609 }
1610 else if ((insn & 0xf0000000) != 0xe0000000)
1611 break; /* Condition not true, exit early */
1612 else if (arm_instruction_changes_pc (insn))
1613 /* Don't scan past anything that might change control flow. */
1614 break;
1615 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1616 {
1617 /* Ignore block loads from the stack, potentially copying
1618 parameters from memory. */
1619 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1620 continue;
1621 else
1622 break;
1623 }
1624 else if ((insn & 0xfc500000) == 0xe4100000)
1625 {
1626 /* Similarly ignore single loads from the stack. */
1627 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1628 continue;
1629 else
1630 break;
1631 }
1632 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1633 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1634 register instead of the stack. */
1635 continue;
1636 else
1637 {
1638 /* The optimizer might shove anything into the prologue,
1639 so we just skip what we don't recognize. */
1640 unrecognized_pc = current_pc;
1641 continue;
1642 }
1643 }
1644
1645 if (unrecognized_pc == 0)
1646 unrecognized_pc = current_pc;
1647
1648 /* The frame size is just the distance from the frame register
1649 to the original stack pointer. */
1650 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1651 {
1652 /* Frame pointer is fp. */
1653 framereg = ARM_FP_REGNUM;
1654 framesize = -regs[ARM_FP_REGNUM].k;
1655 }
1656 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1657 {
1658 /* Try the stack pointer... this is a bit desperate. */
1659 framereg = ARM_SP_REGNUM;
1660 framesize = -regs[ARM_SP_REGNUM].k;
1661 }
1662 else
1663 {
1664 /* We're just out of luck. We don't know where the frame is. */
1665 framereg = -1;
1666 framesize = 0;
1667 }
1668
1669 if (cache)
1670 {
1671 cache->framereg = framereg;
1672 cache->framesize = framesize;
1673
1674 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1675 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1676 cache->saved_regs[regno].addr = offset;
1677 }
1678
1679 if (arm_debug)
1680 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1681 paddress (gdbarch, unrecognized_pc));
1682
1683 do_cleanups (back_to);
1684 return unrecognized_pc;
1685 }
1686
1687 static void
1688 arm_scan_prologue (struct frame_info *this_frame,
1689 struct arm_prologue_cache *cache)
1690 {
1691 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1692 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1693 int regno;
1694 CORE_ADDR prologue_start, prologue_end, current_pc;
1695 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1696 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1697 pv_t regs[ARM_FPS_REGNUM];
1698 struct pv_area *stack;
1699 struct cleanup *back_to;
1700 CORE_ADDR offset;
1701
1702 /* Assume there is no frame until proven otherwise. */
1703 cache->framereg = ARM_SP_REGNUM;
1704 cache->framesize = 0;
1705
1706 /* Check for Thumb prologue. */
1707 if (arm_frame_is_thumb (this_frame))
1708 {
1709 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1710 return;
1711 }
1712
1713 /* Find the function prologue. If we can't find the function in
1714 the symbol table, peek in the stack frame to find the PC. */
1715 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1716 &prologue_end))
1717 {
1718 /* One way to find the end of the prologue (which works well
1719 for unoptimized code) is to do the following:
1720
1721 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1722
1723 if (sal.line == 0)
1724 prologue_end = prev_pc;
1725 else if (sal.end < prologue_end)
1726 prologue_end = sal.end;
1727
1728 This mechanism is very accurate so long as the optimizer
1729 doesn't move any instructions from the function body into the
1730 prologue. If this happens, sal.end will be the last
1731 instruction in the first hunk of prologue code just before
1732 the first instruction that the scheduler has moved from
1733 the body to the prologue.
1734
1735 In order to make sure that we scan all of the prologue
1736 instructions, we use a slightly less accurate mechanism which
1737 may scan more than necessary. To help compensate for this
1738 lack of accuracy, the prologue scanning loop below contains
1739 several clauses which will cause the loop to terminate early if
1740 an implausible prologue instruction is encountered.
1741
1742 The expression
1743
1744 prologue_start + 64
1745
1746 is a suitable endpoint since it accounts for the largest
1747 possible prologue plus up to five instructions inserted by
1748 the scheduler. */
1749
1750 if (prologue_end > prologue_start + 64)
1751 {
1752 prologue_end = prologue_start + 64; /* See above. */
1753 }
1754 }
1755 else
1756 {
1757 /* We have no symbol information. Our only option is to assume this
1758 function has a standard stack frame and the normal frame register.
1759 Then, we can find the value of our frame pointer on entrance to
1760 the callee (or at the present moment if this is the innermost frame).
1761 The value stored there should be the address of the stmfd + 8. */
1762 CORE_ADDR frame_loc;
1763 LONGEST return_value;
1764
1765 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1766 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1767 return;
1768 else
1769 {
1770 prologue_start = gdbarch_addr_bits_remove
1771 (gdbarch, return_value) - 8;
1772 prologue_end = prologue_start + 64; /* See above. */
1773 }
1774 }
1775
1776 if (prev_pc < prologue_end)
1777 prologue_end = prev_pc;
1778
1779 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1780 }
1781
1782 static struct arm_prologue_cache *
1783 arm_make_prologue_cache (struct frame_info *this_frame)
1784 {
1785 int reg;
1786 struct arm_prologue_cache *cache;
1787 CORE_ADDR unwound_fp;
1788
1789 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1790 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1791
1792 arm_scan_prologue (this_frame, cache);
1793
1794 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1795 if (unwound_fp == 0)
1796 return cache;
1797
1798 cache->prev_sp = unwound_fp + cache->framesize;
1799
1800 /* Calculate actual addresses of saved registers using offsets
1801 determined by arm_scan_prologue. */
1802 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1803 if (trad_frame_addr_p (cache->saved_regs, reg))
1804 cache->saved_regs[reg].addr += cache->prev_sp;
1805
1806 return cache;
1807 }
1808
1809 /* Our frame ID for a normal frame is the current function's starting PC
1810 and the caller's SP when we were called. */
1811
1812 static void
1813 arm_prologue_this_id (struct frame_info *this_frame,
1814 void **this_cache,
1815 struct frame_id *this_id)
1816 {
1817 struct arm_prologue_cache *cache;
1818 struct frame_id id;
1819 CORE_ADDR pc, func;
1820
1821 if (*this_cache == NULL)
1822 *this_cache = arm_make_prologue_cache (this_frame);
1823 cache = *this_cache;
1824
1825 /* This is meant to halt the backtrace at "_start". */
1826 pc = get_frame_pc (this_frame);
1827 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1828 return;
1829
1830 /* If we've hit a wall, stop. */
1831 if (cache->prev_sp == 0)
1832 return;
1833
1834 func = get_frame_func (this_frame);
1835 id = frame_id_build (cache->prev_sp, func);
1836 *this_id = id;
1837 }
1838
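/* Implement the prev_register method of the prologue unwinder. PC,
SP and the CPSR need special reconstruction; all other registers come
from the trad-frame saved register table. */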
1839 static struct value *
1840 arm_prologue_prev_register (struct frame_info *this_frame,
1841 void **this_cache,
1842 int prev_regnum)
1843 {
1844 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1845 struct arm_prologue_cache *cache;
1846
1847 if (*this_cache == NULL)
1848 *this_cache = arm_make_prologue_cache (this_frame);
1849 cache = *this_cache;
1850
1851 /* If we are asked to unwind the PC, then we need to return the LR
1852 instead. The prologue may save PC, but it will point into this
1853 frame's prologue, not the next frame's resume location. Also
1854 strip the saved T bit. A valid LR may have the low bit set, but
1855 a valid PC never does. */
1856 if (prev_regnum == ARM_PC_REGNUM)
1857 {
1858 CORE_ADDR lr;
1859
1860 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1861 return frame_unwind_got_constant (this_frame, prev_regnum,
1862 arm_addr_bits_remove (gdbarch, lr));
1863 }
1864
1865 /* SP is generally not saved to the stack, but this frame is
1866 identified by the next frame's stack pointer at the time of the call.
1867 The value was already reconstructed into PREV_SP. */
1868 if (prev_regnum == ARM_SP_REGNUM)
1869 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1870
1871 /* The CPSR may have been changed by the call instruction and by the
1872 called function. The only bit we can reconstruct is the T bit,
1873 by checking the low bit of LR as of the call. This is a reliable
1874 indicator of Thumb-ness except for some ARM v4T pre-interworking
1875 Thumb code, which could get away with a clear low bit as long as
1876 the called function did not use bx. Guess that all other
1877 bits are unchanged; the condition flags are presumably lost,
1878 but the processor status is likely valid. */
1879 if (prev_regnum == ARM_PS_REGNUM)
1880 {
1881 CORE_ADDR lr, cpsr;
1882 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
1883
1884 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1885 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1886 if (IS_THUMB_ADDR (lr))
1887 cpsr |= t_bit;
1888 else
1889 cpsr &= ~t_bit;
1890 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1891 }
1892
1893 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1894 prev_regnum);
1895 }
1896
1897 struct frame_unwind arm_prologue_unwind = {
1898 NORMAL_FRAME,
1899 arm_prologue_this_id,
1900 arm_prologue_prev_register,
1901 NULL,
1902 default_frame_sniffer
1903 };
1904
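/* Allocate a prologue cache for a stub frame such as a PLT entry.
Stubs do not set up a frame, so the caller's SP is simply this
frame's SP. */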
1905 static struct arm_prologue_cache *
1906 arm_make_stub_cache (struct frame_info *this_frame)
1907 {
1908 struct arm_prologue_cache *cache;
1909
1910 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1911 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1912
1913 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
1914
1915 return cache;
1916 }
1917
1918 /* Our frame ID for a stub frame is the current SP and LR. */
1919
1920 static void
1921 arm_stub_this_id (struct frame_info *this_frame,
1922 void **this_cache,
1923 struct frame_id *this_id)
1924 {
1925 struct arm_prologue_cache *cache;
1926
1927 if (*this_cache == NULL)
1928 *this_cache = arm_make_stub_cache (this_frame);
1929 cache = *this_cache;
1930
1931 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1932 }
1933
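/* Sniffer for the stub unwinder: use it if the PC is inside a PLT
section, or if the memory at the PC cannot be read at all. */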
1934 static int
1935 arm_stub_unwind_sniffer (const struct frame_unwind *self,
1936 struct frame_info *this_frame,
1937 void **this_prologue_cache)
1938 {
1939 CORE_ADDR addr_in_block;
1940 gdb_byte dummy[4];
1941
1942 addr_in_block = get_frame_address_in_block (this_frame);
1943 if (in_plt_section (addr_in_block, NULL)
1944 /* We also use the stub unwinder if the target memory is unreadable
1945 to avoid having the prologue unwinder trying to read it. */
1946 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1947 return 1;
1948
1949 return 0;
1950 }
1951
1952 struct frame_unwind arm_stub_unwind = {
1953 NORMAL_FRAME,
1954 arm_stub_this_id,
1955 arm_prologue_prev_register,
1956 NULL,
1957 arm_stub_unwind_sniffer
1958 };
1959
1960 static CORE_ADDR
1961 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1962 {
1963 struct arm_prologue_cache *cache;
1964
1965 if (*this_cache == NULL)
1966 *this_cache = arm_make_prologue_cache (this_frame);
1967 cache = *this_cache;
1968
1969 return cache->prev_sp - cache->framesize;
1970 }
1971
1972 struct frame_base arm_normal_base = {
1973 &arm_prologue_unwind,
1974 arm_normal_frame_base,
1975 arm_normal_frame_base,
1976 arm_normal_frame_base
1977 };
1978
1979 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1980 dummy frame. The frame ID's base needs to match the TOS value
1981 saved by save_dummy_frame_tos() and returned from
1982 arm_push_dummy_call, and the PC needs to match the dummy frame's
1983 breakpoint. */
1984
1985 static struct frame_id
1986 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1987 {
1988 return frame_id_build (get_frame_register_unsigned (this_frame, ARM_SP_REGNUM),
1989 get_frame_pc (this_frame));
1990 }
1991
1992 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1993 be used to construct the previous frame's ID, after looking up the
1994 containing function). */
1995
1996 static CORE_ADDR
1997 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1998 {
1999 CORE_ADDR pc;
2000 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2001 return arm_addr_bits_remove (gdbarch, pc);
2002 }
2003
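/* Implement the gdbarch unwind_sp method by unwinding the SP
register. */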
2004 static CORE_ADDR
2005 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2006 {
2007 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2008 }
2009
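/* Callback used by the DWARF-2 frame unwinder for registers that need
special treatment: the PC is recovered from the saved LR with the
Thumb bit stripped, and only the T bit of the CPSR can be
reconstructed, from the low bit of LR. */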
2010 static struct value *
2011 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2012 int regnum)
2013 {
2014 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2015 CORE_ADDR lr, cpsr;
2016 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2017
2018 switch (regnum)
2019 {
2020 case ARM_PC_REGNUM:
2021 /* The PC is normally copied from the return column, which
2022 describes saves of LR. However, that version may have an
2023 extra bit set to indicate Thumb state. The bit is not
2024 part of the PC. */
2025 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2026 return frame_unwind_got_constant (this_frame, regnum,
2027 arm_addr_bits_remove (gdbarch, lr));
2028
2029 case ARM_PS_REGNUM:
2030 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2031 cpsr = get_frame_register_unsigned (this_frame, regnum);
2032 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2033 if (IS_THUMB_ADDR (lr))
2034 cpsr |= t_bit;
2035 else
2036 cpsr &= ~t_bit;
2037 return frame_unwind_got_constant (this_frame, regnum, cpsr);
2038
2039 default:
2040 internal_error (__FILE__, __LINE__,
2041 _("Unexpected register %d"), regnum);
2042 }
2043 }
2044
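/* Set up the DWARF-2 frame rules for ARM: PC and CPSR are computed by
arm_dwarf2_prev_register, and SP is the CFA. */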
2045 static void
2046 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
2047 struct dwarf2_frame_state_reg *reg,
2048 struct frame_info *this_frame)
2049 {
2050 switch (regnum)
2051 {
2052 case ARM_PC_REGNUM:
2053 case ARM_PS_REGNUM:
2054 reg->how = DWARF2_FRAME_REG_FN;
2055 reg->loc.fn = arm_dwarf2_prev_register;
2056 break;
2057 case ARM_SP_REGNUM:
2058 reg->how = DWARF2_FRAME_REG_CFA;
2059 break;
2060 }
2061 }
2062
2063 /* Return true if we are in the function's epilogue, i.e. after the
2064 instruction that destroyed the function's stack frame. */
2065
2066 static int
2067 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2068 {
2069 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2070 unsigned int insn, insn2;
2071 int found_return = 0, found_stack_adjust = 0;
2072 CORE_ADDR func_start, func_end;
2073 CORE_ADDR scan_pc;
2074 gdb_byte buf[4];
2075
2076 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2077 return 0;
2078
2079 /* The epilogue is a sequence of instructions along the following lines:
2080
2081 - add stack frame size to SP or FP
2082 - [if frame pointer used] restore SP from FP
2083 - restore registers from SP [may include PC]
2084 - a return-type instruction [if PC wasn't already restored]
2085
2086 In a first pass, we scan forward from the current PC and verify the
2087 instructions we find as compatible with this sequence, ending in a
2088 return instruction.
2089
2090 However, this is not sufficient to distinguish indirect function calls
2091 within a function from indirect tail calls in the epilogue in some cases.
2092 Therefore, if we didn't already find any SP-changing instruction during
2093 the forward scan, we add a backward scanning heuristic to ensure we actually
2094 are in the epilogue. */
2095
2096 scan_pc = pc;
2097 while (scan_pc < func_end && !found_return)
2098 {
2099 if (target_read_memory (scan_pc, buf, 2))
2100 break;
2101
2102 scan_pc += 2;
2103 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2104
2105 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2106 found_return = 1;
2107 else if (insn == 0x46f7) /* mov pc, lr */
2108 found_return = 1;
2109 else if (insn == 0x46bd) /* mov sp, r7 */
2110 found_stack_adjust = 1;
2111 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2112 found_stack_adjust = 1;
2113 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
2114 {
2115 found_stack_adjust = 1;
2116 if (insn & 0x0100) /* <registers> include PC. */
2117 found_return = 1;
2118 }
2119 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
2120 {
2121 if (target_read_memory (scan_pc, buf, 2))
2122 break;
2123
2124 scan_pc += 2;
2125 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2126
2127 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2128 {
2129 found_stack_adjust = 1;
2130 if (insn2 & 0x8000) /* <registers> include PC. */
2131 found_return = 1;
2132 }
2133 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2134 && (insn2 & 0x0fff) == 0x0b04)
2135 {
2136 found_stack_adjust = 1;
2137 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
2138 found_return = 1;
2139 }
2140 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2141 && (insn2 & 0x0e00) == 0x0a00)
2142 found_stack_adjust = 1;
2143 else
2144 break;
2145 }
2146 else
2147 break;
2148 }
2149
2150 if (!found_return)
2151 return 0;
2152
2153 /* Since any instruction in the epilogue sequence, with the possible
2154 exception of return itself, updates the stack pointer, we need to
2155 scan backwards for at most one instruction. Try either a 16-bit or
2156 a 32-bit instruction. This is just a heuristic, so we do not worry
2157 too much about false positives. */
2158
2159 if (!found_stack_adjust)
2160 {
2161 if (pc - 4 < func_start)
2162 return 0;
2163 if (target_read_memory (pc - 4, buf, 4))
2164 return 0;
2165
2166 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2167 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
2168
2169 if (insn2 == 0x46bd) /* mov sp, r7 */
2170 found_stack_adjust = 1;
2171 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2172 found_stack_adjust = 1;
2173 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
2174 found_stack_adjust = 1;
2175 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2176 found_stack_adjust = 1;
2177 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2178 && (insn2 & 0x0fff) == 0x0b04)
2179 found_stack_adjust = 1;
2180 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2181 && (insn2 & 0x0e00) == 0x0a00)
2182 found_stack_adjust = 1;
2183 }
2184
2185 return found_stack_adjust;
2186 }
2187
2188 /* Return true if we are in the function's epilogue, i.e. after the
2189 instruction that destroyed the function's stack frame. */
2190
2191 static int
2192 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2193 {
2194 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2195 unsigned int insn;
2196 int found_return, found_stack_adjust = 0;
2197 CORE_ADDR func_start, func_end;
2198
2199 if (arm_pc_is_thumb (gdbarch, pc))
2200 return thumb_in_function_epilogue_p (gdbarch, pc);
2201
2202 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2203 return 0;
2204
2205 /* We are in the epilogue if the previous instruction was a stack
2206 adjustment and the next instruction is a possible return (bx, mov
2207 pc, or pop). We could have to scan backwards to find the stack
2208 adjustment, or forwards to find the return, but this is a decent
2209 approximation. First scan forwards. */
2210
2211 found_return = 0;
2212 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
2213 if (bits (insn, 28, 31) != INST_NV)
2214 {
2215 if ((insn & 0x0ffffff0) == 0x012fff10)
2216 /* BX. */
2217 found_return = 1;
2218 else if ((insn & 0x0ffffff0) == 0x01a0f000)
2219 /* MOV PC. */
2220 found_return = 1;
2221 else if ((insn & 0x0fff0000) == 0x08bd0000
2222 && (insn & 0x0000c000) != 0)
2223 /* POP (LDMIA), including PC or LR. */
2224 found_return = 1;
2225 }
2226
2227 if (!found_return)
2228 return 0;
2229
2230 /* Scan backwards. This is just a heuristic, so do not worry about
2231 false positives from mode changes. */
2232
2233 if (pc < func_start + 4)
2234 return 0;
2235
2236 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
2237 if (bits (insn, 28, 31) != INST_NV)
2238 {
2239 if ((insn & 0x0df0f000) == 0x0080d000)
2240 /* ADD SP (register or immediate). */
2241 found_stack_adjust = 1;
2242 else if ((insn & 0x0df0f000) == 0x0040d000)
2243 /* SUB SP (register or immediate). */
2244 found_stack_adjust = 1;
2245 else if ((insn & 0x0ffffff0) == 0x01a0d000)
2246 /* MOV SP. */
2247 found_stack_adjust = 1;
2248 else if ((insn & 0x0fff0000) == 0x08bd0000)
2249 /* POP (LDMIA). */
2250 found_stack_adjust = 1;
2251 }
2252
2253 if (found_stack_adjust)
2254 return 1;
2255
2256 return 0;
2257 }
2258
2259
2260 /* When arguments must be pushed onto the stack, they go on in reverse
2261 order. The code below implements a FILO (stack) to do this. */
2262
2263 struct stack_item
2264 {
2265 int len;
2266 struct stack_item *prev;
2267 void *data;
2268 };
2269
2270 static struct stack_item *
2271 push_stack_item (struct stack_item *prev, const void *contents, int len)
2272 {
2273 struct stack_item *si;
2274 si = xmalloc (sizeof (struct stack_item));
2275 si->data = xmalloc (len);
2276 si->len = len;
2277 si->prev = prev;
2278 memcpy (si->data, contents, len);
2279 return si;
2280 }
2281
2282 static struct stack_item *
2283 pop_stack_item (struct stack_item *si)
2284 {
2285 struct stack_item *dead = si;
2286 si = si->prev;
2287 xfree (dead->data);
2288 xfree (dead);
2289 return si;
2290 }
2291
2292
2293 /* Return the alignment (in bytes) of the given type. */
2294
2295 static int
2296 arm_type_align (struct type *t)
2297 {
2298 int n;
2299 int align;
2300 int falign;
2301
2302 t = check_typedef (t);
2303 switch (TYPE_CODE (t))
2304 {
2305 default:
2306 /* Should never happen. */
2307 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
2308 return 4;
2309
2310 case TYPE_CODE_PTR:
2311 case TYPE_CODE_ENUM:
2312 case TYPE_CODE_INT:
2313 case TYPE_CODE_FLT:
2314 case TYPE_CODE_SET:
2315 case TYPE_CODE_RANGE:
2316 case TYPE_CODE_BITSTRING:
2317 case TYPE_CODE_REF:
2318 case TYPE_CODE_CHAR:
2319 case TYPE_CODE_BOOL:
2320 return TYPE_LENGTH (t);
2321
2322 case TYPE_CODE_ARRAY:
2323 case TYPE_CODE_COMPLEX:
2324 /* TODO: What about vector types? */
2325 return arm_type_align (TYPE_TARGET_TYPE (t));
2326
2327 case TYPE_CODE_STRUCT:
2328 case TYPE_CODE_UNION:
2329 align = 1;
2330 for (n = 0; n < TYPE_NFIELDS (t); n++)
2331 {
2332 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
2333 if (falign > align)
2334 align = falign;
2335 }
2336 return align;
2337 }
2338 }
2339
2340 /* Possible base types for a candidate for passing and returning in
2341 VFP registers. */
2342
2343 enum arm_vfp_cprc_base_type
2344 {
2345 VFP_CPRC_UNKNOWN,
2346 VFP_CPRC_SINGLE,
2347 VFP_CPRC_DOUBLE,
2348 VFP_CPRC_VEC64,
2349 VFP_CPRC_VEC128
2350 };
2351
2352 /* The length of one element of base type B. */
2353
2354 static unsigned
2355 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
2356 {
2357 switch (b)
2358 {
2359 case VFP_CPRC_SINGLE:
2360 return 4;
2361 case VFP_CPRC_DOUBLE:
2362 return 8;
2363 case VFP_CPRC_VEC64:
2364 return 8;
2365 case VFP_CPRC_VEC128:
2366 return 16;
2367 default:
2368 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2369 (int) b);
2370 }
2371 }
2372
2373 /* The character ('s', 'd' or 'q') for the type of VFP register used
2374 for passing base type B. */
2375
2376 static int
2377 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
2378 {
2379 switch (b)
2380 {
2381 case VFP_CPRC_SINGLE:
2382 return 's';
2383 case VFP_CPRC_DOUBLE:
2384 return 'd';
2385 case VFP_CPRC_VEC64:
2386 return 'd';
2387 case VFP_CPRC_VEC128:
2388 return 'q';
2389 default:
2390 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2391 (int) b);
2392 }
2393 }
2394
2395 /* Determine whether T may be part of a candidate for passing and
2396 returning in VFP registers, ignoring the limit on the total number
2397 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
2398 classification of the first valid component found; if it is not
2399 VFP_CPRC_UNKNOWN, all components must have the same classification
2400 as *BASE_TYPE. If it is found that T contains a type not permitted
2401 for passing and returning in VFP registers, a type differently
2402 classified from *BASE_TYPE, or two types differently classified
2403 from each other, return -1, otherwise return the total number of
2404 base-type elements found (possibly 0 in an empty structure or
2405 array). Vectors and complex types are not currently supported,
2406 matching the generic AAPCS support. */
2407
2408 static int
2409 arm_vfp_cprc_sub_candidate (struct type *t,
2410 enum arm_vfp_cprc_base_type *base_type)
2411 {
2412 t = check_typedef (t);
2413 switch (TYPE_CODE (t))
2414 {
2415 case TYPE_CODE_FLT:
2416 switch (TYPE_LENGTH (t))
2417 {
2418 case 4:
2419 if (*base_type == VFP_CPRC_UNKNOWN)
2420 *base_type = VFP_CPRC_SINGLE;
2421 else if (*base_type != VFP_CPRC_SINGLE)
2422 return -1;
2423 return 1;
2424
2425 case 8:
2426 if (*base_type == VFP_CPRC_UNKNOWN)
2427 *base_type = VFP_CPRC_DOUBLE;
2428 else if (*base_type != VFP_CPRC_DOUBLE)
2429 return -1;
2430 return 1;
2431
2432 default:
2433 return -1;
2434 }
2435 break;
2436
2437 case TYPE_CODE_ARRAY:
2438 {
2439 int count;
2440 unsigned unitlen;
2441 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
2442 if (count == -1)
2443 return -1;
2444 if (TYPE_LENGTH (t) == 0)
2445 {
2446 gdb_assert (count == 0);
2447 return 0;
2448 }
2449 else if (count == 0)
2450 return -1;
2451 unitlen = arm_vfp_cprc_unit_length (*base_type);
2452 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
2453 return TYPE_LENGTH (t) / unitlen;
2454 }
2455 break;
2456
2457 case TYPE_CODE_STRUCT:
2458 {
2459 int count = 0;
2460 unsigned unitlen;
2461 int i;
2462 for (i = 0; i < TYPE_NFIELDS (t); i++)
2463 {
2464 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2465 base_type);
2466 if (sub_count == -1)
2467 return -1;
2468 count += sub_count;
2469 }
2470 if (TYPE_LENGTH (t) == 0)
2471 {
2472 gdb_assert (count == 0);
2473 return 0;
2474 }
2475 else if (count == 0)
2476 return -1;
2477 unitlen = arm_vfp_cprc_unit_length (*base_type);
2478 if (TYPE_LENGTH (t) != unitlen * count)
2479 return -1;
2480 return count;
2481 }
2482
2483 case TYPE_CODE_UNION:
2484 {
2485 int count = 0;
2486 unsigned unitlen;
2487 int i;
2488 for (i = 0; i < TYPE_NFIELDS (t); i++)
2489 {
2490 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2491 base_type);
2492 if (sub_count == -1)
2493 return -1;
2494 count = (count > sub_count ? count : sub_count);
2495 }
2496 if (TYPE_LENGTH (t) == 0)
2497 {
2498 gdb_assert (count == 0);
2499 return 0;
2500 }
2501 else if (count == 0)
2502 return -1;
2503 unitlen = arm_vfp_cprc_unit_length (*base_type);
2504 if (TYPE_LENGTH (t) != unitlen * count)
2505 return -1;
2506 return count;
2507 }
2508
2509 default:
2510 break;
2511 }
2512
2513 return -1;
2514 }
2515
2516 /* Determine whether T is a VFP co-processor register candidate (CPRC)
2517 if passed to or returned from a non-variadic function with the VFP
2518 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
2519 *BASE_TYPE to the base type for T and *COUNT to the number of
2520 elements of that base type before returning. */
2521
2522 static int
2523 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
2524 int *count)
2525 {
2526 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
2527 int c = arm_vfp_cprc_sub_candidate (t, &b);
2528 if (c <= 0 || c > 4)
2529 return 0;
2530 *base_type = b;
2531 *count = c;
2532 return 1;
2533 }
2534
2535 /* Return 1 if the VFP ABI should be used for passing arguments to and
2536 returning values from a function of type FUNC_TYPE, 0
2537 otherwise. */
2538
2539 static int
2540 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
2541 {
2542 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2543 /* Variadic functions always use the base ABI. Assume that functions
2544 without debug info are not variadic. */
2545 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
2546 return 0;
2547 /* The VFP ABI is only supported as a variant of AAPCS. */
2548 if (tdep->arm_abi != ARM_ABI_AAPCS)
2549 return 0;
2550 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
2551 }
2552
2553 /* We currently only support passing parameters in integer registers, which
2554 conforms with GCC's default model, and VFP argument passing following
2555 the VFP variant of AAPCS. Several other variants exist and
2556 we should probably support some of them based on the selected ABI. */
2557
2558 static CORE_ADDR
2559 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
2560 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
2561 struct value **args, CORE_ADDR sp, int struct_return,
2562 CORE_ADDR struct_addr)
2563 {
2564 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2565 int argnum;
2566 int argreg;
2567 int nstack;
2568 struct stack_item *si = NULL;
2569 int use_vfp_abi;
2570 struct type *ftype;
2571 unsigned vfp_regs_free = (1 << 16) - 1;
2572
2573 /* Determine the type of this function and whether the VFP ABI
2574 applies. */
2575 ftype = check_typedef (value_type (function));
2576 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
2577 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
2578 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
2579
2580 /* Set the return address. For the ARM, the return breakpoint is
2581 always at BP_ADDR. */
2582 if (arm_pc_is_thumb (gdbarch, bp_addr))
2583 bp_addr |= 1;
2584 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
2585
2586 /* Walk through the list of args and determine how large a temporary
2587 stack is required. Need to take care here as structs may be
2588 passed on the stack, and we have to push them. */
2589 nstack = 0;
2590
2591 argreg = ARM_A1_REGNUM;
2592 nstack = 0;
2593
2594 /* The struct_return pointer occupies the first parameter
2595 passing register. */
2596 if (struct_return)
2597 {
2598 if (arm_debug)
2599 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
2600 gdbarch_register_name (gdbarch, argreg),
2601 paddress (gdbarch, struct_addr));
2602 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
2603 argreg++;
2604 }
2605
2606 for (argnum = 0; argnum < nargs; argnum++)
2607 {
2608 int len;
2609 struct type *arg_type;
2610 struct type *target_type;
2611 enum type_code typecode;
2612 const bfd_byte *val;
2613 int align;
2614 enum arm_vfp_cprc_base_type vfp_base_type;
2615 int vfp_base_count;
2616 int may_use_core_reg = 1;
2617
2618 arg_type = check_typedef (value_type (args[argnum]));
2619 len = TYPE_LENGTH (arg_type);
2620 target_type = TYPE_TARGET_TYPE (arg_type);
2621 typecode = TYPE_CODE (arg_type);
2622 val = value_contents (args[argnum]);
2623
2624 align = arm_type_align (arg_type);
2625 /* Round alignment up to a whole number of words. */
2626 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
2627 /* Different ABIs have different maximum alignments. */
2628 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
2629 {
2630 /* The APCS ABI only requires word alignment. */
2631 align = INT_REGISTER_SIZE;
2632 }
2633 else
2634 {
2635 /* The AAPCS requires at most doubleword alignment. */
2636 if (align > INT_REGISTER_SIZE * 2)
2637 align = INT_REGISTER_SIZE * 2;
2638 }
2639
2640 if (use_vfp_abi
2641 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
2642 &vfp_base_count))
2643 {
2644 int regno;
2645 int unit_length;
2646 int shift;
2647 unsigned mask;
2648
2649 /* Because this is a CPRC it cannot go in a core register or
2650 cause a core register to be skipped for alignment.
2651 Either it goes in VFP registers and the rest of this loop
2652 iteration is skipped for this argument, or it goes on the
2653 stack (and the stack alignment code is correct for this
2654 case). */
2655 may_use_core_reg = 0;
2656
2657 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
2658 shift = unit_length / 4;
2659 mask = (1 << (shift * vfp_base_count)) - 1;
2660 for (regno = 0; regno < 16; regno += shift)
2661 if (((vfp_regs_free >> regno) & mask) == mask)
2662 break;
2663
2664 if (regno < 16)
2665 {
2666 int reg_char;
2667 int reg_scaled;
2668 int i;
2669
2670 vfp_regs_free &= ~(mask << regno);
2671 reg_scaled = regno / shift;
2672 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
2673 for (i = 0; i < vfp_base_count; i++)
2674 {
2675 char name_buf[4];
2676 int regnum;
2677 if (reg_char == 'q')
2678 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
2679 val + i * unit_length);
2680 else
2681 {
2682 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
2683 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
2684 strlen (name_buf));
2685 regcache_cooked_write (regcache, regnum,
2686 val + i * unit_length);
2687 }
2688 }
2689 continue;
2690 }
2691 else
2692 {
2693 /* This CPRC could not go in VFP registers, so all VFP
2694 registers are now marked as used. */
2695 vfp_regs_free = 0;
2696 }
2697 }
2698
2699 /* Push stack padding for doubleword alignment. */
2700 if (nstack & (align - 1))
2701 {
2702 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2703 nstack += INT_REGISTER_SIZE;
2704 }
2705
2706 /* Doubleword aligned quantities must go in even register pairs. */
2707 if (may_use_core_reg
2708 && argreg <= ARM_LAST_ARG_REGNUM
2709 && align > INT_REGISTER_SIZE
2710 && argreg & 1)
2711 argreg++;
2712
2713 /* If the argument is a pointer to a function, and it is a
2714 Thumb function, create a LOCAL copy of the value and set
2715 the THUMB bit in it. */
2716 if (TYPE_CODE_PTR == typecode
2717 && target_type != NULL
2718 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
2719 {
2720 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
2721 if (arm_pc_is_thumb (gdbarch, regval))
2722 {
2723 bfd_byte *copy = alloca (len);
2724 store_unsigned_integer (copy, len, byte_order,
2725 MAKE_THUMB_ADDR (regval));
2726 val = copy;
2727 }
2728 }
2729
2730 /* Copy the argument to general registers or the stack in
2731 register-sized pieces. Large arguments are split between
2732 registers and stack. */
2733 while (len > 0)
2734 {
2735 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
2736
2737 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
2738 {
2739 /* The argument is being passed in a general purpose
2740 register. */
2741 CORE_ADDR regval
2742 = extract_unsigned_integer (val, partial_len, byte_order);
2743 if (byte_order == BFD_ENDIAN_BIG)
2744 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
2745 if (arm_debug)
2746 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
2747 argnum,
2748 gdbarch_register_name
2749 (gdbarch, argreg),
2750 phex (regval, INT_REGISTER_SIZE));
2751 regcache_cooked_write_unsigned (regcache, argreg, regval);
2752 argreg++;
2753 }
2754 else
2755 {
2756 /* Push the arguments onto the stack. */
2757 if (arm_debug)
2758 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
2759 argnum, nstack);
2760 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2761 nstack += INT_REGISTER_SIZE;
2762 }
2763
2764 len -= partial_len;
2765 val += partial_len;
2766 }
2767 }
2768 /* If we have an odd number of words to push, then decrement the stack
2769 by one word now, so that the first stack argument will be dword aligned. */
2770 if (nstack & 4)
2771 sp -= 4;
2772
2773 while (si)
2774 {
2775 sp -= si->len;
2776 write_memory (sp, si->data, si->len);
2777 si = pop_stack_item (si);
2778 }
2779
2780 /* Finally, update the SP register. */
2781 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
2782
2783 return sp;
2784 }
2785
2786
2787 /* Always align the frame to an 8-byte boundary. This is required on
2788 some platforms and harmless on the rest. */
2789
2790 static CORE_ADDR
2791 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2792 {
2793 /* Align the stack to eight bytes. */
2794 return sp & ~ (CORE_ADDR) 7;
2795 }
2796
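/* Print the FPA status flags that are set in FLAGS: invalid
operation, divide by zero, overflow, underflow and inexact. */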
2797 static void
2798 print_fpu_flags (int flags)
2799 {
2800 if (flags & (1 << 0))
2801 fputs ("IVO ", stdout);
2802 if (flags & (1 << 1))
2803 fputs ("DVZ ", stdout);
2804 if (flags & (1 << 2))
2805 fputs ("OFL ", stdout);
2806 if (flags & (1 << 3))
2807 fputs ("UFL ", stdout);
2808 if (flags & (1 << 4))
2809 fputs ("INX ", stdout);
2810 putchar ('\n');
2811 }
2812
2813 /* Print interesting information about the floating point processor
2814 (if present) or emulator. */
2815 static void
2816 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
2817 struct frame_info *frame, const char *args)
2818 {
2819 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
2820 int type;
2821
2822 type = (status >> 24) & 127;
2823 if (status & (1 << 31))
2824 printf (_("Hardware FPU type %d\n"), type);
2825 else
2826 printf (_("Software FPU type %d\n"), type);
2827 /* i18n: [floating point unit] mask */
2828 fputs (_("mask: "), stdout);
2829 print_fpu_flags (status >> 16);
2830 /* i18n: [floating point unit] flags */
2831 fputs (_("flags: "), stdout);
2832 print_fpu_flags (status);
2833 }
2834
2835 /* Construct the ARM extended floating point type. */
2836 static struct type *
2837 arm_ext_type (struct gdbarch *gdbarch)
2838 {
2839 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2840
2841 if (!tdep->arm_ext_type)
2842 tdep->arm_ext_type
2843 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
2844 floatformats_arm_ext);
2845
2846 return tdep->arm_ext_type;
2847 }
2848
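/* Construct (lazily) the union type used to display a 64-bit NEON
"D" register, with one field per supported element layout. */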
2849 static struct type *
2850 arm_neon_double_type (struct gdbarch *gdbarch)
2851 {
2852 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2853
2854 if (tdep->neon_double_type == NULL)
2855 {
2856 struct type *t, *elem;
2857
2858 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
2859 TYPE_CODE_UNION);
2860 elem = builtin_type (gdbarch)->builtin_uint8;
2861 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
2862 elem = builtin_type (gdbarch)->builtin_uint16;
2863 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
2864 elem = builtin_type (gdbarch)->builtin_uint32;
2865 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
2866 elem = builtin_type (gdbarch)->builtin_uint64;
2867 append_composite_type_field (t, "u64", elem);
2868 elem = builtin_type (gdbarch)->builtin_float;
2869 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
2870 elem = builtin_type (gdbarch)->builtin_double;
2871 append_composite_type_field (t, "f64", elem);
2872
2873 TYPE_VECTOR (t) = 1;
2874 TYPE_NAME (t) = "neon_d";
2875 tdep->neon_double_type = t;
2876 }
2877
2878 return tdep->neon_double_type;
2879 }
2880
2881 /* FIXME: The vector types are not correctly ordered on big-endian
2882 targets. Just as s0 is the low bits of d0, d0[0] is also the low
2883 bits of d0 - regardless of what unit size is being held in d0. So
2884 the offset of the first uint8 in d0 is 7, but the offset of the
2885 first float is 4. This code works as-is for little-endian
2886 targets. */
2887
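/* Construct (lazily) the union type used to display a 128-bit NEON
"Q" register, mirroring arm_neon_double_type above. */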
2888 static struct type *
2889 arm_neon_quad_type (struct gdbarch *gdbarch)
2890 {
2891 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2892
2893 if (tdep->neon_quad_type == NULL)
2894 {
2895 struct type *t, *elem;
2896
2897 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
2898 TYPE_CODE_UNION);
2899 elem = builtin_type (gdbarch)->builtin_uint8;
2900 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
2901 elem = builtin_type (gdbarch)->builtin_uint16;
2902 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
2903 elem = builtin_type (gdbarch)->builtin_uint32;
2904 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
2905 elem = builtin_type (gdbarch)->builtin_uint64;
2906 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
2907 elem = builtin_type (gdbarch)->builtin_float;
2908 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
2909 elem = builtin_type (gdbarch)->builtin_double;
2910 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
2911
2912 TYPE_VECTOR (t) = 1;
2913 TYPE_NAME (t) = "neon_q";
2914 tdep->neon_quad_type = t;
2915 }
2916
2917 return tdep->neon_quad_type;
2918 }
2919
2920 /* Return the GDB type object for the "standard" data type of data in
2921 register N. */
2922
2923 static struct type *
2924 arm_register_type (struct gdbarch *gdbarch, int regnum)
2925 {
2926 int num_regs = gdbarch_num_regs (gdbarch);
2927
2928 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
2929 && regnum >= num_regs && regnum < num_regs + 32)
2930 return builtin_type (gdbarch)->builtin_float;
2931
2932 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
2933 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
2934 return arm_neon_quad_type (gdbarch);
2935
2936 /* If the target description has register information, we are only
2937 in this function so that we can override the types of
2938 double-precision registers for NEON. */
2939 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
2940 {
2941 struct type *t = tdesc_register_type (gdbarch, regnum);
2942
2943 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
2944 && TYPE_CODE (t) == TYPE_CODE_FLT
2945 && gdbarch_tdep (gdbarch)->have_neon)
2946 return arm_neon_double_type (gdbarch);
2947 else
2948 return t;
2949 }
2950
2951 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
2952 {
2953 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
2954 return builtin_type (gdbarch)->builtin_void;
2955
2956 return arm_ext_type (gdbarch);
2957 }
2958 else if (regnum == ARM_SP_REGNUM)
2959 return builtin_type (gdbarch)->builtin_data_ptr;
2960 else if (regnum == ARM_PC_REGNUM)
2961 return builtin_type (gdbarch)->builtin_func_ptr;
2962 else if (regnum >= ARRAY_SIZE (arm_register_names))
2963 /* These registers are only supported on targets which supply
2964 an XML description. */
2965 return builtin_type (gdbarch)->builtin_int0;
2966 else
2967 return builtin_type (gdbarch)->builtin_uint32;
2968 }
2969
2970 /* Map a DWARF register REGNUM onto the appropriate GDB register
2971 number. */
2972
2973 static int
2974 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2975 {
2976 /* Core integer regs. */
2977 if (reg >= 0 && reg <= 15)
2978 return reg;
2979
2980 /* Legacy FPA encoding. These were once used in a way which
2981 overlapped with VFP register numbering, so their use is
2982 discouraged, but GDB doesn't support the ARM toolchain
2983 which used them for VFP. */
2984 if (reg >= 16 && reg <= 23)
2985 return ARM_F0_REGNUM + reg - 16;
2986
2987 /* New assignments for the FPA registers. */
2988 if (reg >= 96 && reg <= 103)
2989 return ARM_F0_REGNUM + reg - 96;
2990
2991 /* WMMX register assignments. */
2992 if (reg >= 104 && reg <= 111)
2993 return ARM_WCGR0_REGNUM + reg - 104;
2994
2995 if (reg >= 112 && reg <= 127)
2996 return ARM_WR0_REGNUM + reg - 112;
2997
2998 if (reg >= 192 && reg <= 199)
2999 return ARM_WC0_REGNUM + reg - 192;
3000
3001 /* VFP v2 registers. A double precision value is actually
3002 in d1 rather than s2, but the ABI only defines numbering
3003 for the single precision registers. This will "just work"
3004 in GDB for little endian targets (we'll read eight bytes,
3005 starting in s0 and then progressing to s1), but will be
3006 reversed on big endian targets with VFP. This won't
3007 be a problem for the new Neon quad registers; you're supposed
3008 to use DW_OP_piece for those. */
3009 if (reg >= 64 && reg <= 95)
3010 {
3011 char name_buf[4];
3012
3013 sprintf (name_buf, "s%d", reg - 64);
3014 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3015 strlen (name_buf));
3016 }
3017
3018 /* VFP v3 / Neon registers. This range is also used for VFP v2
3019 registers, except that it now describes d0 instead of s0. */
3020 if (reg >= 256 && reg <= 287)
3021 {
3022 char name_buf[4];
3023
3024 sprintf (name_buf, "d%d", reg - 256);
3025 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3026 strlen (name_buf));
3027 }
3028
3029 return -1;
3030 }
3031
3032 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
3033 static int
3034 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
3035 {
3036 int reg = regnum;
3037 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
3038
3039 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
3040 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
3041
3042 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
3043 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
3044
3045 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
3046 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
3047
3048 if (reg < NUM_GREGS)
3049 return SIM_ARM_R0_REGNUM + reg;
3050 reg -= NUM_GREGS;
3051
3052 if (reg < NUM_FREGS)
3053 return SIM_ARM_FP0_REGNUM + reg;
3054 reg -= NUM_FREGS;
3055
3056 if (reg < NUM_SREGS)
3057 return SIM_ARM_FPS_REGNUM + reg;
3058 reg -= NUM_SREGS;
3059
3060 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
3061 }
3062
3063 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
3064 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
3065 It is thought that this is the floating-point register format on
3066 little-endian systems. */
3067
3068 static void
3069 convert_from_extended (const struct floatformat *fmt, const void *ptr,
3070 void *dbl, int endianess)
3071 {
3072 DOUBLEST d;
3073
3074 if (endianess == BFD_ENDIAN_BIG)
3075 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
3076 else
3077 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
3078 ptr, &d);
3079 floatformat_from_doublest (fmt, &d, dbl);
3080 }
3081
3082 static void
3083 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
3084 int endianess)
3085 {
3086 DOUBLEST d;
3087
3088 floatformat_to_doublest (fmt, ptr, &d);
3089 if (endianess == BFD_ENDIAN_BIG)
3090 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
3091 else
3092 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
3093 &d, dbl);
3094 }
3095
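/* Return non-zero if the condition field COND of an instruction would
pass, given the CPSR flags in STATUS_REG. */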
3096 static int
3097 condition_true (unsigned long cond, unsigned long status_reg)
3098 {
3099 if (cond == INST_AL || cond == INST_NV)
3100 return 1;
3101
3102 switch (cond)
3103 {
3104 case INST_EQ:
3105 return ((status_reg & FLAG_Z) != 0);
3106 case INST_NE:
3107 return ((status_reg & FLAG_Z) == 0);
3108 case INST_CS:
3109 return ((status_reg & FLAG_C) != 0);
3110 case INST_CC:
3111 return ((status_reg & FLAG_C) == 0);
3112 case INST_MI:
3113 return ((status_reg & FLAG_N) != 0);
3114 case INST_PL:
3115 return ((status_reg & FLAG_N) == 0);
3116 case INST_VS:
3117 return ((status_reg & FLAG_V) != 0);
3118 case INST_VC:
3119 return ((status_reg & FLAG_V) == 0);
3120 case INST_HI:
3121 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
3122 case INST_LS:
3123 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
3124 case INST_GE:
3125 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
3126 case INST_LT:
3127 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
3128 case INST_GT:
3129 return (((status_reg & FLAG_Z) == 0)
3130 && (((status_reg & FLAG_N) == 0)
3131 == ((status_reg & FLAG_V) == 0)));
3132 case INST_LE:
3133 return (((status_reg & FLAG_Z) != 0)
3134 || (((status_reg & FLAG_N) == 0)
3135 != ((status_reg & FLAG_V) == 0)));
3136 }
3137 return 1;
3138 }
3139
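/* Compute the value of the shifter operand of data-processing
instruction INST: read the registers involved from FRAME, using
PC_VAL for the PC, and apply the encoded shift, with CARRY supplying
the carry-in for RRX. */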
3140 static unsigned long
3141 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
3142 unsigned long pc_val, unsigned long status_reg)
3143 {
3144 unsigned long res, shift;
3145 int rm = bits (inst, 0, 3);
3146 unsigned long shifttype = bits (inst, 5, 6);
3147
3148 if (bit (inst, 4))
3149 {
3150 int rs = bits (inst, 8, 11);
3151 shift = (rs == 15 ? pc_val + 8
3152 : get_frame_register_unsigned (frame, rs)) & 0xFF;
3153 }
3154 else
3155 shift = bits (inst, 7, 11);
3156
3157 res = (rm == 15
3158 ? (pc_val + (bit (inst, 4) ? 12 : 8))
3159 : get_frame_register_unsigned (frame, rm));
3160
3161 switch (shifttype)
3162 {
3163 case 0: /* LSL */
3164 res = shift >= 32 ? 0 : res << shift;
3165 break;
3166
3167 case 1: /* LSR */
3168 res = shift >= 32 ? 0 : res >> shift;
3169 break;
3170
3171 case 2: /* ASR */
3172 if (shift >= 32)
3173 shift = 31;
3174 res = ((res & 0x80000000L)
3175 ? ~((~res) >> shift) : res >> shift);
3176 break;
3177
3178 case 3: /* ROR/RRX */
3179 shift &= 31;
3180 if (shift == 0)
3181 res = (res >> 1) | (carry ? 0x80000000L : 0);
3182 else
3183 res = (res >> shift) | (res << (32 - shift));
3184 break;
3185 }
3186
3187 return res & 0xffffffff;
3188 }
3189
3190 /* Return number of 1-bits in VAL. */
3191
3192 static int
3193 bitcount (unsigned long val)
3194 {
3195 int nbits;
3196 for (nbits = 0; val != 0; nbits++)
3197 val &= val - 1; /* delete rightmost 1-bit in val */
3198 return nbits;
3199 }
3200
3201 /* Return the size in bytes of the complete Thumb instruction whose
3202 first halfword is INST1. */
3203
3204 static int
3205 thumb_insn_size (unsigned short inst1)
3206 {
3207 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3208 return 4;
3209 else
3210 return 2;
3211 }
3212
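/* Advance the Thumb-2 ITSTATE value past one instruction, returning
zero once the IT block has been exhausted. */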
3213 static int
3214 thumb_advance_itstate (unsigned int itstate)
3215 {
3216 /* Preserve IT[7:5], the first three bits of the condition. Shift
3217 the upcoming condition flags left by one bit. */
3218 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
3219
3220 /* If we have finished the IT block, clear the state. */
3221 if ((itstate & 0x0f) == 0)
3222 itstate = 0;
3223
3224 return itstate;
3225 }
3226
3227 /* Find the next PC after the current instruction executes. In some
3228 cases we can not statically determine the answer (see the IT state
3229 handling in this function); in that case, a breakpoint may be
3230 inserted in addition to the returned PC, which will be used to set
3231 another breakpoint by our caller. */
3232
3233 static CORE_ADDR
3234 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3235 {
3236 struct gdbarch *gdbarch = get_frame_arch (frame);
3237 struct address_space *aspace = get_frame_address_space (frame);
3238 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3239 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3240 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
3241 unsigned short inst1;
3242 CORE_ADDR nextpc = pc + 2; /* default is next instruction */
3243 unsigned long offset;
3244 ULONGEST status, itstate;
3245
3246 nextpc = MAKE_THUMB_ADDR (nextpc);
3247 pc_val = MAKE_THUMB_ADDR (pc_val);
3248
3249 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3250
3251 /* Thumb-2 conditional execution support. There are eight bits in
3252 the CPSR which describe conditional execution state. Once
3253 reconstructed (they're in a funny order), the low five bits
3254 describe the low bit of the condition for each instruction and
3255 how many instructions remain. The high three bits describe the
3256 base condition. One of the low four bits will be set if an IT
3257 block is active. These bits read as zero on earlier
3258 processors. */
3259 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3260 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
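/* ITSTATE bits [7:2] live in CPSR bits [15:10] and ITSTATE bits [1:0]
in CPSR bits [26:25]; the expression above pastes them back
together. */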
3261
3262 /* If-Then handling. On GNU/Linux, where this routine is used, we
3263 use an undefined instruction as a breakpoint. Unlike BKPT, IT
3264 can disable execution of the undefined instruction. So we might
3265 miss the breakpoint if we set it on a skipped conditional
3266 instruction. Because conditional instructions can change the
3267 flags, affecting the execution of further instructions, we may
3268 need to set two breakpoints. */
3269
3270 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
3271 {
3272 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3273 {
3274 /* An IT instruction. Because this instruction does not
3275 modify the flags, we can accurately predict the next
3276 executed instruction. */
3277 itstate = inst1 & 0x00ff;
3278 pc += thumb_insn_size (inst1);
3279
3280 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3281 {
3282 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3283 pc += thumb_insn_size (inst1);
3284 itstate = thumb_advance_itstate (itstate);
3285 }
3286
3287 return MAKE_THUMB_ADDR (pc);
3288 }
3289 else if (itstate != 0)
3290 {
3291 /* We are in a conditional block. Check the condition. */
3292 if (! condition_true (itstate >> 4, status))
3293 {
3294 /* Advance to the next executed instruction. */
3295 pc += thumb_insn_size (inst1);
3296 itstate = thumb_advance_itstate (itstate);
3297
3298 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3299 {
3300 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3301 pc += thumb_insn_size (inst1);
3302 itstate = thumb_advance_itstate (itstate);
3303 }
3304
3305 return MAKE_THUMB_ADDR (pc);
3306 }
3307 else if ((itstate & 0x0f) == 0x08)
3308 {
3309 /* This is the last instruction of the conditional
3310 block, and it is executed. We can handle it normally
3311 because the following instruction is not conditional,
3312 and we must handle it normally because it is
3313 permitted to branch. Fall through. */
3314 }
3315 else
3316 {
3317 int cond_negated;
3318
3319 /* There are conditional instructions after this one.
3320 If this instruction modifies the flags, then we can
3321 not predict what the next executed instruction will
3322 be. Fortunately, this instruction is architecturally
3323 forbidden to branch; we know it will fall through.
3324 Start by skipping past it. */
3325 pc += thumb_insn_size (inst1);
3326 itstate = thumb_advance_itstate (itstate);
3327
3328 /* Set a breakpoint on the following instruction. */
3329 gdb_assert ((itstate & 0x0f) != 0);
3330 if (insert_bkpt)
3331 insert_single_step_breakpoint (gdbarch, aspace, pc);
3332 cond_negated = (itstate >> 4) & 1;
3333
3334 /* Skip all following instructions with the same
3335 condition. If there is a later instruction in the IT
3336 block with the opposite condition, set the other
3337 breakpoint there. If not, then set a breakpoint on
3338 the instruction after the IT block. */
3339 do
3340 {
3341 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3342 pc += thumb_insn_size (inst1);
3343 itstate = thumb_advance_itstate (itstate);
3344 }
3345 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
3346
3347 return MAKE_THUMB_ADDR (pc);
3348 }
3349 }
3350 }
3351 else if (itstate & 0x0f)
3352 {
3353 /* We are in a conditional block. Check the condition. */
3354 int cond = itstate >> 4;
3355
3356 if (! condition_true (cond, status))
3357 {
3358 /* Advance to the next instruction. All the 32-bit
3359 instructions share a common prefix. */
3360 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3361 return MAKE_THUMB_ADDR (pc + 4);
3362 else
3363 return MAKE_THUMB_ADDR (pc + 2);
3364 }
3365
3366 /* Otherwise, handle the instruction normally. */
3367 }
3368
3369 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
3370 {
3371 CORE_ADDR sp;
3372
3373 /* Fetch the saved PC from the stack. It's stored above
3374 all of the other registers. */
3375 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
3376 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
3377 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
3378 }
3379 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
3380 {
3381 unsigned long cond = bits (inst1, 8, 11);
3382 if (cond == 0x0f) /* 0x0f = SWI */
3383 {
3384 struct gdbarch_tdep *tdep;
3385 tdep = gdbarch_tdep (gdbarch);
3386
3387 if (tdep->syscall_next_pc != NULL)
3388 nextpc = tdep->syscall_next_pc (frame);
3389
3390 }
3391 else if (cond != 0x0f && condition_true (cond, status))
3392 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
3393 }
3394 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
3395 {
3396 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
3397 }
3398 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
3399 {
3400 unsigned short inst2;
3401 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
3402
3403 /* Default to the next instruction. */
3404 nextpc = pc + 4;
3405 nextpc = MAKE_THUMB_ADDR (nextpc);
3406
3407 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
3408 {
3409 /* Branches and miscellaneous control instructions. */
3410
3411 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
3412 {
3413 /* B, BL, BLX. */
3414 int j1, j2, imm1, imm2;
3415
3416 imm1 = sbits (inst1, 0, 10);
3417 imm2 = bits (inst2, 0, 10);
3418 j1 = bit (inst2, 13);
3419 j2 = bit (inst2, 11);
3420
3421 offset = ((imm1 << 12) + (imm2 << 1));
3422 offset ^= ((!j2) << 22) | ((!j1) << 23);
3423
3424 nextpc = pc_val + offset;
3425 /* For BLX make sure to clear the low bits. */
3426 if (bit (inst2, 12) == 0)
3427 nextpc = nextpc & 0xfffffffc;
3428 }
3429 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
3430 {
3431 /* SUBS PC, LR, #imm8. */
3432 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
3433 nextpc -= inst2 & 0x00ff;
3434 }
3435 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
3436 {
3437 /* Conditional branch. */
3438 if (condition_true (bits (inst1, 6, 9), status))
3439 {
3440 int sign, j1, j2, imm1, imm2;
3441
3442 sign = sbits (inst1, 10, 10);
3443 imm1 = bits (inst1, 0, 5);
3444 imm2 = bits (inst2, 0, 10);
3445 j1 = bit (inst2, 13);
3446 j2 = bit (inst2, 11);
3447
3448 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
3449 offset += (imm1 << 12) + (imm2 << 1);
3450
3451 nextpc = pc_val + offset;
3452 }
3453 }
3454 }
3455 else if ((inst1 & 0xfe50) == 0xe810)
3456 {
3457 /* Load multiple or RFE. */
3458 int rn, offset, load_pc = 1;
3459
3460 rn = bits (inst1, 0, 3);
3461 if (bit (inst1, 7) && !bit (inst1, 8))
3462 {
3463 /* LDMIA or POP */
3464 if (!bit (inst2, 15))
3465 load_pc = 0;
3466 offset = bitcount (inst2) * 4 - 4;
3467 }
3468 else if (!bit (inst1, 7) && bit (inst1, 8))
3469 {
3470 /* LDMDB */
3471 if (!bit (inst2, 15))
3472 load_pc = 0;
3473 offset = -4;
3474 }
3475 else if (bit (inst1, 7) && bit (inst1, 8))
3476 {
3477 /* RFEIA */
3478 offset = 0;
3479 }
3480 else if (!bit (inst1, 7) && !bit (inst1, 8))
3481 {
3482 /* RFEDB */
3483 offset = -8;
3484 }
3485 else
3486 load_pc = 0;
3487
3488 if (load_pc)
3489 {
3490 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
3491 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
3492 }
3493 }
3494 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
3495 {
3496 /* MOV PC or MOVS PC. */
3497 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3498 nextpc = MAKE_THUMB_ADDR (nextpc);
3499 }
3500 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
3501 {
3502 /* LDR PC. */
3503 CORE_ADDR base;
3504 int rn, load_pc = 1;
3505
3506 rn = bits (inst1, 0, 3);
3507 base = get_frame_register_unsigned (frame, rn);
3508 if (rn == 15)
3509 {
3510 base = (base + 4) & ~(CORE_ADDR) 0x3;
3511 if (bit (inst1, 7))
3512 base += bits (inst2, 0, 11);
3513 else
3514 base -= bits (inst2, 0, 11);
3515 }
3516 else if (bit (inst1, 7))
3517 base += bits (inst2, 0, 11);
3518 else if (bit (inst2, 11))
3519 {
3520 if (bit (inst2, 10))
3521 {
3522 if (bit (inst2, 9))
3523 base += bits (inst2, 0, 7);
3524 else
3525 base -= bits (inst2, 0, 7);
3526 }
3527 }
3528 else if ((inst2 & 0x0fc0) == 0x0000)
3529 {
3530 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
3531 base += get_frame_register_unsigned (frame, rm) << shift;
3532 }
3533 else
3534 /* Reserved. */
3535 load_pc = 0;
3536
3537 if (load_pc)
3538 nextpc = get_frame_memory_unsigned (frame, base, 4);
3539 }
3540 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
3541 {
3542 /* TBB. */
3543 CORE_ADDR tbl_reg, table, offset, length;
3544
3545 tbl_reg = bits (inst1, 0, 3);
3546 if (tbl_reg == 0x0f)
3547 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3548 else
3549 table = get_frame_register_unsigned (frame, tbl_reg);
3550
3551 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3552 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
3553 nextpc = pc_val + length;
3554 }
3555 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
3556 {
3557 /* TBH. */
3558 CORE_ADDR tbl_reg, table, offset, length;
3559
3560 tbl_reg = bits (inst1, 0, 3);
3561 if (tbl_reg == 0x0f)
3562 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3563 else
3564 table = get_frame_register_unsigned (frame, tbl_reg);
3565
3566 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3567 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
3568 nextpc = pc_val + length;
3569 }
3570 }
3571 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
3572 {
3573 if (bits (inst1, 3, 6) == 0x0f)
3574 nextpc = pc_val;
3575 else
3576 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3577 }
3578 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
3579 {
3580 if (bits (inst1, 3, 6) == 0x0f)
3581 nextpc = pc_val;
3582 else
3583 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3584
3585 nextpc = MAKE_THUMB_ADDR (nextpc);
3586 }
3587 else if ((inst1 & 0xf500) == 0xb100)
3588 {
3589 /* CBNZ or CBZ. */
3590 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
3591 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
3592
3593 if (bit (inst1, 11) && reg != 0)
3594 nextpc = pc_val + imm;
3595 else if (!bit (inst1, 11) && reg == 0)
3596 nextpc = pc_val + imm;
3597 }
3598 return nextpc;
3599 }
3600
3601 /* Get the raw next address. PC is the current program counter, in
3602 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
3603 the alternative next instruction when there are two options.
3604
3605 The value returned has the execution state of the next instruction
3606 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
3607 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
3608 address.
3609 */
3610 static CORE_ADDR
3611 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3612 {
3613 struct gdbarch *gdbarch = get_frame_arch (frame);
3614 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3615 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3616 unsigned long pc_val;
3617 unsigned long this_instr;
3618 unsigned long status;
3619 CORE_ADDR nextpc;
3620
3621 if (arm_frame_is_thumb (frame))
3622 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
3623
3624 pc_val = (unsigned long) pc;
3625 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3626
3627 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3628 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
3629
3630 if (bits (this_instr, 28, 31) == INST_NV)
3631 switch (bits (this_instr, 24, 27))
3632 {
3633 case 0xa:
3634 case 0xb:
3635 {
3636 /* Branch with Link and change to Thumb. */
3637 nextpc = BranchDest (pc, this_instr);
3638 nextpc |= bit (this_instr, 24) << 1;
3639 nextpc = MAKE_THUMB_ADDR (nextpc);
3640 break;
3641 }
3642 case 0xc:
3643 case 0xd:
3644 case 0xe:
3645 /* Coprocessor register transfer. */
3646 if (bits (this_instr, 12, 15) == 15)
3647 error (_("Invalid update to pc in instruction"));
3648 break;
3649 }
3650 else if (condition_true (bits (this_instr, 28, 31), status))
3651 {
3652 switch (bits (this_instr, 24, 27))
3653 {
3654 case 0x0:
3655 case 0x1: /* data processing */
3656 case 0x2:
3657 case 0x3:
3658 {
3659 unsigned long operand1, operand2, result = 0;
3660 unsigned long rn;
3661 int c;
3662
3663 if (bits (this_instr, 12, 15) != 15)
3664 break;
3665
3666 if (bits (this_instr, 22, 25) == 0
3667 && bits (this_instr, 4, 7) == 9) /* multiply */
3668 error (_("Invalid update to pc in instruction"));
3669
3670 /* BX <reg>, BLX <reg> */
3671 if (bits (this_instr, 4, 27) == 0x12fff1
3672 || bits (this_instr, 4, 27) == 0x12fff3)
3673 {
3674 rn = bits (this_instr, 0, 3);
3675 nextpc = (rn == 15) ? pc_val + 8
3676 : get_frame_register_unsigned (frame, rn);
3677 return nextpc;
3678 }
3679
3680 /* Multiply into PC */
3681 c = (status & FLAG_C) ? 1 : 0;
3682 rn = bits (this_instr, 16, 19);
3683 operand1 = (rn == 15) ? pc_val + 8
3684 : get_frame_register_unsigned (frame, rn);
3685
3686 if (bit (this_instr, 25))
3687 {
3688 unsigned long immval = bits (this_instr, 0, 7);
3689 unsigned long rotate = 2 * bits (this_instr, 8, 11);
3690 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
3691 & 0xffffffff;
3692 }
3693 else /* operand 2 is a shifted register */
3694 operand2 = shifted_reg_val (frame, this_instr, c, pc_val, status);
3695
3696 switch (bits (this_instr, 21, 24))
3697 {
3698 case 0x0: /*and */
3699 result = operand1 & operand2;
3700 break;
3701
3702 case 0x1: /*eor */
3703 result = operand1 ^ operand2;
3704 break;
3705
3706 case 0x2: /*sub */
3707 result = operand1 - operand2;
3708 break;
3709
3710 case 0x3: /*rsb */
3711 result = operand2 - operand1;
3712 break;
3713
3714 case 0x4: /*add */
3715 result = operand1 + operand2;
3716 break;
3717
3718 case 0x5: /*adc */
3719 result = operand1 + operand2 + c;
3720 break;
3721
3722 case 0x6: /*sbc */
3723 result = operand1 - operand2 + c - 1;
3724 break;
3725
3726 case 0x7: /*rsc */
3727 result = operand2 - operand1 + c - 1;
3728 break;
3729
3730 case 0x8:
3731 case 0x9:
3732 case 0xa:
3733 case 0xb: /* tst, teq, cmp, cmn */
3734 result = (unsigned long) nextpc;
3735 break;
3736
3737 case 0xc: /*orr */
3738 result = operand1 | operand2;
3739 break;
3740
3741 case 0xd: /*mov */
3742 /* Always step into a function. */
3743 result = operand2;
3744 break;
3745
3746 case 0xe: /*bic */
3747 result = operand1 & ~operand2;
3748 break;
3749
3750 case 0xf: /*mvn */
3751 result = ~operand2;
3752 break;
3753 }
3754
3755 /* In 26-bit APCS the bottom two bits of the result are
3756 ignored, and we always end up in ARM state. */
3757 if (!arm_apcs_32)
3758 nextpc = arm_addr_bits_remove (gdbarch, result);
3759 else
3760 nextpc = result;
3761
3762 break;
3763 }
3764
3765 case 0x4:
3766 case 0x5: /* data transfer */
3767 case 0x6:
3768 case 0x7:
3769 if (bit (this_instr, 20))
3770 {
3771 /* load */
3772 if (bits (this_instr, 12, 15) == 15)
3773 {
3774 /* rd == pc */
3775 unsigned long rn;
3776 unsigned long base;
3777
3778 if (bit (this_instr, 22))
3779 error (_("Invalid update to pc in instruction"));
3780
3781 /* byte write to PC */
3782 rn = bits (this_instr, 16, 19);
3783 base = (rn == 15) ? pc_val + 8
3784 : get_frame_register_unsigned (frame, rn);
3785 if (bit (this_instr, 24))
3786 {
3787 /* pre-indexed */
3788 int c = (status & FLAG_C) ? 1 : 0;
3789 unsigned long offset =
3790 (bit (this_instr, 25)
3791 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
3792 : bits (this_instr, 0, 11));
3793
3794 if (bit (this_instr, 23))
3795 base += offset;
3796 else
3797 base -= offset;
3798 }
3799 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
3800 4, byte_order);
3801 }
3802 }
3803 break;
3804
3805 case 0x8:
3806 case 0x9: /* block transfer */
3807 if (bit (this_instr, 20))
3808 {
3809 /* LDM */
3810 if (bit (this_instr, 15))
3811 {
3812 /* loading pc */
3813 int offset = 0;
3814
3815 if (bit (this_instr, 23))
3816 {
3817 /* up */
3818 unsigned long reglist = bits (this_instr, 0, 14);
3819 offset = bitcount (reglist) * 4;
3820 if (bit (this_instr, 24)) /* pre */
3821 offset += 4;
3822 }
3823 else if (bit (this_instr, 24))
3824 offset = -4;
3825
3826 {
3827 unsigned long rn_val =
3828 get_frame_register_unsigned (frame,
3829 bits (this_instr, 16, 19));
3830 nextpc =
3831 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
3832 + offset),
3833 4, byte_order);
3834 }
3835 }
3836 }
3837 break;
3838
3839 case 0xb: /* branch & link */
3840 case 0xa: /* branch */
3841 {
3842 nextpc = BranchDest (pc, this_instr);
3843 break;
3844 }
3845
3846 case 0xc:
3847 case 0xd:
3848 case 0xe: /* coproc ops */
3849 break;
3850 case 0xf: /* SWI */
3851 {
3852 struct gdbarch_tdep *tdep;
3853 tdep = gdbarch_tdep (gdbarch);
3854
3855 if (tdep->syscall_next_pc != NULL)
3856 nextpc = tdep->syscall_next_pc (frame);
3857
3858 }
3859 break;
3860
3861 default:
3862 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
3863 return (pc);
3864 }
3865 }
3866
3867 return nextpc;
3868 }
3869
3870 CORE_ADDR
3871 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
3872 {
3873 struct gdbarch *gdbarch = get_frame_arch (frame);
3874 CORE_ADDR nextpc =
3875 gdbarch_addr_bits_remove (gdbarch,
3876 arm_get_next_pc_raw (frame, pc, TRUE));
3877 if (nextpc == pc)
3878 error (_("Infinite loop detected"));
3879 return nextpc;
3880 }
3881
3882 /* single_step() is called just before we want to resume the inferior,
3883 if we want to single-step it but there is no hardware or kernel
3884 single-step support. We find the target of the coming instruction
3885 and breakpoint it. */
3886
3887 int
3888 arm_software_single_step (struct frame_info *frame)
3889 {
3890 struct gdbarch *gdbarch = get_frame_arch (frame);
3891 struct address_space *aspace = get_frame_address_space (frame);
3892
3893 /* NOTE: This may insert the wrong breakpoint instruction when
3894 single-stepping over a mode-changing instruction, if the
3895 CPSR heuristics are used. */
3896
3897 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
3898 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
3899
3900 return 1;
3901 }
3902
3903 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
3904 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
3905 NULL if an error occurs. BUF is freed. */
3906
3907 static gdb_byte *
3908 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
3909 int old_len, int new_len)
3910 {
3911 gdb_byte *new_buf, *middle;
3912 int bytes_to_read = new_len - old_len;
3913
3914 new_buf = xmalloc (new_len);
3915 memcpy (new_buf + bytes_to_read, buf, old_len);
3916 xfree (buf);
3917 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
3918 {
3919 xfree (new_buf);
3920 return NULL;
3921 }
3922 return new_buf;
3923 }
3924
3925 /* An IT block is at most the 2-byte IT instruction followed by
3926 four 4-byte instructions. The furthest back we must search to
3927 find an IT block that affects the current instruction is thus
3928 2 + 3 * 4 == 14 bytes. */
3929 #define MAX_IT_BLOCK_PREFIX 14
3930
3931 /* Use a quick scan if there are more than this many bytes of
3932 code. */
3933 #define IT_SCAN_THRESHOLD 32
3934
3935 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
3936 A breakpoint in an IT block may not be hit, depending on the
3937 condition flags. */
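/* A sketch of the problem (hypothetical instruction sequence): in

     itte eq
     moveq r0, #1
     moveq r1, #2
     movne r2, #3

   a breakpoint placed on the second MOVEQ is only reached when the Z
   flag is set, so it could be silently skipped.  Moving the breakpoint
   back to the IT instruction itself guarantees that it is always hit.  */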
3938 static CORE_ADDR
3939 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
3940 {
3941 gdb_byte *buf;
3942 char map_type;
3943 CORE_ADDR boundary, func_start;
3944 int buf_len, buf2_len;
3945 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
3946 int i, any, last_it, last_it_count;
3947
3948 /* If we are using BKPT breakpoints, none of this is necessary. */
3949 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
3950 return bpaddr;
3951
3952 /* ARM mode does not have this problem. */
3953 if (!arm_pc_is_thumb (gdbarch, bpaddr))
3954 return bpaddr;
3955
3956 /* We are setting a breakpoint in Thumb code that could potentially
3957 contain an IT block. The first step is to find how much Thumb
3958 code there is; we do not need to read outside of known Thumb
3959 sequences. */
3960 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
3961 if (map_type == 0)
3962 /* Thumb-2 code must have mapping symbols to have a chance. */
3963 return bpaddr;
3964
3965 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
3966
3967 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
3968 && func_start > boundary)
3969 boundary = func_start;
3970
3971 /* Search for a candidate IT instruction. We have to do some fancy
3972 footwork to distinguish a real IT instruction from the second
3973 half of a 32-bit instruction, but there is no need for that if
3974 there's no candidate. */
3975 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
3976 if (buf_len == 0)
3977 /* No room for an IT instruction. */
3978 return bpaddr;
3979
3980 buf = xmalloc (buf_len);
3981 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
3982 return bpaddr;
3983 any = 0;
3984 for (i = 0; i < buf_len; i += 2)
3985 {
3986 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3987 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3988 {
3989 any = 1;
3990 break;
3991 }
3992 }
3993 if (any == 0)
3994 {
3995 xfree (buf);
3996 return bpaddr;
3997 }
3998
3999 /* OK, the code bytes before this instruction contain at least one
4000 halfword which resembles an IT instruction. We know that it's
4001 Thumb code, but there are still two possibilities. Either the
4002 halfword really is an IT instruction, or it is the second half of
4003 a 32-bit Thumb instruction. The only way we can tell is to
4004 scan forwards from a known instruction boundary. */
4005 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
4006 {
4007 int definite;
4008
4009 /* There's a lot of code before this instruction. Start with an
4010 optimistic search; it's easy to recognize halfwords that can
4011 not be the start of a 32-bit instruction, and use that to
4012 lock on to the instruction boundaries. */
4013 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4014 if (buf == NULL)
4015 return bpaddr;
4016 buf_len = IT_SCAN_THRESHOLD;
4017
4018 definite = 0;
4019 for (i = 0; i < buf_len - MAX_IT_BLOCK_PREFIX && ! definite; i += 2)
4020 {
4021 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4022 if (thumb_insn_size (inst1) == 2)
4023 {
4024 definite = 1;
4025 break;
4026 }
4027 }
4028
4029 /* At this point, if DEFINITE, BUF[I] is the first place we
4030 are sure that we know the instruction boundaries, and it is far
4031 enough from BPADDR that we could not miss an IT instruction
4032 affecting BPADDR. If ! DEFINITE, give up - start from a
4033 known boundary. */
4034 if (! definite)
4035 {
4036 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4037 if (buf == NULL)
4038 return bpaddr;
4039 buf_len = bpaddr - boundary;
4040 i = 0;
4041 }
4042 }
4043 else
4044 {
4045 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4046 if (buf == NULL)
4047 return bpaddr;
4048 buf_len = bpaddr - boundary;
4049 i = 0;
4050 }
4051
4052 /* Scan forwards. Find the last IT instruction before BPADDR. */
4053 last_it = -1;
4054 last_it_count = 0;
4055 while (i < buf_len)
4056 {
4057 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4058 last_it_count--;
4059 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4060 {
4061 last_it = i;
4062 if (inst1 & 0x0001)
4063 last_it_count = 4;
4064 else if (inst1 & 0x0002)
4065 last_it_count = 3;
4066 else if (inst1 & 0x0004)
4067 last_it_count = 2;
4068 else
4069 last_it_count = 1;
4070 }
4071 i += thumb_insn_size (inst1);
4072 }
4073
4074 xfree (buf);
4075
4076 if (last_it == -1)
4077 /* There wasn't really an IT instruction after all. */
4078 return bpaddr;
4079
4080 if (last_it_count < 1)
4081 /* It was too far away. */
4082 return bpaddr;
4083
4084 /* This really is a trouble spot. Move the breakpoint to the IT
4085 instruction. */
4086 return bpaddr - buf_len + last_it;
4087 }
4088
4089 /* ARM displaced stepping support.
4090
4091 Generally ARM displaced stepping works as follows:
4092
4093 1. When an instruction is to be single-stepped, it is first decoded by
4094 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
4095 Depending on the type of instruction, it is then copied to a scratch
4096 location, possibly in a modified form. The copy_* set of functions
4097 performs such modification, as necessary. A breakpoint is placed after
4098 the modified instruction in the scratch space to return control to GDB.
4099 Note in particular that instructions which modify the PC will no longer
4100 do so after modification.
4101
4102 2. The instruction is single-stepped, by setting the PC to the scratch
4103 location address, and resuming. Control returns to GDB when the
4104 breakpoint is hit.
4105
4106 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4107 function used for the current instruction. This function's job is to
4108 put the CPU/memory state back to what it would have been if the
4109 instruction had been executed unmodified in its original location. */
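/* As a rough illustration (addresses are made up): suppose the inferior
   is stopped at an "ldr pc, [r0]" at 0x8000 and the scratch area is at
   0x20000.  Step 1 copies a modified instruction to 0x20000, one that
   loads into a scratch register rather than into the PC, with a
   breakpoint after it.  Step 2 resumes the inferior with PC = 0x20000.
   Step 3's cleanup routine then writes the loaded value into the real
   PC, so execution continues exactly where the original load would have
   gone.  */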
4110
4111 /* NOP instruction (mov r0, r0). */
4112 #define ARM_NOP 0xe1a00000
4113
4114 /* Helper for register reads for displaced stepping. In particular, this
4115 returns the PC as it would be seen by the instruction at its original
4116 location. */
4117
4118 ULONGEST
4119 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
4120 {
4121 ULONGEST ret;
4122
4123 if (regno == 15)
4124 {
4125 if (debug_displaced)
4126 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
4127 (unsigned long) from + 8);
4128 return (ULONGEST) from + 8; /* Pipeline offset. */
4129 }
4130 else
4131 {
4132 regcache_cooked_read_unsigned (regs, regno, &ret);
4133 if (debug_displaced)
4134 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
4135 regno, (unsigned long) ret);
4136 return ret;
4137 }
4138 }
4139
4140 static int
4141 displaced_in_arm_mode (struct regcache *regs)
4142 {
4143 ULONGEST ps;
4144 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4145
4146 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4147
4148 return (ps & t_bit) == 0;
4149 }
4150
4151 /* Write to the PC as from a branch instruction. */
4152
4153 static void
4154 branch_write_pc (struct regcache *regs, ULONGEST val)
4155 {
4156 if (displaced_in_arm_mode (regs))
4157 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4158 architecture versions < 6. */
4159 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x3);
4160 else
4161 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x1);
4162 }
4163
4164 /* Write to the PC as from a branch-exchange instruction. */
4165
4166 static void
4167 bx_write_pc (struct regcache *regs, ULONGEST val)
4168 {
4169 ULONGEST ps;
4170 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4171
4172 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4173
4174 if ((val & 1) == 1)
4175 {
4176 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4177 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4178 }
4179 else if ((val & 2) == 0)
4180 {
4181 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4182 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4183 }
4184 else
4185 {
4186 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4187 mode, align dest to 4 bytes). */
4188 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4189 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4190 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4191 }
4192 }
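/* For example (illustrative values): bx_write_pc with 0x8001 sets the
   Thumb bit in the CPSR and writes PC = 0x8000; with 0x8000 it clears
   the Thumb bit and writes PC = 0x8000; with the unpredictable value
   0x8002 it warns and falls back to ARM mode at PC = 0x8000.  */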
4193
4194 /* Write to the PC as if from a load instruction. */
4195
4196 static void
4197 load_write_pc (struct regcache *regs, ULONGEST val)
4198 {
4199 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4200 bx_write_pc (regs, val);
4201 else
4202 branch_write_pc (regs, val);
4203 }
4204
4205 /* Write to the PC as if from an ALU instruction. */
4206
4207 static void
4208 alu_write_pc (struct regcache *regs, ULONGEST val)
4209 {
4210 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
4211 bx_write_pc (regs, val);
4212 else
4213 branch_write_pc (regs, val);
4214 }
4215
4216 /* Helper for writing to registers for displaced stepping. Writing to the PC
4217    has varying effects depending on the instruction which does the write:
4218 this is controlled by the WRITE_PC argument. */
4219
4220 void
4221 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
4222 int regno, ULONGEST val, enum pc_write_style write_pc)
4223 {
4224 if (regno == 15)
4225 {
4226 if (debug_displaced)
4227 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
4228 (unsigned long) val);
4229 switch (write_pc)
4230 {
4231 case BRANCH_WRITE_PC:
4232 branch_write_pc (regs, val);
4233 break;
4234
4235 case BX_WRITE_PC:
4236 bx_write_pc (regs, val);
4237 break;
4238
4239 case LOAD_WRITE_PC:
4240 load_write_pc (regs, val);
4241 break;
4242
4243 case ALU_WRITE_PC:
4244 alu_write_pc (regs, val);
4245 break;
4246
4247 case CANNOT_WRITE_PC:
4248 warning (_("Instruction wrote to PC in an unexpected way when "
4249 "single-stepping"));
4250 break;
4251
4252 default:
4253 internal_error (__FILE__, __LINE__,
4254 _("Invalid argument to displaced_write_reg"));
4255 }
4256
4257 dsc->wrote_to_pc = 1;
4258 }
4259 else
4260 {
4261 if (debug_displaced)
4262 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
4263 regno, (unsigned long) val);
4264 regcache_cooked_write_unsigned (regs, regno, val);
4265 }
4266 }
4267
4268 /* This function is used to concisely determine if an instruction INSN
4269 references PC. Register fields of interest in INSN should have the
4270    corresponding fields of BITMASK set to 0b1111.  The function returns 1
4271 if any of these fields in INSN reference the PC (also 0b1111, r15), else it
4272 returns 0. */
4273
4274 static int
4275 insn_references_pc (uint32_t insn, uint32_t bitmask)
4276 {
4277 uint32_t lowbit = 1;
4278
4279 while (bitmask != 0)
4280 {
4281 uint32_t mask;
4282
4283 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
4284 ;
4285
4286 if (!lowbit)
4287 break;
4288
4289 mask = lowbit * 0xf;
4290
4291 if ((insn & mask) == mask)
4292 return 1;
4293
4294 bitmask &= ~mask;
4295 }
4296
4297 return 0;
4298 }
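/* A hypothetical example of the check above: for the ARM instruction
   "add pc, r1, r2" (0xe081f002), insn_references_pc (insn, 0x000ff00ful)
   tests the Rn, Rd and Rm fields and returns 1, because the Rd field is
   0xf; the same call for "add r0, r1, r2" (0xe0810002) returns 0.  */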
4299
4300 /* The simplest copy function. Many instructions have the same effect no
4301 matter what address they are executed at: in those cases, use this. */
4302
4303 static int
4304 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
4305 const char *iname, struct displaced_step_closure *dsc)
4306 {
4307 if (debug_displaced)
4308 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
4309 "opcode/class '%s' unmodified\n", (unsigned long) insn,
4310 iname);
4311
4312 dsc->modinsn[0] = insn;
4313
4314 return 0;
4315 }
4316
4317 /* Preload instructions with immediate offset. */
4318
4319 static void
4320 cleanup_preload (struct gdbarch *gdbarch,
4321 struct regcache *regs, struct displaced_step_closure *dsc)
4322 {
4323 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4324 if (!dsc->u.preload.immed)
4325 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4326 }
4327
4328 static int
4329 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4330 struct displaced_step_closure *dsc)
4331 {
4332 unsigned int rn = bits (insn, 16, 19);
4333 ULONGEST rn_val;
4334 CORE_ADDR from = dsc->insn_addr;
4335
4336 if (!insn_references_pc (insn, 0x000f0000ul))
4337 return copy_unmodified (gdbarch, insn, "preload", dsc);
4338
4339 if (debug_displaced)
4340 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4341 (unsigned long) insn);
4342
4343 /* Preload instructions:
4344
4345 {pli/pld} [rn, #+/-imm]
4346 ->
4347 {pli/pld} [r0, #+/-imm]. */
4348
4349 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4350 rn_val = displaced_read_reg (regs, from, rn);
4351 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4352
4353 dsc->u.preload.immed = 1;
4354
4355 dsc->modinsn[0] = insn & 0xfff0ffff;
4356
4357 dsc->cleanup = &cleanup_preload;
4358
4359 return 0;
4360 }
4361
4362 /* Preload instructions with register offset. */
4363
4364 static int
4365 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4366 struct displaced_step_closure *dsc)
4367 {
4368 unsigned int rn = bits (insn, 16, 19);
4369 unsigned int rm = bits (insn, 0, 3);
4370 ULONGEST rn_val, rm_val;
4371 CORE_ADDR from = dsc->insn_addr;
4372
4373 if (!insn_references_pc (insn, 0x000f000ful))
4374 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
4375
4376 if (debug_displaced)
4377 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4378 (unsigned long) insn);
4379
4380 /* Preload register-offset instructions:
4381
4382 {pli/pld} [rn, rm {, shift}]
4383 ->
4384 {pli/pld} [r0, r1 {, shift}]. */
4385
4386 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4387 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4388 rn_val = displaced_read_reg (regs, from, rn);
4389 rm_val = displaced_read_reg (regs, from, rm);
4390 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4391 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4392
4393 dsc->u.preload.immed = 0;
4394
4395 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4396
4397 dsc->cleanup = &cleanup_preload;
4398
4399 return 0;
4400 }
4401
4402 /* Copy/cleanup coprocessor load and store instructions. */
4403
4404 static void
4405 cleanup_copro_load_store (struct gdbarch *gdbarch,
4406 struct regcache *regs,
4407 struct displaced_step_closure *dsc)
4408 {
4409 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4410
4411 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4412
4413 if (dsc->u.ldst.writeback)
4414 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4415 }
4416
4417 static int
4418 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4419 struct regcache *regs,
4420 struct displaced_step_closure *dsc)
4421 {
4422 unsigned int rn = bits (insn, 16, 19);
4423 ULONGEST rn_val;
4424 CORE_ADDR from = dsc->insn_addr;
4425
4426 if (!insn_references_pc (insn, 0x000f0000ul))
4427 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4428
4429 if (debug_displaced)
4430 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
4431 "load/store insn %.8lx\n", (unsigned long) insn);
4432
4433 /* Coprocessor load/store instructions:
4434
4435 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4436 ->
4437 {stc/stc2} [r0, #+/-imm].
4438
4439 ldc/ldc2 are handled identically. */
4440
4441 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4442 rn_val = displaced_read_reg (regs, from, rn);
4443 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4444
4445 dsc->u.ldst.writeback = bit (insn, 25);
4446 dsc->u.ldst.rn = rn;
4447
4448 dsc->modinsn[0] = insn & 0xfff0ffff;
4449
4450 dsc->cleanup = &cleanup_copro_load_store;
4451
4452 return 0;
4453 }
4454
4455 /* Clean up branch instructions (actually perform the branch, by setting
4456 PC). */
4457
4458 static void
4459 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4460 struct displaced_step_closure *dsc)
4461 {
4462 ULONGEST from = dsc->insn_addr;
4463 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4464 int branch_taken = condition_true (dsc->u.branch.cond, status);
4465 enum pc_write_style write_pc = dsc->u.branch.exchange
4466 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4467
4468 if (!branch_taken)
4469 return;
4470
4471 if (dsc->u.branch.link)
4472 {
4473 ULONGEST pc = displaced_read_reg (regs, from, 15);
4474 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
4475 }
4476
4477 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
4478 }
4479
4480 /* Copy B/BL/BLX instructions with immediate destinations. */
4481
4482 static int
4483 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
4484 struct regcache *regs, struct displaced_step_closure *dsc)
4485 {
4486 unsigned int cond = bits (insn, 28, 31);
4487 int exchange = (cond == 0xf);
4488 int link = exchange || bit (insn, 24);
4489 CORE_ADDR from = dsc->insn_addr;
4490 long offset;
4491
4492 if (debug_displaced)
4493 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
4494 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
4495 (unsigned long) insn);
4496
4497 /* Implement "BL<cond> <label>" as:
4498
4499 Preparation: cond <- instruction condition
4500 Insn: mov r0, r0 (nop)
4501 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4502
4503 B<cond> similar, but don't set r14 in cleanup. */
4504
4505 if (exchange)
4506 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
4507 then arrange the switch into Thumb mode. */
4508 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
4509 else
4510 offset = bits (insn, 0, 23) << 2;
4511
4512 if (bit (offset, 25))
4513 offset = offset | ~0x3ffffff;
4514
4515 dsc->u.branch.cond = cond;
4516 dsc->u.branch.link = link;
4517 dsc->u.branch.exchange = exchange;
4518 dsc->u.branch.dest = from + 8 + offset;
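  /* For instance (illustrative): a backwards branch "b .-8" encodes
     imm24 = 0xfffffc.  Shifted left by two this is 0x3fffff0, bit 25 is
     set, so the sign extension above yields -16 and the destination is
     from + 8 - 16, i.e. the instruction eight bytes before the branch.  */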
4519
4520 dsc->modinsn[0] = ARM_NOP;
4521
4522 dsc->cleanup = &cleanup_branch;
4523
4524 return 0;
4525 }
4526
4527 /* Copy BX/BLX with register-specified destinations. */
4528
4529 static int
4530 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
4531 struct regcache *regs, struct displaced_step_closure *dsc)
4532 {
4533 unsigned int cond = bits (insn, 28, 31);
4534 /* BX: x12xxx1x
4535 BLX: x12xxx3x. */
4536 int link = bit (insn, 5);
4537 unsigned int rm = bits (insn, 0, 3);
4538 CORE_ADDR from = dsc->insn_addr;
4539
4540 if (debug_displaced)
4541 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
4542 "%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
4543
4544   /* Implement "{BX,BLX}<cond> <reg>" as:
4545
4546 Preparation: cond <- instruction condition
4547 Insn: mov r0, r0 (nop)
4548 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
4549
4550 Don't set r14 in cleanup for BX. */
4551
4552 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
4553
4554 dsc->u.branch.cond = cond;
4555 dsc->u.branch.link = link;
4556 dsc->u.branch.exchange = 1;
4557
4558 dsc->modinsn[0] = ARM_NOP;
4559
4560 dsc->cleanup = &cleanup_branch;
4561
4562 return 0;
4563 }
4564
4565 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
4566
4567 static void
4568 cleanup_alu_imm (struct gdbarch *gdbarch,
4569 struct regcache *regs, struct displaced_step_closure *dsc)
4570 {
4571 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4572 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4573 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4574 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4575 }
4576
4577 static int
4578 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4579 struct displaced_step_closure *dsc)
4580 {
4581 unsigned int rn = bits (insn, 16, 19);
4582 unsigned int rd = bits (insn, 12, 15);
4583 unsigned int op = bits (insn, 21, 24);
4584 int is_mov = (op == 0xd);
4585 ULONGEST rd_val, rn_val;
4586 CORE_ADDR from = dsc->insn_addr;
4587
4588 if (!insn_references_pc (insn, 0x000ff000ul))
4589 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
4590
4591 if (debug_displaced)
4592 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
4593 "%.8lx\n", is_mov ? "move" : "ALU",
4594 (unsigned long) insn);
4595
4596 /* Instruction is of form:
4597
4598 <op><cond> rd, [rn,] #imm
4599
4600 Rewrite as:
4601
4602 Preparation: tmp1, tmp2 <- r0, r1;
4603 r0, r1 <- rd, rn
4604 Insn: <op><cond> r0, r1, #imm
4605 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
4606 */
4607
4608 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4609 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4610 rn_val = displaced_read_reg (regs, from, rn);
4611 rd_val = displaced_read_reg (regs, from, rd);
4612 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4613 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4614 dsc->rd = rd;
4615
4616 if (is_mov)
4617 dsc->modinsn[0] = insn & 0xfff00fff;
4618 else
4619 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
4620
4621 dsc->cleanup = &cleanup_alu_imm;
4622
4623 return 0;
4624 }
4625
4626 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4627
4628 static void
4629 cleanup_alu_reg (struct gdbarch *gdbarch,
4630 struct regcache *regs, struct displaced_step_closure *dsc)
4631 {
4632 ULONGEST rd_val;
4633 int i;
4634
4635 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4636
4637 for (i = 0; i < 3; i++)
4638 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4639
4640 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4641 }
4642
4643 static int
4644 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4645 struct displaced_step_closure *dsc)
4646 {
4647 unsigned int rn = bits (insn, 16, 19);
4648 unsigned int rm = bits (insn, 0, 3);
4649 unsigned int rd = bits (insn, 12, 15);
4650 unsigned int op = bits (insn, 21, 24);
4651 int is_mov = (op == 0xd);
4652 ULONGEST rd_val, rn_val, rm_val;
4653 CORE_ADDR from = dsc->insn_addr;
4654
4655 if (!insn_references_pc (insn, 0x000ff00ful))
4656 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
4657
4658 if (debug_displaced)
4659 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
4660 is_mov ? "move" : "ALU", (unsigned long) insn);
4661
4662 /* Instruction is of form:
4663
4664 <op><cond> rd, [rn,] rm [, <shift>]
4665
4666 Rewrite as:
4667
4668 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4669 r0, r1, r2 <- rd, rn, rm
4670 Insn: <op><cond> r0, r1, r2 [, <shift>]
4671 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4672 */
4673
4674 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4675 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4676 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4677 rd_val = displaced_read_reg (regs, from, rd);
4678 rn_val = displaced_read_reg (regs, from, rn);
4679 rm_val = displaced_read_reg (regs, from, rm);
4680 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4681 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4682 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4683 dsc->rd = rd;
4684
4685 if (is_mov)
4686 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
4687 else
4688 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
4689
4690 dsc->cleanup = &cleanup_alu_reg;
4691
4692 return 0;
4693 }
4694
4695 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4696
4697 static void
4698 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
4699 struct regcache *regs,
4700 struct displaced_step_closure *dsc)
4701 {
4702 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4703 int i;
4704
4705 for (i = 0; i < 4; i++)
4706 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4707
4708 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4709 }
4710
4711 static int
4712 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
4713 struct regcache *regs, struct displaced_step_closure *dsc)
4714 {
4715 unsigned int rn = bits (insn, 16, 19);
4716 unsigned int rm = bits (insn, 0, 3);
4717 unsigned int rd = bits (insn, 12, 15);
4718 unsigned int rs = bits (insn, 8, 11);
4719 unsigned int op = bits (insn, 21, 24);
4720 int is_mov = (op == 0xd), i;
4721 ULONGEST rd_val, rn_val, rm_val, rs_val;
4722 CORE_ADDR from = dsc->insn_addr;
4723
4724 if (!insn_references_pc (insn, 0x000fff0ful))
4725 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
4726
4727 if (debug_displaced)
4728 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
4729 "%.8lx\n", is_mov ? "move" : "ALU",
4730 (unsigned long) insn);
4731
4732 /* Instruction is of form:
4733
4734 <op><cond> rd, [rn,] rm, <shift> rs
4735
4736 Rewrite as:
4737
4738 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4739 r0, r1, r2, r3 <- rd, rn, rm, rs
4740 Insn: <op><cond> r0, r1, r2, <shift> r3
4741 Cleanup: tmp5 <- r0
4742 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4743 rd <- tmp5
4744 */
4745
4746 for (i = 0; i < 4; i++)
4747 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4748
4749 rd_val = displaced_read_reg (regs, from, rd);
4750 rn_val = displaced_read_reg (regs, from, rn);
4751 rm_val = displaced_read_reg (regs, from, rm);
4752 rs_val = displaced_read_reg (regs, from, rs);
4753 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4754 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4755 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4756 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
4757 dsc->rd = rd;
4758
4759 if (is_mov)
4760 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
4761 else
4762 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
4763
4764 dsc->cleanup = &cleanup_alu_shifted_reg;
4765
4766 return 0;
4767 }
4768
4769 /* Clean up load instructions. */
4770
4771 static void
4772 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
4773 struct displaced_step_closure *dsc)
4774 {
4775 ULONGEST rt_val, rt_val2 = 0, rn_val;
4776 CORE_ADDR from = dsc->insn_addr;
4777
4778 rt_val = displaced_read_reg (regs, from, 0);
4779 if (dsc->u.ldst.xfersize == 8)
4780 rt_val2 = displaced_read_reg (regs, from, 1);
4781 rn_val = displaced_read_reg (regs, from, 2);
4782
4783 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4784 if (dsc->u.ldst.xfersize > 4)
4785 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4786 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4787 if (!dsc->u.ldst.immed)
4788 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4789
4790 /* Handle register writeback. */
4791 if (dsc->u.ldst.writeback)
4792 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4793 /* Put result in right place. */
4794 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
4795 if (dsc->u.ldst.xfersize == 8)
4796 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
4797 }
4798
4799 /* Clean up store instructions. */
4800
4801 static void
4802 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
4803 struct displaced_step_closure *dsc)
4804 {
4805 CORE_ADDR from = dsc->insn_addr;
4806 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
4807
4808 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4809 if (dsc->u.ldst.xfersize > 4)
4810 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4811 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4812 if (!dsc->u.ldst.immed)
4813 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4814   if (dsc->u.ldst.restore_r4)
4815 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
4816
4817 /* Writeback. */
4818 if (dsc->u.ldst.writeback)
4819 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4820 }
4821
4822 /* Copy "extra" load/store instructions. These are halfword/doubleword
4823 transfers, which have a different encoding to byte/word transfers. */
4824
4825 static int
4826 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
4827 struct regcache *regs, struct displaced_step_closure *dsc)
4828 {
4829 unsigned int op1 = bits (insn, 20, 24);
4830 unsigned int op2 = bits (insn, 5, 6);
4831 unsigned int rt = bits (insn, 12, 15);
4832 unsigned int rn = bits (insn, 16, 19);
4833 unsigned int rm = bits (insn, 0, 3);
4834 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
4835 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
4836 int immed = (op1 & 0x4) != 0;
4837 int opcode;
4838 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
4839 CORE_ADDR from = dsc->insn_addr;
4840
4841 if (!insn_references_pc (insn, 0x000ff00ful))
4842 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
4843
4844 if (debug_displaced)
4845 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
4846 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
4847 (unsigned long) insn);
4848
4849 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
4850
4851 if (opcode < 0)
4852 internal_error (__FILE__, __LINE__,
4853 _("copy_extra_ld_st: instruction decode error"));
4854
4855 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4856 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4857 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4858 if (!immed)
4859 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4860
4861 rt_val = displaced_read_reg (regs, from, rt);
4862 if (bytesize[opcode] == 8)
4863 rt_val2 = displaced_read_reg (regs, from, rt + 1);
4864 rn_val = displaced_read_reg (regs, from, rn);
4865 if (!immed)
4866 rm_val = displaced_read_reg (regs, from, rm);
4867
4868 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4869 if (bytesize[opcode] == 8)
4870 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
4871 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4872 if (!immed)
4873 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4874
4875 dsc->rd = rt;
4876 dsc->u.ldst.xfersize = bytesize[opcode];
4877 dsc->u.ldst.rn = rn;
4878 dsc->u.ldst.immed = immed;
4879 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4880 dsc->u.ldst.restore_r4 = 0;
4881
4882 if (immed)
4883 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
4884 ->
4885 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
4886 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4887 else
4888 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
4889 ->
4890 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
4891 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4892
4893 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
4894
4895 return 0;
4896 }
4897
4898 /* Copy byte/word loads and stores. */
4899
4900 static int
4901 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
4902 struct regcache *regs,
4903 struct displaced_step_closure *dsc, int load, int byte,
4904 int usermode)
4905 {
4906 int immed = !bit (insn, 25);
4907 unsigned int rt = bits (insn, 12, 15);
4908 unsigned int rn = bits (insn, 16, 19);
4909 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
4910 ULONGEST rt_val, rn_val, rm_val = 0;
4911 CORE_ADDR from = dsc->insn_addr;
4912
4913 if (!insn_references_pc (insn, 0x000ff00ful))
4914 return copy_unmodified (gdbarch, insn, "load/store", dsc);
4915
4916 if (debug_displaced)
4917 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
4918 load ? (byte ? "ldrb" : "ldr")
4919 : (byte ? "strb" : "str"), usermode ? "t" : "",
4920 (unsigned long) insn);
4921
4922 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4923 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4924 if (!immed)
4925 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4926 if (!load)
4927 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
4928
4929 rt_val = displaced_read_reg (regs, from, rt);
4930 rn_val = displaced_read_reg (regs, from, rn);
4931 if (!immed)
4932 rm_val = displaced_read_reg (regs, from, rm);
4933
4934 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4935 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4936 if (!immed)
4937 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4938
4939 dsc->rd = rt;
4940 dsc->u.ldst.xfersize = byte ? 1 : 4;
4941 dsc->u.ldst.rn = rn;
4942 dsc->u.ldst.immed = immed;
4943 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4944
4945 /* To write PC we can do:
4946
4947 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
4948 scratch+4: ldr r4, temp
4949 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
4950 scratch+12: add r4, r4, #8 (r4 = offset)
4951 scratch+16: add r0, r0, r4
4952 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
4953 scratch+24: <temp>
4954
4955 Otherwise we don't know what value to write for PC, since the offset is
4956 architecture-dependent (sometimes PC+8, sometimes PC+12). */
4957
4958 if (load || rt != 15)
4959 {
4960 dsc->u.ldst.restore_r4 = 0;
4961
4962 if (immed)
4963 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
4964 ->
4965 {ldr,str}[b]<cond> r0, [r2, #imm]. */
4966 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4967 else
4968 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
4969 ->
4970 {ldr,str}[b]<cond> r0, [r2, r3]. */
4971 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4972 }
4973 else
4974 {
4975 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
4976 dsc->u.ldst.restore_r4 = 1;
4977
4978 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
4979 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
4980 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
4981 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
4982 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
4983
4984 /* As above. */
4985 if (immed)
4986 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
4987 else
4988 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
4989
4990 dsc->modinsn[6] = 0x0; /* breakpoint location. */
4991 dsc->modinsn[7] = 0x0; /* scratch space. */
4992
4993 dsc->numinsns = 6;
4994 }
4995
4996 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
4997
4998 return 0;
4999 }
5000
5001 /* Cleanup LDM instructions with fully-populated register list. This is an
5002 unfortunate corner case: it's impossible to implement correctly by modifying
5003 the instruction. The issue is as follows: we have an instruction,
5004
5005 ldm rN, {r0-r15}
5006
5007 which we must rewrite to avoid loading PC. A possible solution would be to
5008 do the load in two halves, something like (with suitable cleanup
5009 afterwards):
5010
5011 mov r8, rN
5012 ldm[id][ab] r8!, {r0-r7}
5013 str r7, <temp>
5014 ldm[id][ab] r8, {r7-r14}
5015 <bkpt>
5016
5017 but at present there's no suitable place for <temp>, since the scratch space
5018 is overwritten before the cleanup routine is called. For now, we simply
5019 emulate the instruction. */
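/* Concretely (illustrative): for "ldmia r0, {r0-r15}" the cleanup below
   walks the transfer address upwards, reading one word for each register
   in the list, lowest first, and writing it with displaced_write_reg;
   the word destined for r15 goes through LOAD_WRITE_PC, which applies
   interworking semantics where the architecture supports them.  */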
5020
5021 static void
5022 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
5023 struct displaced_step_closure *dsc)
5024 {
5025 ULONGEST from = dsc->insn_addr;
5026 int inc = dsc->u.block.increment;
5027 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
5028 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
5029 uint32_t regmask = dsc->u.block.regmask;
5030 int regno = inc ? 0 : 15;
5031 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
5032 int exception_return = dsc->u.block.load && dsc->u.block.user
5033 && (regmask & 0x8000) != 0;
5034 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5035 int do_transfer = condition_true (dsc->u.block.cond, status);
5036 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5037
5038 if (!do_transfer)
5039 return;
5040
5041 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
5042 sensible we can do here. Complain loudly. */
5043 if (exception_return)
5044 error (_("Cannot single-step exception return"));
5045
5046 /* We don't handle any stores here for now. */
5047 gdb_assert (dsc->u.block.load != 0);
5048
5049 if (debug_displaced)
5050 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
5051 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
5052 dsc->u.block.increment ? "inc" : "dec",
5053 dsc->u.block.before ? "before" : "after");
5054
5055 while (regmask)
5056 {
5057 uint32_t memword;
5058
5059 if (inc)
5060 while (regno <= 15 && (regmask & (1 << regno)) == 0)
5061 regno++;
5062 else
5063 while (regno >= 0 && (regmask & (1 << regno)) == 0)
5064 regno--;
5065
5066 xfer_addr += bump_before;
5067
5068 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
5069 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
5070
5071 xfer_addr += bump_after;
5072
5073 regmask &= ~(1 << regno);
5074 }
5075
5076 if (dsc->u.block.writeback)
5077 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
5078 CANNOT_WRITE_PC);
5079 }
5080
5081 /* Clean up an STM which included the PC in the register list. */
5082
5083 static void
5084 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
5085 struct displaced_step_closure *dsc)
5086 {
5087 ULONGEST from = dsc->insn_addr;
5088 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5089 int store_executed = condition_true (dsc->u.block.cond, status);
5090 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
5091 CORE_ADDR stm_insn_addr;
5092 uint32_t pc_val;
5093 long offset;
5094 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5095
5096 /* If condition code fails, there's nothing else to do. */
5097 if (!store_executed)
5098 return;
5099
5100 if (dsc->u.block.increment)
5101 {
5102 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
5103
5104 if (dsc->u.block.before)
5105 pc_stored_at += 4;
5106 }
5107 else
5108 {
5109 pc_stored_at = dsc->u.block.xfer_addr;
5110
5111 if (dsc->u.block.before)
5112 pc_stored_at -= 4;
5113 }
5114
5115 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
5116 stm_insn_addr = dsc->scratch_base;
5117 offset = pc_val - stm_insn_addr;
5118
5119 if (debug_displaced)
5120 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
5121 "STM instruction\n", offset);
5122
5123 /* Rewrite the stored PC to the proper value for the non-displaced original
5124 instruction. */
5125 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
5126 dsc->insn_addr + offset);
5127 }
5128
5129 /* Clean up an LDM which includes the PC in the register list. We clumped all
5130 the registers in the transferred list into a contiguous range r0...rX (to
5131 avoid loading PC directly and losing control of the debugged program), so we
5132 must undo that here. */
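/* A short worked example (hypothetical instruction): "ldm r0, {r1, r2, pc}"
   is rewritten by copy_block_xfer below as "ldm r0, {r0, r1, r2}".  In the
   cleanup, r2 (which received the word destined for the PC) is written to
   the PC, r1's word is moved to r2 and r0's to r1, and finally r0, the one
   clobbered register not in the original list, is restored from the value
   saved before the step.  */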
5133
5134 static void
5135 cleanup_block_load_pc (struct gdbarch *gdbarch,
5136 struct regcache *regs,
5137 struct displaced_step_closure *dsc)
5138 {
5139 ULONGEST from = dsc->insn_addr;
5140 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5141 int load_executed = condition_true (dsc->u.block.cond, status), i;
5142 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
5143 unsigned int regs_loaded = bitcount (mask);
5144 unsigned int num_to_shuffle = regs_loaded, clobbered;
5145
5146 /* The method employed here will fail if the register list is fully populated
5147 (we need to avoid loading PC directly). */
5148 gdb_assert (num_to_shuffle < 16);
5149
5150 if (!load_executed)
5151 return;
5152
5153 clobbered = (1 << num_to_shuffle) - 1;
5154
5155 while (num_to_shuffle > 0)
5156 {
5157 if ((mask & (1 << write_reg)) != 0)
5158 {
5159 unsigned int read_reg = num_to_shuffle - 1;
5160
5161 if (read_reg != write_reg)
5162 {
5163 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
5164 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
5165 if (debug_displaced)
5166 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
5167 "loaded register r%d to r%d\n"), read_reg,
5168 write_reg);
5169 }
5170 else if (debug_displaced)
5171 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
5172 "r%d already in the right place\n"),
5173 write_reg);
5174
5175 clobbered &= ~(1 << write_reg);
5176
5177 num_to_shuffle--;
5178 }
5179
5180 write_reg--;
5181 }
5182
5183 /* Restore any registers we scribbled over. */
5184 for (write_reg = 0; clobbered != 0; write_reg++)
5185 {
5186 if ((clobbered & (1 << write_reg)) != 0)
5187 {
5188 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
5189 CANNOT_WRITE_PC);
5190 if (debug_displaced)
5191 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
5192 "clobbered register r%d\n"), write_reg);
5193 clobbered &= ~(1 << write_reg);
5194 }
5195 }
5196
5197 /* Perform register writeback manually. */
5198 if (dsc->u.block.writeback)
5199 {
5200 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
5201
5202 if (dsc->u.block.increment)
5203 new_rn_val += regs_loaded * 4;
5204 else
5205 new_rn_val -= regs_loaded * 4;
5206
5207 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
5208 CANNOT_WRITE_PC);
5209 }
5210 }
5211
5212 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5213 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5214
5215 static int
5216 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5217 struct displaced_step_closure *dsc)
5218 {
5219 int load = bit (insn, 20);
5220 int user = bit (insn, 22);
5221 int increment = bit (insn, 23);
5222 int before = bit (insn, 24);
5223 int writeback = bit (insn, 21);
5224 int rn = bits (insn, 16, 19);
5225 CORE_ADDR from = dsc->insn_addr;
5226
5227 /* Block transfers which don't mention PC can be run directly out-of-line. */
5228 if (rn != 15 && (insn & 0x8000) == 0)
5229 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
5230
5231 if (rn == 15)
5232 {
5233 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
5234 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
5235 }
5236
5237 if (debug_displaced)
5238 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
5239 "%.8lx\n", (unsigned long) insn);
5240
5241 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
5242 dsc->u.block.rn = rn;
5243
5244 dsc->u.block.load = load;
5245 dsc->u.block.user = user;
5246 dsc->u.block.increment = increment;
5247 dsc->u.block.before = before;
5248 dsc->u.block.writeback = writeback;
5249 dsc->u.block.cond = bits (insn, 28, 31);
5250
5251 dsc->u.block.regmask = insn & 0xffff;
5252
5253 if (load)
5254 {
5255 if ((insn & 0xffff) == 0xffff)
5256 {
5257 /* LDM with a fully-populated register list. This case is
5258 particularly tricky. Implement for now by fully emulating the
5259 instruction (which might not behave perfectly in all cases, but
5260 these instructions should be rare enough for that not to matter
5261 too much). */
5262 dsc->modinsn[0] = ARM_NOP;
5263
5264 dsc->cleanup = &cleanup_block_load_all;
5265 }
5266 else
5267 {
5268 /* LDM of a list of registers which includes PC. Implement by
5269 rewriting the list of registers to be transferred into a
5270 contiguous chunk r0...rX before doing the transfer, then shuffling
5271 registers into the correct places in the cleanup routine. */
5272 unsigned int regmask = insn & 0xffff;
5273           unsigned int num_in_list = bitcount (regmask), new_regmask;
5274           unsigned int i;
5275
5276 for (i = 0; i < num_in_list; i++)
5277 dsc->tmp[i] = displaced_read_reg (regs, from, i);
5278
5279 /* Writeback makes things complicated. We need to avoid clobbering
5280 the base register with one of the registers in our modified
5281 register list, but just using a different register can't work in
5282 all cases, e.g.:
5283
5284 ldm r14!, {r0-r13,pc}
5285
5286 which would need to be rewritten as:
5287
5288 ldm rN!, {r0-r14}
5289
5290 but that can't work, because there's no free register for N.
5291
5292 Solve this by turning off the writeback bit, and emulating
5293 writeback manually in the cleanup routine. */
5294
5295 if (writeback)
5296 insn &= ~(1 << 21);
5297
5298 new_regmask = (1 << num_in_list) - 1;
5299
5300 if (debug_displaced)
5301 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
5302 "{..., pc}: original reg list %.4x, modified "
5303 "list %.4x\n"), rn, writeback ? "!" : "",
5304 (int) insn & 0xffff, new_regmask);
5305
5306 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
5307
5308 dsc->cleanup = &cleanup_block_load_pc;
5309 }
5310 }
5311 else
5312 {
5313 /* STM of a list of registers which includes PC. Run the instruction
5314 as-is, but out of line: this will store the wrong value for the PC,
5315 so we must manually fix up the memory in the cleanup routine.
5316 Doing things this way has the advantage that we can auto-detect
5317 the offset of the PC write (which is architecture-dependent) in
5318 the cleanup routine. */
5319 dsc->modinsn[0] = insn;
5320
5321 dsc->cleanup = &cleanup_block_store_pc;
5322 }
5323
5324 return 0;
5325 }
5326
5327 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
5328 for Linux, where some SVC instructions must be treated specially. */
5329
5330 static void
5331 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
5332 struct displaced_step_closure *dsc)
5333 {
5334 CORE_ADDR from = dsc->insn_addr;
5335 CORE_ADDR resume_addr = from + 4;
5336
5337 if (debug_displaced)
5338 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
5339 "%.8lx\n", (unsigned long) resume_addr);
5340
5341 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
5342 }
5343
5344 static int
5345 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5346 struct regcache *regs, struct displaced_step_closure *dsc)
5347 {
5348 CORE_ADDR from = dsc->insn_addr;
5349
5350 /* Allow OS-specific code to override SVC handling. */
5351 if (dsc->u.svc.copy_svc_os)
5352 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
5353
5354 if (debug_displaced)
5355 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
5356 (unsigned long) insn);
5357
5358 /* Preparation: none.
5359 Insn: unmodified svc.
5360 Cleanup: pc <- insn_addr + 4. */
5361
5362 dsc->modinsn[0] = insn;
5363
5364 dsc->cleanup = &cleanup_svc;
5365 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
5366 instruction. */
5367 dsc->wrote_to_pc = 1;
5368
5369 return 0;
5370 }
5371
5372 /* Copy undefined instructions. */
5373
5374 static int
5375 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
5376 struct displaced_step_closure *dsc)
5377 {
5378 if (debug_displaced)
5379 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
5380 (unsigned long) insn);
5381
5382 dsc->modinsn[0] = insn;
5383
5384 return 0;
5385 }
5386
5387 /* Copy unpredictable instructions. */
5388
5389 static int
5390 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
5391 struct displaced_step_closure *dsc)
5392 {
5393 if (debug_displaced)
5394 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
5395 "%.8lx\n", (unsigned long) insn);
5396
5397 dsc->modinsn[0] = insn;
5398
5399 return 0;
5400 }
5401
5402 /* The decode_* functions are instruction decoding helpers. They mostly follow
5403 the presentation in the ARM ARM. */
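/* For instance (illustrative): 0xfa000000, a BLX with zero offset, has
   bit 27 set, so decode_unconditional (below) computes
   ((0x02000000 >> 23) | 0) == 0x4 and dispatches the instruction to
   copy_b_bl_blx.  */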
5404
5405 static int
5406 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
5407 struct regcache *regs,
5408 struct displaced_step_closure *dsc)
5409 {
5410 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
5411 unsigned int rn = bits (insn, 16, 19);
5412
5413 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
5414 return copy_unmodified (gdbarch, insn, "cps", dsc);
5415 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
5416 return copy_unmodified (gdbarch, insn, "setend", dsc);
5417 else if ((op1 & 0x60) == 0x20)
5418 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
5419 else if ((op1 & 0x71) == 0x40)
5420 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
5421 else if ((op1 & 0x77) == 0x41)
5422 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5423 else if ((op1 & 0x77) == 0x45)
5424 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
5425 else if ((op1 & 0x77) == 0x51)
5426 {
5427 if (rn != 0xf)
5428 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5429 else
5430 return copy_unpred (gdbarch, insn, dsc);
5431 }
5432 else if ((op1 & 0x77) == 0x55)
5433 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5434 else if (op1 == 0x57)
5435 switch (op2)
5436 {
5437 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
5438 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
5439 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
5440 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
5441 default: return copy_unpred (gdbarch, insn, dsc);
5442 }
5443 else if ((op1 & 0x63) == 0x43)
5444 return copy_unpred (gdbarch, insn, dsc);
5445 else if ((op2 & 0x1) == 0x0)
5446 switch (op1 & ~0x80)
5447 {
5448 case 0x61:
5449 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5450 case 0x65:
5451 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
5452 case 0x71: case 0x75:
5453 /* pld/pldw reg. */
5454 return copy_preload_reg (gdbarch, insn, regs, dsc);
5455 case 0x63: case 0x67: case 0x73: case 0x77:
5456 return copy_unpred (gdbarch, insn, dsc);
5457 default:
5458 return copy_undef (gdbarch, insn, dsc);
5459 }
5460 else
5461 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
5462 }
5463
5464 static int
5465 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
5466 struct regcache *regs, struct displaced_step_closure *dsc)
5467 {
5468 if (bit (insn, 27) == 0)
5469 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
5470 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
5471 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
5472 {
5473 case 0x0: case 0x2:
5474 return copy_unmodified (gdbarch, insn, "srs", dsc);
5475
5476 case 0x1: case 0x3:
5477 return copy_unmodified (gdbarch, insn, "rfe", dsc);
5478
5479 case 0x4: case 0x5: case 0x6: case 0x7:
5480 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5481
5482 case 0x8:
5483 switch ((insn & 0xe00000) >> 21)
5484 {
5485 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
5486 /* stc/stc2. */
5487 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5488
5489 case 0x2:
5490 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5491
5492 default:
5493 return copy_undef (gdbarch, insn, dsc);
5494 }
5495
5496 case 0x9:
5497 {
5498 int rn_f = (bits (insn, 16, 19) == 0xf);
5499 switch ((insn & 0xe00000) >> 21)
5500 {
5501 case 0x1: case 0x3:
5502 /* ldc/ldc2 imm (undefined for rn == pc). */
5503 return rn_f ? copy_undef (gdbarch, insn, dsc)
5504 : copy_copro_load_store (gdbarch, insn, regs, dsc);
5505
5506 case 0x2:
5507 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5508
5509 case 0x4: case 0x5: case 0x6: case 0x7:
5510 /* ldc/ldc2 lit (undefined for rn != pc). */
5511 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
5512 : copy_undef (gdbarch, insn, dsc);
5513
5514 default:
5515 return copy_undef (gdbarch, insn, dsc);
5516 }
5517 }
5518
5519 case 0xa:
5520 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
5521
5522 case 0xb:
5523 if (bits (insn, 16, 19) == 0xf)
5524 /* ldc/ldc2 lit. */
5525 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5526 else
5527 return copy_undef (gdbarch, insn, dsc);
5528
5529 case 0xc:
5530 if (bit (insn, 4))
5531 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5532 else
5533 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5534
5535 case 0xd:
5536 if (bit (insn, 4))
5537 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5538 else
5539 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5540
5541 default:
5542 return copy_undef (gdbarch, insn, dsc);
5543 }
5544 }
5545
5546 /* Decode miscellaneous instructions in dp/misc encoding space. */
5547
5548 static int
5549 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
5550 struct regcache *regs, struct displaced_step_closure *dsc)
5551 {
5552 unsigned int op2 = bits (insn, 4, 6);
5553 unsigned int op = bits (insn, 21, 22);
5554 unsigned int op1 = bits (insn, 16, 19);
5555
5556 switch (op2)
5557 {
5558 case 0x0:
5559 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
5560
5561 case 0x1:
5562 if (op == 0x1) /* bx. */
5563 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
5564 else if (op == 0x3)
5565 return copy_unmodified (gdbarch, insn, "clz", dsc);
5566 else
5567 return copy_undef (gdbarch, insn, dsc);
5568
5569 case 0x2:
5570 if (op == 0x1)
5571 /* Not really supported. */
5572 return copy_unmodified (gdbarch, insn, "bxj", dsc);
5573 else
5574 return copy_undef (gdbarch, insn, dsc);
5575
5576 case 0x3:
5577 if (op == 0x1)
5578 return copy_bx_blx_reg (gdbarch, insn, regs, dsc); /* blx register. */
5579 else
5580 return copy_undef (gdbarch, insn, dsc);
5581
5582 case 0x5:
5583 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
5584
5585 case 0x7:
5586 if (op == 0x1)
5587 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
5588 else if (op == 0x3)
5589 /* Not really supported. */
5590 return copy_unmodified (gdbarch, insn, "smc", dsc);
5591
5592 default:
5593 return copy_undef (gdbarch, insn, dsc);
5594 }
5595 }
5596
5597 static int
5598 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5599 struct displaced_step_closure *dsc)
5600 {
5601 if (bit (insn, 25))
5602 switch (bits (insn, 20, 24))
5603 {
5604 case 0x10:
5605 return copy_unmodified (gdbarch, insn, "movw", dsc);
5606
5607 case 0x14:
5608 return copy_unmodified (gdbarch, insn, "movt", dsc);
5609
5610 case 0x12: case 0x16:
5611 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
5612
5613 default:
5614 return copy_alu_imm (gdbarch, insn, regs, dsc);
5615 }
5616 else
5617 {
5618 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
5619
5620 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
5621 return copy_alu_reg (gdbarch, insn, regs, dsc);
5622 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
5623 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
5624 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
5625 return decode_miscellaneous (gdbarch, insn, regs, dsc);
5626 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
5627 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
5628 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
5629 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
5630 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
5631 return copy_unmodified (gdbarch, insn, "synch", dsc);
5632 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
5633 /* 2nd arg means "unprivileged". */
5634 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
5635 dsc);
5636 }
5637
5638 /* Should be unreachable. */
5639 return 1;
5640 }
5641
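/* Decode the load/store word and unsigned byte instruction space.  Judging
   from the decode below, the three trailing arguments passed to
   copy_ldr_str_ldrb_strb select, in turn, load vs. store, byte vs. word
   access, and the unprivileged ("t") form, covering str, strt, ldr, ldrt,
   strb, strbt, ldrb and ldrbt.  */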
5642 static int
5643 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
5644 struct regcache *regs,
5645 struct displaced_step_closure *dsc)
5646 {
5647 int a = bit (insn, 25), b = bit (insn, 4);
5648 uint32_t op1 = bits (insn, 20, 24);
5649 int rn_f = bits (insn, 16, 19) == 0xf;
5650
5651 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
5652 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
5653 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
5654 else if ((!a && (op1 & 0x17) == 0x02)
5655 || (a && (op1 & 0x17) == 0x02 && !b))
5656 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
5657 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
5658 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
5659 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
5660 else if ((!a && (op1 & 0x17) == 0x03)
5661 || (a && (op1 & 0x17) == 0x03 && !b))
5662 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
5663 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
5664 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
5665 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
5666 else if ((!a && (op1 & 0x17) == 0x06)
5667 || (a && (op1 & 0x17) == 0x06 && !b))
5668 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
5669 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
5670 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
5671 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
5672 else if ((!a && (op1 & 0x17) == 0x07)
5673 || (a && (op1 & 0x17) == 0x07 && !b))
5674 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
5675
5676 /* Should be unreachable. */
5677 return 1;
5678 }
5679
5680 static int
5681 decode_media (struct gdbarch *gdbarch, uint32_t insn,
5682 struct displaced_step_closure *dsc)
5683 {
5684 switch (bits (insn, 20, 24))
5685 {
5686 case 0x00: case 0x01: case 0x02: case 0x03:
5687 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
5688
5689 case 0x04: case 0x05: case 0x06: case 0x07:
5690 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
5691
5692 case 0x08: case 0x09: case 0x0a: case 0x0b:
5693 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
5694 return copy_unmodified (gdbarch, insn,
5695 "decode/pack/unpack/saturate/reverse", dsc);
5696
5697 case 0x18:
5698 if (bits (insn, 5, 7) == 0) /* op2. */
5699 {
5700 if (bits (insn, 12, 15) == 0xf)
5701 return copy_unmodified (gdbarch, insn, "usad8", dsc);
5702 else
5703 return copy_unmodified (gdbarch, insn, "usada8", dsc);
5704 }
5705 else
5706 return copy_undef (gdbarch, insn, dsc);
5707
5708 case 0x1a: case 0x1b:
5709 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5710 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
5711 else
5712 return copy_undef (gdbarch, insn, dsc);
5713
5714 case 0x1c: case 0x1d:
5715 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
5716 {
5717 if (bits (insn, 0, 3) == 0xf)
5718 return copy_unmodified (gdbarch, insn, "bfc", dsc);
5719 else
5720 return copy_unmodified (gdbarch, insn, "bfi", dsc);
5721 }
5722 else
5723 return copy_undef (gdbarch, insn, dsc);
5724
5725 case 0x1e: case 0x1f:
5726 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5727 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
5728 else
5729 return copy_undef (gdbarch, insn, dsc);
5730 }
5731
5732 /* Should be unreachable. */
5733 return 1;
5734 }
5735
5736 static int
5737 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
5738 struct regcache *regs, struct displaced_step_closure *dsc)
5739 {
5740 if (bit (insn, 25))
5741 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5742 else
5743 return copy_block_xfer (gdbarch, insn, regs, dsc);
5744 }
5745
5746 static int
5747 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
5748 struct regcache *regs, struct displaced_step_closure *dsc)
5749 {
5750 unsigned int opcode = bits (insn, 20, 24);
5751
5752 switch (opcode)
5753 {
5754 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
5755 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
5756
5757 case 0x08: case 0x0a: case 0x0c: case 0x0e:
5758 case 0x12: case 0x16:
5759 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
5760
5761 case 0x09: case 0x0b: case 0x0d: case 0x0f:
5762 case 0x13: case 0x17:
5763 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
5764
5765 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
5766 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
5767 /* Note: no writeback for these instructions. Bit 25 will always be
5768 zero though (via caller), so the following works OK. */
5769 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5770 }
5771
5772 /* Should be unreachable. */
5773 return 1;
5774 }
5775
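/* Decode the coprocessor / supervisor-call space.  Coprocessor numbers 10
   and 11 -- matched below with (coproc & 0xe) == 0xa -- carry the VFP and
   NEON instructions, which is why they are split out from the generic
   coprocessor load/store and register-transfer cases.  */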
5776 static int
5777 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5778 struct regcache *regs, struct displaced_step_closure *dsc)
5779 {
5780 unsigned int op1 = bits (insn, 20, 25);
5781 int op = bit (insn, 4);
5782 unsigned int coproc = bits (insn, 8, 11);
5783 unsigned int rn = bits (insn, 16, 19);
5784
5785 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
5786 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
5787 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
5788 && (coproc & 0xe) != 0xa)
5789 /* stc/stc2. */
5790 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5791 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
5792 && (coproc & 0xe) != 0xa)
5793 /* ldc/ldc2 imm/lit. */
5794 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5795 else if ((op1 & 0x3e) == 0x00)
5796 return copy_undef (gdbarch, insn, dsc);
5797 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
5798 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
5799 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
5800 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5801 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
5802 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5803 else if ((op1 & 0x30) == 0x20 && !op)
5804 {
5805 if ((coproc & 0xe) == 0xa)
5806 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
5807 else
5808 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5809 }
5810 else if ((op1 & 0x30) == 0x20 && op)
5811 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
5812 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
5813 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5814 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
5815 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5816 else if ((op1 & 0x30) == 0x30)
5817 return copy_svc (gdbarch, insn, to, regs, dsc);
5818 else
5819 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
5820 }
5821
5822 void
5823 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
5824 CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
5825 struct displaced_step_closure *dsc)
5826 {
5827 int err = 0;
5828
5829 if (!displaced_in_arm_mode (regs))
5830 error (_("Displaced stepping is only supported in ARM mode"));
5831
5832 /* Most displaced instructions use a 1-instruction scratch space, so set this
5833 here and override below if/when necessary. */
5834 dsc->numinsns = 1;
5835 dsc->insn_addr = from;
5836 dsc->scratch_base = to;
5837 dsc->cleanup = NULL;
5838 dsc->wrote_to_pc = 0;
5839
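  /* Dispatch on the major opcode fields.  The switch key below packs bits
     27-25 of the instruction into bits 3-1 of the key and bit 4 into bit 0;
     for example an ARM "svc" (bits 27-24 all set) yields a key of 0xe or
     0xf and is handled by decode_svc_copro.  */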
5840 if ((insn & 0xf0000000) == 0xf0000000)
5841 err = decode_unconditional (gdbarch, insn, regs, dsc);
5842 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
5843 {
5844 case 0x0: case 0x1: case 0x2: case 0x3:
5845 err = decode_dp_misc (gdbarch, insn, regs, dsc);
5846 break;
5847
5848 case 0x4: case 0x5: case 0x6:
5849 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
5850 break;
5851
5852 case 0x7:
5853 err = decode_media (gdbarch, insn, dsc);
5854 break;
5855
5856 case 0x8: case 0x9: case 0xa: case 0xb:
5857 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
5858 break;
5859
5860 case 0xc: case 0xd: case 0xe: case 0xf:
5861 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
5862 break;
5863 }
5864
5865 if (err)
5866 internal_error (__FILE__, __LINE__,
5867 _("arm_process_displaced_insn: Instruction decode error"));
5868 }
5869
5870 /* Actually set up the scratch space for a displaced instruction. */
5871
5872 void
5873 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
5874 CORE_ADDR to, struct displaced_step_closure *dsc)
5875 {
5876 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5877 unsigned int i;
5878 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5879
5880 /* Poke modified instruction(s). */
5881 for (i = 0; i < dsc->numinsns; i++)
5882 {
5883 if (debug_displaced)
5884 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
5885 "%.8lx\n", (unsigned long) dsc->modinsn[i],
5886 (unsigned long) to + i * 4);
5887 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
5888 dsc->modinsn[i]);
5889 }
5890
5891 /* Put breakpoint afterwards. */
5892 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
5893 tdep->arm_breakpoint_size);
5894
5895 if (debug_displaced)
5896 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
5897 paddress (gdbarch, from), paddress (gdbarch, to));
5898 }
5899
5900 /* Entry point for copying an instruction into scratch space for displaced
5901 stepping. */
5902
5903 struct displaced_step_closure *
5904 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
5905 CORE_ADDR from, CORE_ADDR to,
5906 struct regcache *regs)
5907 {
5908 struct displaced_step_closure *dsc
5909 = xmalloc (sizeof (struct displaced_step_closure));
5910 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5911 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
5912
5913 if (debug_displaced)
5914 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
5915 "at %.8lx\n", (unsigned long) insn,
5916 (unsigned long) from);
5917
5918 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
5919 arm_displaced_init_closure (gdbarch, from, to, dsc);
5920
5921 return dsc;
5922 }
5923
5924 /* Entry point for cleaning things up after a displaced instruction has been
5925 single-stepped. */
5926
5927 void
5928 arm_displaced_step_fixup (struct gdbarch *gdbarch,
5929 struct displaced_step_closure *dsc,
5930 CORE_ADDR from, CORE_ADDR to,
5931 struct regcache *regs)
5932 {
5933 if (dsc->cleanup)
5934 dsc->cleanup (gdbarch, regs, dsc);
5935
5936 if (!dsc->wrote_to_pc)
5937 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
5938 }
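/* A rough sketch of how the routines above fit together.  The caller (GDB's
   core displaced-stepping support, not this file) does approximately:

     struct displaced_step_closure *dsc
       = arm_displaced_step_copy_insn (gdbarch, from, to, regs);
     ...resume the inferior at TO and let it single-step...
     arm_displaced_step_fixup (gdbarch, dsc, from, to, regs);

   copy_insn decodes the original instruction and writes the (possibly
   modified) copy plus a breakpoint into the scratch area at TO; fixup runs
   any registered cleanup routine and, if the instruction did not itself
   write the PC, resumes execution at the instruction after FROM.  */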
5939
5940 #include "bfd-in2.h"
5941 #include "libcoff.h"
5942
5943 static int
5944 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
5945 {
5946 struct gdbarch *gdbarch = info->application_data;
5947
5948 if (arm_pc_is_thumb (gdbarch, memaddr))
5949 {
5950 static asymbol *asym;
5951 static combined_entry_type ce;
5952 static struct coff_symbol_struct csym;
5953 static struct bfd fake_bfd;
5954 static bfd_target fake_target;
5955
5956 if (csym.native == NULL)
5957 {
5958 /* Create a fake symbol vector containing a Thumb symbol.
5959 This is solely so that the code in print_insn_little_arm()
5960 and print_insn_big_arm() in opcodes/arm-dis.c will detect
5961 the presence of a Thumb symbol and switch to decoding
5962 Thumb instructions. */
5963
5964 fake_target.flavour = bfd_target_coff_flavour;
5965 fake_bfd.xvec = &fake_target;
5966 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
5967 csym.native = &ce;
5968 csym.symbol.the_bfd = &fake_bfd;
5969 csym.symbol.name = "fake";
5970 asym = (asymbol *) & csym;
5971 }
5972
5973 memaddr = UNMAKE_THUMB_ADDR (memaddr);
5974 info->symbols = &asym;
5975 }
5976 else
5977 info->symbols = NULL;
5978
5979 if (info->endian == BFD_ENDIAN_BIG)
5980 return print_insn_big_arm (memaddr, info);
5981 else
5982 return print_insn_little_arm (memaddr, info);
5983 }
5984
5985 /* The following define instruction sequences that will cause ARM
5986 CPUs to take an undefined instruction trap. These are used to
5987 signal a breakpoint to GDB.
5988
5989 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
5990 modes. A different instruction is required for each mode. The ARM
5991 CPUs can also be big or little endian. Thus four different
5992 instructions are needed to support all cases.
5993
5994 Note: ARMv4 defines several new instructions that will take the
5995 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
5996 not in fact add the new instructions. The new undefined
5997 instructions in ARMv4 are all instructions that had no defined
5998 behaviour in earlier chips. There is no guarantee that they will
5999 raise an exception; they may simply be treated as NOPs. In practice, it
6000 may only be safe to rely on instructions matching:
6001
6002 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
6003 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
6004 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
6005
6006 Even this may only be true if the condition predicate is true. The
6007 following use a condition predicate of ALWAYS so it is always TRUE.
6008
6009 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
6010 and NetBSD all use a software interrupt rather than an undefined
6011 instruction to force a trap. This can be handled by the
6012 abi-specific code during establishment of the gdbarch vector. */
6013
6014 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
6015 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
6016 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
6017 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
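/* As a cross-check against the pattern above: the little-endian ARM
   breakpoint bytes {0xFE,0xDE,0xFF,0xE7} form the word 0xe7ffdefe, i.e.
   condition 0b1110 (always), bits 27-25 = 0b011 and bit 4 = 1.  The Thumb
   breakpoint 0xbebe is the "bkpt 0xbe" instruction.  */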
6018
6019 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
6020 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
6021 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
6022 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
6023
6024 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
6025 the program counter value to determine whether a 16-bit or 32-bit
6026 breakpoint should be used. It returns a pointer to a string of
6027 bytes that encode a breakpoint instruction, stores the length of
6028 the string to *lenptr, and adjusts the program counter (if
6029 necessary) to point to the actual memory location where the
6030 breakpoint should be inserted. */
6031
6032 static const unsigned char *
6033 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
6034 {
6035 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6036 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6037
6038 if (arm_pc_is_thumb (gdbarch, *pcptr))
6039 {
6040 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
6041
6042 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
6043 check whether we are replacing a 32-bit instruction. */
6044 if (tdep->thumb2_breakpoint != NULL)
6045 {
6046 gdb_byte buf[2];
6047 if (target_read_memory (*pcptr, buf, 2) == 0)
6048 {
6049 unsigned short inst1;
6050 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
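	  /* The first halfword of a 32-bit Thumb-2 instruction has its top
	     five bits in {0b11101, 0b11110, 0b11111}.  For example 0xf000
	     (the start of a Thumb-2 "bl") passes the test below, while
	     0x4770 ("bx lr") is a plain 16-bit instruction.  */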
6051 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
6052 {
6053 *lenptr = tdep->thumb2_breakpoint_size;
6054 return tdep->thumb2_breakpoint;
6055 }
6056 }
6057 }
6058
6059 *lenptr = tdep->thumb_breakpoint_size;
6060 return tdep->thumb_breakpoint;
6061 }
6062 else
6063 {
6064 *lenptr = tdep->arm_breakpoint_size;
6065 return tdep->arm_breakpoint;
6066 }
6067 }
6068
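/* Like arm_breakpoint_from_pc, but report the breakpoint "kind" used by the
   remote protocol: the byte length returned above (2 for Thumb, 4 for ARM),
   except that a 32-bit Thumb-2 breakpoint is reported as kind 3 so that it
   is not confused with a 32-bit ARM one.  */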
6069 static void
6070 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
6071 int *kindptr)
6072 {
6073 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6074
6075 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
6076
6077 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
6078 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
6079 that this is not confused with a 32-bit ARM breakpoint. */
6080 *kindptr = 3;
6081 }
6082
6083 /* Extract from REGS, which holds the (raw) register state, a
6084 function return value of type TYPE, and copy that, in virtual
6085 format, into VALBUF. */
6086
6087 static void
6088 arm_extract_return_value (struct type *type, struct regcache *regs,
6089 gdb_byte *valbuf)
6090 {
6091 struct gdbarch *gdbarch = get_regcache_arch (regs);
6092 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6093
6094 if (TYPE_CODE_FLT == TYPE_CODE (type))
6095 {
6096 switch (gdbarch_tdep (gdbarch)->fp_model)
6097 {
6098 case ARM_FLOAT_FPA:
6099 {
6100 /* The value is in register F0 in internal format. We need to
6101 extract the raw value and then convert it to the desired
6102 internal type. */
6103 bfd_byte tmpbuf[FP_REGISTER_SIZE];
6104
6105 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
6106 convert_from_extended (floatformat_from_type (type), tmpbuf,
6107 valbuf, gdbarch_byte_order (gdbarch));
6108 }
6109 break;
6110
6111 case ARM_FLOAT_SOFT_FPA:
6112 case ARM_FLOAT_SOFT_VFP:
6113 /* ARM_FLOAT_VFP can arise if this is a variadic function so
6114 not using the VFP ABI code. */
6115 case ARM_FLOAT_VFP:
6116 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
6117 if (TYPE_LENGTH (type) > 4)
6118 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
6119 valbuf + INT_REGISTER_SIZE);
6120 break;
6121
6122 default:
6123 internal_error
6124 (__FILE__, __LINE__,
6125 _("arm_extract_return_value: Floating point model not supported"));
6126 break;
6127 }
6128 }
6129 else if (TYPE_CODE (type) == TYPE_CODE_INT
6130 || TYPE_CODE (type) == TYPE_CODE_CHAR
6131 || TYPE_CODE (type) == TYPE_CODE_BOOL
6132 || TYPE_CODE (type) == TYPE_CODE_PTR
6133 || TYPE_CODE (type) == TYPE_CODE_REF
6134 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6135 {
6136 /* If the type is a plain integer, then the access is
6137 straightforward. Otherwise we have to play around a bit more. */
6138 int len = TYPE_LENGTH (type);
6139 int regno = ARM_A1_REGNUM;
6140 ULONGEST tmp;
6141
6142 while (len > 0)
6143 {
6144 /* By using store_unsigned_integer we avoid having to do
6145 anything special for small big-endian values. */
6146 regcache_cooked_read_unsigned (regs, regno++, &tmp);
6147 store_unsigned_integer (valbuf,
6148 (len > INT_REGISTER_SIZE
6149 ? INT_REGISTER_SIZE : len),
6150 byte_order, tmp);
6151 len -= INT_REGISTER_SIZE;
6152 valbuf += INT_REGISTER_SIZE;
6153 }
6154 }
6155 else
6156 {
6157 /* For a structure or union the behaviour is as if the value had
6158 been stored to word-aligned memory and then loaded into
6159 registers with 32-bit load instruction(s). */
6160 int len = TYPE_LENGTH (type);
6161 int regno = ARM_A1_REGNUM;
6162 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6163
6164 while (len > 0)
6165 {
6166 regcache_cooked_read (regs, regno++, tmpbuf);
6167 memcpy (valbuf, tmpbuf,
6168 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6169 len -= INT_REGISTER_SIZE;
6170 valbuf += INT_REGISTER_SIZE;
6171 }
6172 }
6173 }
6174
6175
6176 /* Will a function return an aggregate type in memory or in a
6177 register? Return 0 if an aggregate type can be returned in a
6178 register, 1 if it must be returned in memory. */
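/* For example, under the APCS rules below a struct holding a single int is
   "integer like" and comes back in r0, while struct { float f; } (a
   floating-point field) or struct { char a, b; } (a field at a non-zero
   offset) must be returned in memory, as must anything larger than a
   word.  */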
6179
6180 static int
6181 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
6182 {
6183 int nRc;
6184 enum type_code code;
6185
6186 CHECK_TYPEDEF (type);
6187
6188 /* In the ARM ABI, "integer" like aggregate types are returned in
6189 registers. For an aggregate type to be integer like, its size
6190 must be less than or equal to INT_REGISTER_SIZE and the
6191 offset of each addressable subfield must be zero. Note that bit
6192 fields are not addressable, and all addressable subfields of
6193 unions always start at offset zero.
6194
6195 This function is based on the behaviour of GCC 2.95.1.
6196 See: gcc/arm.c: arm_return_in_memory() for details.
6197
6198 Note: Versions of GCC before GCC 2.95.2 did not set up the
6199 parameters correctly for a function returning the following
6200 structure: struct { float f;}; This should be returned in memory,
6201 not a register. Richard Earnshaw sent me a patch, but I do not
6202 know of any way to detect if a function like the above has been
6203 compiled with the correct calling convention. */
6204
6205 /* All aggregate types that won't fit in a register must be returned
6206 in memory. */
6207 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
6208 {
6209 return 1;
6210 }
6211
6212 /* The AAPCS says all aggregates not larger than a word are returned
6213 in a register. */
6214 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
6215 return 0;
6216
6217 /* The only aggregate types that can be returned in a register are
6218 structs and unions. Arrays must be returned in memory. */
6219 code = TYPE_CODE (type);
6220 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
6221 {
6222 return 1;
6223 }
6224
6225 /* Assume all other aggregate types can be returned in a register.
6226 Run a check for structures, unions and arrays. */
6227 nRc = 0;
6228
6229 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
6230 {
6231 int i;
6232 /* Need to check if this struct/union is "integer" like. For
6233 this to be true, its size must be less than or equal to
6234 INT_REGISTER_SIZE and the offset of each addressable
6235 subfield must be zero. Note that bit fields are not
6236 addressable, and unions always start at offset zero. If any
6237 of the subfields is a floating point type, the struct/union
6238 cannot be an integer type. */
6239
6240 /* For each field in the object, check:
6241 1) Is it FP? --> yes, nRc = 1;
6242 2) Is it addressable (bitpos != 0) and
6243 not packed (bitsize == 0)?
6244 --> yes, nRc = 1
6245 */
6246
6247 for (i = 0; i < TYPE_NFIELDS (type); i++)
6248 {
6249 enum type_code field_type_code;
6250 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, i)));
6251
6252 /* Is it a floating point type field? */
6253 if (field_type_code == TYPE_CODE_FLT)
6254 {
6255 nRc = 1;
6256 break;
6257 }
6258
6259 /* If bitpos != 0, then we have to care about it. */
6260 if (TYPE_FIELD_BITPOS (type, i) != 0)
6261 {
6262 /* Bitfields are not addressable. If the field bitsize is
6263 zero, then the field is not packed. Hence it cannot be
6264 a bitfield or any other packed type. */
6265 if (TYPE_FIELD_BITSIZE (type, i) == 0)
6266 {
6267 nRc = 1;
6268 break;
6269 }
6270 }
6271 }
6272 }
6273
6274 return nRc;
6275 }
6276
6277 /* Write into appropriate registers a function return value of type
6278 TYPE, given in virtual format. */
6279
6280 static void
6281 arm_store_return_value (struct type *type, struct regcache *regs,
6282 const gdb_byte *valbuf)
6283 {
6284 struct gdbarch *gdbarch = get_regcache_arch (regs);
6285 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6286
6287 if (TYPE_CODE (type) == TYPE_CODE_FLT)
6288 {
6289 char buf[MAX_REGISTER_SIZE];
6290
6291 switch (gdbarch_tdep (gdbarch)->fp_model)
6292 {
6293 case ARM_FLOAT_FPA:
6294
6295 convert_to_extended (floatformat_from_type (type), buf, valbuf,
6296 gdbarch_byte_order (gdbarch));
6297 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
6298 break;
6299
6300 case ARM_FLOAT_SOFT_FPA:
6301 case ARM_FLOAT_SOFT_VFP:
6302 /* ARM_FLOAT_VFP can arise if this is a variadic function so
6303 not using the VFP ABI code. */
6304 case ARM_FLOAT_VFP:
6305 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
6306 if (TYPE_LENGTH (type) > 4)
6307 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
6308 valbuf + INT_REGISTER_SIZE);
6309 break;
6310
6311 default:
6312 internal_error
6313 (__FILE__, __LINE__,
6314 _("arm_store_return_value: Floating point model not supported"));
6315 break;
6316 }
6317 }
6318 else if (TYPE_CODE (type) == TYPE_CODE_INT
6319 || TYPE_CODE (type) == TYPE_CODE_CHAR
6320 || TYPE_CODE (type) == TYPE_CODE_BOOL
6321 || TYPE_CODE (type) == TYPE_CODE_PTR
6322 || TYPE_CODE (type) == TYPE_CODE_REF
6323 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6324 {
6325 if (TYPE_LENGTH (type) <= 4)
6326 {
6327 /* Values of one word or less are zero/sign-extended and
6328 returned in r0. */
6329 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6330 LONGEST val = unpack_long (type, valbuf);
6331
6332 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
6333 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
6334 }
6335 else
6336 {
6337 /* Integral values greater than one word are stored in consecutive
6338 registers starting with r0. This will always be a multiple of
6339 the register size. */
6340 int len = TYPE_LENGTH (type);
6341 int regno = ARM_A1_REGNUM;
6342
6343 while (len > 0)
6344 {
6345 regcache_cooked_write (regs, regno++, valbuf);
6346 len -= INT_REGISTER_SIZE;
6347 valbuf += INT_REGISTER_SIZE;
6348 }
6349 }
6350 }
6351 else
6352 {
6353 /* For a structure or union the behaviour is as if the value had
6354 been stored to word-aligned memory and then loaded into
6355 registers with 32-bit load instruction(s). */
6356 int len = TYPE_LENGTH (type);
6357 int regno = ARM_A1_REGNUM;
6358 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6359
6360 while (len > 0)
6361 {
6362 memcpy (tmpbuf, valbuf,
6363 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6364 regcache_cooked_write (regs, regno++, tmpbuf);
6365 len -= INT_REGISTER_SIZE;
6366 valbuf += INT_REGISTER_SIZE;
6367 }
6368 }
6369 }
6370
6371
6372 /* Handle function return values. */
6373
6374 static enum return_value_convention
6375 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
6376 struct type *valtype, struct regcache *regcache,
6377 gdb_byte *readbuf, const gdb_byte *writebuf)
6378 {
6379 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6380 enum arm_vfp_cprc_base_type vfp_base_type;
6381 int vfp_base_count;
6382
6383 if (arm_vfp_abi_for_function (gdbarch, func_type)
6384 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
6385 {
6386 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
6387 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
6388 int i;
6389 for (i = 0; i < vfp_base_count; i++)
6390 {
6391 if (reg_char == 'q')
6392 {
6393 if (writebuf)
6394 arm_neon_quad_write (gdbarch, regcache, i,
6395 writebuf + i * unit_length);
6396
6397 if (readbuf)
6398 arm_neon_quad_read (gdbarch, regcache, i,
6399 readbuf + i * unit_length);
6400 }
6401 else
6402 {
6403 char name_buf[4];
6404 int regnum;
6405
6406 sprintf (name_buf, "%c%d", reg_char, i);
6407 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6408 strlen (name_buf));
6409 if (writebuf)
6410 regcache_cooked_write (regcache, regnum,
6411 writebuf + i * unit_length);
6412 if (readbuf)
6413 regcache_cooked_read (regcache, regnum,
6414 readbuf + i * unit_length);
6415 }
6416 }
6417 return RETURN_VALUE_REGISTER_CONVENTION;
6418 }
6419
6420 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
6421 || TYPE_CODE (valtype) == TYPE_CODE_UNION
6422 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
6423 {
6424 if (tdep->struct_return == pcc_struct_return
6425 || arm_return_in_memory (gdbarch, valtype))
6426 return RETURN_VALUE_STRUCT_CONVENTION;
6427 }
6428
6429 if (writebuf)
6430 arm_store_return_value (valtype, regcache, writebuf);
6431
6432 if (readbuf)
6433 arm_extract_return_value (valtype, regcache, readbuf);
6434
6435 return RETURN_VALUE_REGISTER_CONVENTION;
6436 }
6437
6438
6439 static int
6440 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
6441 {
6442 struct gdbarch *gdbarch = get_frame_arch (frame);
6443 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6444 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6445 CORE_ADDR jb_addr;
6446 char buf[INT_REGISTER_SIZE];
6447
6448 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
6449
6450 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
6451 INT_REGISTER_SIZE))
6452 return 0;
6453
6454 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
6455 return 1;
6456 }
6457
6458 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
6459 return the target PC. Otherwise return 0. */
6460
6461 CORE_ADDR
6462 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
6463 {
6464 char *name;
6465 int namelen;
6466 CORE_ADDR start_addr;
6467
6468 /* Find the starting address and name of the function containing the PC. */
6469 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
6470 return 0;
6471
6472 /* If PC is in a Thumb call or return stub, return the address of the
6473 target PC, which is in a register. The thunk functions are called
6474 _call_via_xx, where x is the register name. The possible names
6475 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
6476 functions, named __ARM_call_via_r[0-7]. */
6477 if (strncmp (name, "_call_via_", 10) == 0
6478 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
6479 {
6480 /* Use the name suffix to determine which register contains the
6481 target PC. */
6482 static char *table[15] =
6483 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
6484 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
6485 };
6486 int regno;
6487 int offset = strlen (name) - 2;
6488
6489 for (regno = 0; regno <= 14; regno++)
6490 if (strcmp (&name[offset], table[regno]) == 0)
6491 return get_frame_register_unsigned (frame, regno);
6492 }
6493
6494 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
6495 non-interworking calls to foo. We could decode the stubs
6496 to find the target but it's easier to use the symbol table. */
6497 namelen = strlen (name);
6498 if (name[0] == '_' && name[1] == '_'
6499 && ((namelen > 2 + strlen ("_from_thumb")
6500 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
6501 strlen ("_from_thumb")) == 0)
6502 || (namelen > 2 + strlen ("_from_arm")
6503 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
6504 strlen ("_from_arm")) == 0)))
6505 {
6506 char *target_name;
6507 int target_len = namelen - 2;
6508 struct minimal_symbol *minsym;
6509 struct objfile *objfile;
6510 struct obj_section *sec;
6511
6512 if (name[namelen - 1] == 'b')
6513 target_len -= strlen ("_from_thumb");
6514 else
6515 target_len -= strlen ("_from_arm");
6516
6517 target_name = alloca (target_len + 1);
6518 memcpy (target_name, name + 2, target_len);
6519 target_name[target_len] = '\0';
6520
6521 sec = find_pc_section (pc);
6522 objfile = (sec == NULL) ? NULL : sec->objfile;
6523 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
6524 if (minsym != NULL)
6525 return SYMBOL_VALUE_ADDRESS (minsym);
6526 else
6527 return 0;
6528 }
6529
6530 return 0; /* not a stub */
6531 }
6532
6533 static void
6534 set_arm_command (char *args, int from_tty)
6535 {
6536 printf_unfiltered (_("\
6537 \"set arm\" must be followed by an apporpriate subcommand.\n"));
6538 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
6539 }
6540
6541 static void
6542 show_arm_command (char *args, int from_tty)
6543 {
6544 cmd_show_list (showarmcmdlist, from_tty, "");
6545 }
6546
6547 static void
6548 arm_update_current_architecture (void)
6549 {
6550 struct gdbarch_info info;
6551
6552 /* If the current architecture is not ARM, we have nothing to do. */
6553 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
6554 return;
6555
6556 /* Update the architecture. */
6557 gdbarch_info_init (&info);
6558
6559 if (!gdbarch_update_p (info))
6560 internal_error (__FILE__, __LINE__, "could not update architecture");
6561 }
6562
6563 static void
6564 set_fp_model_sfunc (char *args, int from_tty,
6565 struct cmd_list_element *c)
6566 {
6567 enum arm_float_model fp_model;
6568
6569 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
6570 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
6571 {
6572 arm_fp_model = fp_model;
6573 break;
6574 }
6575
6576 if (fp_model == ARM_FLOAT_LAST)
6577 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
6578 current_fp_model);
6579
6580 arm_update_current_architecture ();
6581 }
6582
6583 static void
6584 show_fp_model (struct ui_file *file, int from_tty,
6585 struct cmd_list_element *c, const char *value)
6586 {
6587 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6588
6589 if (arm_fp_model == ARM_FLOAT_AUTO
6590 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6591 fprintf_filtered (file, _("\
6592 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
6593 fp_model_strings[tdep->fp_model]);
6594 else
6595 fprintf_filtered (file, _("\
6596 The current ARM floating point model is \"%s\".\n"),
6597 fp_model_strings[arm_fp_model]);
6598 }
6599
6600 static void
6601 arm_set_abi (char *args, int from_tty,
6602 struct cmd_list_element *c)
6603 {
6604 enum arm_abi_kind arm_abi;
6605
6606 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
6607 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
6608 {
6609 arm_abi_global = arm_abi;
6610 break;
6611 }
6612
6613 if (arm_abi == ARM_ABI_LAST)
6614 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
6615 arm_abi_string);
6616
6617 arm_update_current_architecture ();
6618 }
6619
6620 static void
6621 arm_show_abi (struct ui_file *file, int from_tty,
6622 struct cmd_list_element *c, const char *value)
6623 {
6624 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6625
6626 if (arm_abi_global == ARM_ABI_AUTO
6627 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6628 fprintf_filtered (file, _("\
6629 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
6630 arm_abi_strings[tdep->arm_abi]);
6631 else
6632 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
6633 arm_abi_string);
6634 }
6635
6636 static void
6637 arm_show_fallback_mode (struct ui_file *file, int from_tty,
6638 struct cmd_list_element *c, const char *value)
6639 {
6640 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6641
6642 fprintf_filtered (file, _("\
6643 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
6644 arm_fallback_mode_string);
6645 }
6646
6647 static void
6648 arm_show_force_mode (struct ui_file *file, int from_tty,
6649 struct cmd_list_element *c, const char *value)
6650 {
6651 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6652
6653 fprintf_filtered (file, _("\
6654 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
6655 arm_force_mode_string);
6656 }
6657
6658 /* If the user changes the register disassembly style used for info
6659 register and other commands, we have to also switch the style used
6660 in opcodes for disassembly output. This function is run in the "set
6661 arm disassembly" command, and does that. */
6662
6663 static void
6664 set_disassembly_style_sfunc (char *args, int from_tty,
6665 struct cmd_list_element *c)
6666 {
6667 set_disassembly_style ();
6668 }
6669 \f
6670 /* Return the ARM register name corresponding to register I. */
6671 static const char *
6672 arm_register_name (struct gdbarch *gdbarch, int i)
6673 {
6674 const int num_regs = gdbarch_num_regs (gdbarch);
6675
6676 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
6677 && i >= num_regs && i < num_regs + 32)
6678 {
6679 static const char *const vfp_pseudo_names[] = {
6680 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6681 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6682 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6683 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6684 };
6685
6686 return vfp_pseudo_names[i - num_regs];
6687 }
6688
6689 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
6690 && i >= num_regs + 32 && i < num_regs + 32 + 16)
6691 {
6692 static const char *const neon_pseudo_names[] = {
6693 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6694 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6695 };
6696
6697 return neon_pseudo_names[i - num_regs - 32];
6698 }
6699
6700 if (i >= ARRAY_SIZE (arm_register_names))
6701 /* These registers are only supported on targets which supply
6702 an XML description. */
6703 return "";
6704
6705 return arm_register_names[i];
6706 }
6707
6708 static void
6709 set_disassembly_style (void)
6710 {
6711 int current;
6712
6713 /* Find the style that the user wants. */
6714 for (current = 0; current < num_disassembly_options; current++)
6715 if (disassembly_style == valid_disassembly_styles[current])
6716 break;
6717 gdb_assert (current < num_disassembly_options);
6718
6719 /* Synchronize the disassembler. */
6720 set_arm_regname_option (current);
6721 }
6722
6723 /* Test whether the coff symbol specific value corresponds to a Thumb
6724 function. */
6725
6726 static int
6727 coff_sym_is_thumb (int val)
6728 {
6729 return (val == C_THUMBEXT
6730 || val == C_THUMBSTAT
6731 || val == C_THUMBEXTFUNC
6732 || val == C_THUMBSTATFUNC
6733 || val == C_THUMBLABEL);
6734 }
6735
6736 /* arm_coff_make_msymbol_special()
6737 arm_elf_make_msymbol_special()
6738
6739 These functions test whether the COFF or ELF symbol corresponds to
6740 an address in thumb code, and set a "special" bit in a minimal
6741 symbol to indicate that it does. */
6742
6743 static void
6744 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
6745 {
6746 /* Thumb symbols are of type STT_LOPROC, (synonymous with
6747 STT_ARM_TFUNC). */
6748 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
6749 == STT_LOPROC)
6750 MSYMBOL_SET_SPECIAL (msym);
6751 }
6752
6753 static void
6754 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
6755 {
6756 if (coff_sym_is_thumb (val))
6757 MSYMBOL_SET_SPECIAL (msym);
6758 }
6759
6760 static void
6761 arm_objfile_data_free (struct objfile *objfile, void *arg)
6762 {
6763 struct arm_per_objfile *data = arg;
6764 unsigned int i;
6765
6766 for (i = 0; i < objfile->obfd->section_count; i++)
6767 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
6768 }
6769
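/* Record the ELF mapping symbol SYM of OBJFILE.  Mapping symbols "$a", "$t"
   and "$d" mark the start of runs of ARM code, Thumb code and data
   respectively; they are collected here into per-section vectors, kept
   sorted by address, so that the ARM/Thumb state of an address can be
   looked up later.  */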
6770 static void
6771 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
6772 asymbol *sym)
6773 {
6774 const char *name = bfd_asymbol_name (sym);
6775 struct arm_per_objfile *data;
6776 VEC(arm_mapping_symbol_s) **map_p;
6777 struct arm_mapping_symbol new_map_sym;
6778
6779 gdb_assert (name[0] == '$');
6780 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
6781 return;
6782
6783 data = objfile_data (objfile, arm_objfile_data_key);
6784 if (data == NULL)
6785 {
6786 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
6787 struct arm_per_objfile);
6788 set_objfile_data (objfile, arm_objfile_data_key, data);
6789 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
6790 objfile->obfd->section_count,
6791 VEC(arm_mapping_symbol_s) *);
6792 }
6793 map_p = &data->section_maps[bfd_get_section (sym)->index];
6794
6795 new_map_sym.value = sym->value;
6796 new_map_sym.type = name[1];
6797
6798 /* Assume that most mapping symbols appear in order of increasing
6799 value. If they were randomly distributed, it would be faster to
6800 always push here and then sort at first use. */
6801 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
6802 {
6803 struct arm_mapping_symbol *prev_map_sym;
6804
6805 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
6806 if (prev_map_sym->value >= sym->value)
6807 {
6808 unsigned int idx;
6809 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
6810 arm_compare_mapping_symbols);
6811 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
6812 return;
6813 }
6814 }
6815
6816 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
6817 }
6818
6819 static void
6820 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
6821 {
6822 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6823 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
6824
6825 /* If necessary, set the T bit. */
6826 if (arm_apcs_32)
6827 {
6828 ULONGEST val, t_bit;
6829 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
6830 t_bit = arm_psr_thumb_bit (gdbarch);
6831 if (arm_pc_is_thumb (gdbarch, pc))
6832 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6833 val | t_bit);
6834 else
6835 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6836 val & ~t_bit);
6837 }
6838 }
6839
6840 /* Read the contents of a NEON quad register, by reading from two
6841 double registers. This is used to implement the quad pseudo
6842 registers, and for argument passing in case the quad registers are
6843 missing; vectors are passed in quad registers when using the VFP
6844 ABI, even if a NEON unit is not present. REGNUM is the index of
6845 the quad register, in [0, 15]. */
6846
6847 static void
6848 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
6849 int regnum, gdb_byte *buf)
6850 {
6851 char name_buf[4];
6852 gdb_byte reg_buf[8];
6853 int offset, double_regnum;
6854
6855 sprintf (name_buf, "d%d", regnum << 1);
6856 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6857 strlen (name_buf));
6858
6859 /* d0 is always the least significant half of q0. */
6860 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6861 offset = 8;
6862 else
6863 offset = 0;
6864
6865 regcache_raw_read (regcache, double_regnum, reg_buf);
6866 memcpy (buf + offset, reg_buf, 8);
6867
6868 offset = 8 - offset;
6869 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
6870 memcpy (buf + offset, reg_buf, 8);
6871 }
6872
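/* Read an ARM pseudo register: a single-precision VFP register, or a NEON
   quad register when those are synthesized.  The mapping mirrors the
   hardware aliasing: q<n> overlays d<2n+1>:d<2n>, and s<2m> and s<2m+1> are
   the two halves of d<m>.  For example, on a little-endian target s5 is
   read from the high four bytes of d2.  */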
6873 static void
6874 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
6875 int regnum, gdb_byte *buf)
6876 {
6877 const int num_regs = gdbarch_num_regs (gdbarch);
6878 char name_buf[4];
6879 gdb_byte reg_buf[8];
6880 int offset, double_regnum;
6881
6882 gdb_assert (regnum >= num_regs);
6883 regnum -= num_regs;
6884
6885 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6886 /* Quad-precision register. */
6887 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
6888 else
6889 {
6890 /* Single-precision register. */
6891 gdb_assert (regnum < 32);
6892
6893 /* s0 is always the least significant half of d0. */
6894 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6895 offset = (regnum & 1) ? 0 : 4;
6896 else
6897 offset = (regnum & 1) ? 4 : 0;
6898
6899 sprintf (name_buf, "d%d", regnum >> 1);
6900 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6901 strlen (name_buf));
6902
6903 regcache_raw_read (regcache, double_regnum, reg_buf);
6904 memcpy (buf, reg_buf + offset, 4);
6905 }
6906 }
6907
6908 /* Store the contents of BUF to a NEON quad register, by writing to
6909 two double registers. This is used to implement the quad pseudo
6910 registers, and for argument passing in case the quad registers are
6911 missing; vectors are passed in quad registers when using the VFP
6912 ABI, even if a NEON unit is not present. REGNUM is the index
6913 of the quad register, in [0, 15]. */
6914
6915 static void
6916 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
6917 int regnum, const gdb_byte *buf)
6918 {
6919 char name_buf[4];
6920 gdb_byte reg_buf[8];
6921 int offset, double_regnum;
6922
6923 sprintf (name_buf, "d%d", regnum << 1);
6924 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6925 strlen (name_buf));
6926
6927 /* d0 is always the least significant half of q0. */
6928 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6929 offset = 8;
6930 else
6931 offset = 0;
6932
6933 regcache_raw_write (regcache, double_regnum, buf + offset);
6934 offset = 8 - offset;
6935 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
6936 }
6937
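/* Write an ARM pseudo register: the inverse of arm_pseudo_read above.  For
   a single-precision register the containing double register is read, the
   relevant half is patched, and the whole register is written back, so the
   other half is preserved.  */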
6938 static void
6939 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
6940 int regnum, const gdb_byte *buf)
6941 {
6942 const int num_regs = gdbarch_num_regs (gdbarch);
6943 char name_buf[4];
6944 gdb_byte reg_buf[8];
6945 int offset, double_regnum;
6946
6947 gdb_assert (regnum >= num_regs);
6948 regnum -= num_regs;
6949
6950 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6951 /* Quad-precision register. */
6952 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
6953 else
6954 {
6955 /* Single-precision register. */
6956 gdb_assert (regnum < 32);
6957
6958 /* s0 is always the least significant half of d0. */
6959 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6960 offset = (regnum & 1) ? 0 : 4;
6961 else
6962 offset = (regnum & 1) ? 4 : 0;
6963
6964 sprintf (name_buf, "d%d", regnum >> 1);
6965 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6966 strlen (name_buf));
6967
6968 regcache_raw_read (regcache, double_regnum, reg_buf);
6969 memcpy (reg_buf + offset, buf, 4);
6970 regcache_raw_write (regcache, double_regnum, reg_buf);
6971 }
6972 }
6973
6974 static struct value *
6975 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
6976 {
6977 const int *reg_p = baton;
6978 return value_of_register (*reg_p, frame);
6979 }
6980 \f
6981 static enum gdb_osabi
6982 arm_elf_osabi_sniffer (bfd *abfd)
6983 {
6984 unsigned int elfosabi;
6985 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
6986
6987 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
6988
6989 if (elfosabi == ELFOSABI_ARM)
6990 /* GNU tools use this value. Check note sections in this case,
6991 as well. */
6992 bfd_map_over_sections (abfd,
6993 generic_elf_osabi_sniff_abi_tag_sections,
6994 &osabi);
6995
6996 /* Anything else will be handled by the generic ELF sniffer. */
6997 return osabi;
6998 }
6999
7000 \f
7001 /* Initialize the current architecture based on INFO. If possible,
7002 re-use an architecture from ARCHES, which is a list of
7003 architectures already created during this debugging session.
7004
7005 Called e.g. at program startup, when reading a core file, and when
7006 reading a binary file. */
7007
7008 static struct gdbarch *
7009 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
7010 {
7011 struct gdbarch_tdep *tdep;
7012 struct gdbarch *gdbarch;
7013 struct gdbarch_list *best_arch;
7014 enum arm_abi_kind arm_abi = arm_abi_global;
7015 enum arm_float_model fp_model = arm_fp_model;
7016 struct tdesc_arch_data *tdesc_data = NULL;
7017 int i, is_m = 0;
7018 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
7019 int have_neon = 0;
7020 int have_fpa_registers = 1;
7021 const struct target_desc *tdesc = info.target_desc;
7022
7023 /* If we have an object to base this architecture on, try to determine
7024 its ABI. */
7025
7026 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
7027 {
7028 int ei_osabi, e_flags;
7029
7030 switch (bfd_get_flavour (info.abfd))
7031 {
7032 case bfd_target_aout_flavour:
7033 /* Assume it's an old APCS-style ABI. */
7034 arm_abi = ARM_ABI_APCS;
7035 break;
7036
7037 case bfd_target_coff_flavour:
7038 /* Assume it's an old APCS-style ABI. */
7039 /* XXX WinCE? */
7040 arm_abi = ARM_ABI_APCS;
7041 break;
7042
7043 case bfd_target_elf_flavour:
7044 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
7045 e_flags = elf_elfheader (info.abfd)->e_flags;
7046
7047 if (ei_osabi == ELFOSABI_ARM)
7048 {
7049 /* GNU tools used to use this value, but do not for EABI
7050 objects. There's nowhere to tag an EABI version
7051 anyway, so assume APCS. */
7052 arm_abi = ARM_ABI_APCS;
7053 }
7054 else if (ei_osabi == ELFOSABI_NONE)
7055 {
7056 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
7057 int attr_arch, attr_profile;
7058
7059 switch (eabi_ver)
7060 {
7061 case EF_ARM_EABI_UNKNOWN:
7062 /* Assume GNU tools. */
7063 arm_abi = ARM_ABI_APCS;
7064 break;
7065
7066 case EF_ARM_EABI_VER4:
7067 case EF_ARM_EABI_VER5:
7068 arm_abi = ARM_ABI_AAPCS;
7069 /* EABI binaries default to VFP float ordering.
7070 They may also contain build attributes that can
7071 be used to identify if the VFP argument-passing
7072 ABI is in use. */
7073 if (fp_model == ARM_FLOAT_AUTO)
7074 {
7075 #ifdef HAVE_ELF
7076 switch (bfd_elf_get_obj_attr_int (info.abfd,
7077 OBJ_ATTR_PROC,
7078 Tag_ABI_VFP_args))
7079 {
7080 case 0:
7081 /* "The user intended FP parameter/result
7082 passing to conform to AAPCS, base
7083 variant". */
7084 fp_model = ARM_FLOAT_SOFT_VFP;
7085 break;
7086 case 1:
7087 /* "The user intended FP parameter/result
7088 passing to conform to AAPCS, VFP
7089 variant". */
7090 fp_model = ARM_FLOAT_VFP;
7091 break;
7092 case 2:
7093 /* "The user intended FP parameter/result
7094 passing to conform to tool chain-specific
7095 conventions" - we don't know any such
7096 conventions, so leave it as "auto". */
7097 break;
7098 default:
7099 /* Attribute value not mentioned in the
7100 October 2008 ABI, so leave it as
7101 "auto". */
7102 break;
7103 }
7104 #else
7105 fp_model = ARM_FLOAT_SOFT_VFP;
7106 #endif
7107 }
7108 break;
7109
7110 default:
7111 /* Leave it as "auto". */
7112 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
7113 break;
7114 }
7115
7116 #ifdef HAVE_ELF
7117 /* Detect M-profile programs. This only works if the
7118 executable file includes build attributes; GCC does
7119 copy them to the executable, but e.g. RealView does
7120 not. */
7121 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7122 Tag_CPU_arch);
7123 attr_profile = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7124 Tag_CPU_arch_profile);
7125 /* GCC specifies the profile for v6-M; RealView only
7126 specifies the profile for architectures starting with
7127 V7 (as opposed to architectures with a tag
7128 numerically greater than TAG_CPU_ARCH_V7). */
7129 if (!tdesc_has_registers (tdesc)
7130 && (attr_arch == TAG_CPU_ARCH_V6_M
7131 || attr_arch == TAG_CPU_ARCH_V6S_M
7132 || attr_profile == 'M'))
7133 tdesc = tdesc_arm_with_m;
7134 #endif
7135 }
7136
7137 if (fp_model == ARM_FLOAT_AUTO)
7138 {
7139 int e_flags = elf_elfheader (info.abfd)->e_flags;
7140
7141 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
7142 {
7143 case 0:
7144 /* Leave it as "auto". Strictly speaking this case
7145 means FPA, but almost nobody uses that now, and
7146 many toolchains fail to set the appropriate bits
7147 for the floating-point model they use. */
7148 break;
7149 case EF_ARM_SOFT_FLOAT:
7150 fp_model = ARM_FLOAT_SOFT_FPA;
7151 break;
7152 case EF_ARM_VFP_FLOAT:
7153 fp_model = ARM_FLOAT_VFP;
7154 break;
7155 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
7156 fp_model = ARM_FLOAT_SOFT_VFP;
7157 break;
7158 }
7159 }
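/* For illustration only, a sketch that is not part of the original
   source: the legacy e_flags decoding above, written as a standalone
   helper (the flag macros come from elf/arm.h; the returned names
   mirror the "set arm fpu" values):

     static const char *
     legacy_float_model (unsigned long e_flags)
     {
       switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
         {
         case EF_ARM_SOFT_FLOAT:                    return "softfpa";
         case EF_ARM_VFP_FLOAT:                     return "vfp";
         case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT: return "softvfp";
         default:                                   return "auto";
         }
     }

   Neither bit set historically meant FPA, which the code above
   deliberately leaves as "auto".  */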
7160
7161 if (e_flags & EF_ARM_BE8)
7162 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
7163
7164 break;
7165
7166 default:
7167 /* Leave it as "auto". */
7168 break;
7169 }
7170 }
7171
7172 /* Check any target description for validity. */
7173 if (tdesc_has_registers (tdesc))
7174 {
7175 /* For most registers we require GDB's default names, but we also allow
7176 the numeric names for sp / lr / pc as a convenience. */
7177 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
7178 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
7179 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
7180
7181 const struct tdesc_feature *feature;
7182 int valid_p;
7183
7184 feature = tdesc_find_feature (tdesc,
7185 "org.gnu.gdb.arm.core");
7186 if (feature == NULL)
7187 {
7188 feature = tdesc_find_feature (tdesc,
7189 "org.gnu.gdb.arm.m-profile");
7190 if (feature == NULL)
7191 return NULL;
7192 else
7193 is_m = 1;
7194 }
7195
7196 tdesc_data = tdesc_data_alloc ();
7197
7198 valid_p = 1;
7199 for (i = 0; i < ARM_SP_REGNUM; i++)
7200 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7201 arm_register_names[i]);
7202 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7203 ARM_SP_REGNUM,
7204 arm_sp_names);
7205 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7206 ARM_LR_REGNUM,
7207 arm_lr_names);
7208 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7209 ARM_PC_REGNUM,
7210 arm_pc_names);
7211 if (is_m)
7212 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7213 ARM_PS_REGNUM, "xpsr");
7214 else
7215 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7216 ARM_PS_REGNUM, "cpsr");
7217
7218 if (!valid_p)
7219 {
7220 tdesc_data_cleanup (tdesc_data);
7221 return NULL;
7222 }
7223
7224 feature = tdesc_find_feature (tdesc,
7225 "org.gnu.gdb.arm.fpa");
7226 if (feature != NULL)
7227 {
7228 valid_p = 1;
7229 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
7230 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7231 arm_register_names[i]);
7232 if (!valid_p)
7233 {
7234 tdesc_data_cleanup (tdesc_data);
7235 return NULL;
7236 }
7237 }
7238 else
7239 have_fpa_registers = 0;
7240
7241 feature = tdesc_find_feature (tdesc,
7242 "org.gnu.gdb.xscale.iwmmxt");
7243 if (feature != NULL)
7244 {
7245 static const char *const iwmmxt_names[] = {
7246 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
7247 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
7248 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
7249 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
7250 };
7251
7252 valid_p = 1;
7253 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
7254 valid_p
7255 &= tdesc_numbered_register (feature, tdesc_data, i,
7256 iwmmxt_names[i - ARM_WR0_REGNUM]);
7257
7258 /* Check for the control registers, but do not fail if they
7259 are missing. */
7260 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
7261 tdesc_numbered_register (feature, tdesc_data, i,
7262 iwmmxt_names[i - ARM_WR0_REGNUM]);
7263
7264 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
7265 valid_p
7266 &= tdesc_numbered_register (feature, tdesc_data, i,
7267 iwmmxt_names[i - ARM_WR0_REGNUM]);
7268
7269 if (!valid_p)
7270 {
7271 tdesc_data_cleanup (tdesc_data);
7272 return NULL;
7273 }
7274 }
7275
7276 /* If we have a VFP unit, check whether the single precision registers
7277 are present. If not, then we will synthesize them as pseudo
7278 registers. */
7279 feature = tdesc_find_feature (tdesc,
7280 "org.gnu.gdb.arm.vfp");
7281 if (feature != NULL)
7282 {
7283 static const char *const vfp_double_names[] = {
7284 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
7285 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
7286 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
7287 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
7288 };
7289
7290 /* Require the double precision registers. There must be either
7291 16 or 32. */
7292 valid_p = 1;
7293 for (i = 0; i < 32; i++)
7294 {
7295 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7296 ARM_D0_REGNUM + i,
7297 vfp_double_names[i]);
7298 if (!valid_p)
7299 break;
7300 }
7301
7302 if (!valid_p && i != 16)
7303 {
7304 tdesc_data_cleanup (tdesc_data);
7305 return NULL;
7306 }
7307
7308 if (tdesc_unnumbered_register (feature, "s0") == 0)
7309 have_vfp_pseudos = 1;
7310
7311 have_vfp_registers = 1;
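/* For illustration only, a sketch that is not part of the original
   source: when the stub supplies only the double-precision registers,
   the single-precision registers become pseudos that alias halves of
   the doubles.  Assuming the usual VFP overlap (s[2n] and s[2n+1]
   share d[n]), the mapping for a little-endian target could be
   computed as:

     int s = 17;                      a hypothetical s-register index
     int d = s / 2;                   the double it lives in (d8)
     int byte_offset = (s % 2) * 4;   low or high half of that double

   The pseudo-register read/write hooks installed further down
   (arm_pseudo_read and arm_pseudo_write) implement the real mapping,
   including the big-endian case.  */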
7312
7313 /* If we have VFP, also check for NEON. The architecture allows
7314 NEON without VFP (integer vector operations only), but GDB
7315 does not support that. */
7316 feature = tdesc_find_feature (tdesc,
7317 "org.gnu.gdb.arm.neon");
7318 if (feature != NULL)
7319 {
7320 /* NEON requires 32 double-precision registers. */
7321 if (i != 32)
7322 {
7323 tdesc_data_cleanup (tdesc_data);
7324 return NULL;
7325 }
7326
7327 /* If there are quad registers defined by the stub, use
7328 their type; otherwise (normally) provide them with
7329 the default type. */
7330 if (tdesc_unnumbered_register (feature, "q0") == 0)
7331 have_neon_pseudos = 1;
7332
7333 have_neon = 1;
7334 }
7335 }
7336 }
7337
7338 /* If there is already a candidate, use it. */
7339 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
7340 best_arch != NULL;
7341 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
7342 {
7343 if (arm_abi != ARM_ABI_AUTO
7344 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
7345 continue;
7346
7347 if (fp_model != ARM_FLOAT_AUTO
7348 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
7349 continue;
7350
7351 /* There are various other properties in tdep that we do not
7352 need to check here: those derived from a target description,
7353 since gdbarches with a different target description are
7354 automatically disqualified. */
7355
7356 /* Do check is_m, though, since it might come from the binary. */
7357 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
7358 continue;
7359
7360 /* Found a match. */
7361 break;
7362 }
7363
7364 if (best_arch != NULL)
7365 {
7366 if (tdesc_data != NULL)
7367 tdesc_data_cleanup (tdesc_data);
7368 return best_arch->gdbarch;
7369 }
7370
7371 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
7372 gdbarch = gdbarch_alloc (&info, tdep);
7373
7374 /* Record additional information about the architecture we are defining.
7375 These are gdbarch discriminators, like the OSABI. */
7376 tdep->arm_abi = arm_abi;
7377 tdep->fp_model = fp_model;
7378 tdep->is_m = is_m;
7379 tdep->have_fpa_registers = have_fpa_registers;
7380 tdep->have_vfp_registers = have_vfp_registers;
7381 tdep->have_vfp_pseudos = have_vfp_pseudos;
7382 tdep->have_neon_pseudos = have_neon_pseudos;
7383 tdep->have_neon = have_neon;
7384
7385 /* Breakpoints. */
7386 switch (info.byte_order_for_code)
7387 {
7388 case BFD_ENDIAN_BIG:
7389 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
7390 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
7391 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
7392 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
7393
7394 break;
7395
7396 case BFD_ENDIAN_LITTLE:
7397 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
7398 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
7399 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
7400 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
7401
7402 break;
7403
7404 default:
7405 internal_error (__FILE__, __LINE__,
7406 _("arm_gdbarch_init: bad byte order for float format"));
7407 }
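/* For illustration only, a sketch that is not part of the original
   source: the _le/_be arrays selected above hold the same breakpoint
   opcodes in opposite byte orders, because breakpoints must be
   written in the target's instruction byte order, which BE8 (handled
   earlier) decouples from the data byte order.  Packing a
   hypothetical 32-bit opcode OPC for a given byte_order_for_code:

     unsigned char bp[4];
     if (byte_order_for_code == BFD_ENDIAN_LITTLE)
       {
         bp[0] = OPC & 0xff;          bp[1] = (OPC >> 8) & 0xff;
         bp[2] = (OPC >> 16) & 0xff;  bp[3] = (OPC >> 24) & 0xff;
       }
     else
       {
         bp[0] = (OPC >> 24) & 0xff;  bp[1] = (OPC >> 16) & 0xff;
         bp[2] = (OPC >> 8) & 0xff;   bp[3] = OPC & 0xff;
       }

   (OPC and byte_order_for_code stand in for the real opcode tables
   and info.byte_order_for_code.)  */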
7408
7409 /* On ARM targets char defaults to unsigned. */
7410 set_gdbarch_char_signed (gdbarch, 0);
7411
7412 /* Note: for displaced stepping, this includes the breakpoint, and one word
7413 of additional scratch space. This setting isn't used for anything besides
7414 displaced stepping at present. */
7415 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
7416
7417 /* This should be low enough for everything. */
7418 tdep->lowest_pc = 0x20;
7419 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
7420
7421 /* The default, for both APCS and AAPCS, is to return small
7422 structures in registers. */
7423 tdep->struct_return = reg_struct_return;
7424
7425 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
7426 set_gdbarch_frame_align (gdbarch, arm_frame_align);
7427
7428 set_gdbarch_write_pc (gdbarch, arm_write_pc);
7429
7430 /* Frame handling. */
7431 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
7432 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
7433 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
7434
7435 frame_base_set_default (gdbarch, &arm_normal_base);
7436
7437 /* Address manipulation. */
7438 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
7439 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
7440
7441 /* Advance PC across function entry code. */
7442 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
7443
7444 /* Detect whether PC is in function epilogue. */
7445 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
7446
7447 /* Skip trampolines. */
7448 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
7449
7450 /* The stack grows downward. */
7451 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
7452
7453 /* Breakpoint manipulation. */
7454 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
7455 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
7456 arm_remote_breakpoint_from_pc);
7457
7458 /* Information about registers, etc. */
7459 set_gdbarch_deprecated_fp_regnum (gdbarch, ARM_FP_REGNUM); /* ??? */
7460 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
7461 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
7462 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
7463 set_gdbarch_register_type (gdbarch, arm_register_type);
7464
7465 /* This "info float" is FPA-specific. Use the generic version if we
7466 do not have FPA. */
7467 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
7468 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
7469
7470 /* Internal <-> external register number maps. */
7471 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
7472 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
7473
7474 set_gdbarch_register_name (gdbarch, arm_register_name);
7475
7476 /* Returning results. */
7477 set_gdbarch_return_value (gdbarch, arm_return_value);
7478
7479 /* Disassembly. */
7480 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
7481
7482 /* Minsymbol frobbing. */
7483 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
7484 set_gdbarch_coff_make_msymbol_special (gdbarch,
7485 arm_coff_make_msymbol_special);
7486 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
7487
7488 /* Thumb-2 IT block support. */
7489 set_gdbarch_adjust_breakpoint_address (gdbarch,
7490 arm_adjust_breakpoint_address);
7491
7492 /* Virtual tables. */
7493 set_gdbarch_vbit_in_delta (gdbarch, 1);
7494
7495 /* Hook in the ABI-specific overrides, if they have been registered. */
7496 gdbarch_init_osabi (info, gdbarch);
7497
7498 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
7499
7500 /* Add some default predicates. */
7501 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
7502 dwarf2_append_unwinders (gdbarch);
7503 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
7504
7505 /* Now that we have tuned the configuration, set a few final things,
7506 based on what the OS ABI has told us. */
7507
7508 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
7509 binaries are always marked. */
7510 if (tdep->arm_abi == ARM_ABI_AUTO)
7511 tdep->arm_abi = ARM_ABI_APCS;
7512
7513 /* We used to default to FPA for generic ARM, but almost nobody
7514 uses that now, and we now provide a way for the user to force
7515 the model. So default to the most useful variant. */
7516 if (tdep->fp_model == ARM_FLOAT_AUTO)
7517 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
7518
7519 if (tdep->jb_pc >= 0)
7520 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
7521
7522 /* Floating point sizes and format. */
7523 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
7524 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
7525 {
7526 set_gdbarch_double_format
7527 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7528 set_gdbarch_long_double_format
7529 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7530 }
7531 else
7532 {
7533 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
7534 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
7535 }
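/* For illustration only, a note that is not part of the original
   source: "littlebyte_bigword" is the historical FPA layout on
   little-endian ARM - each 32-bit word is stored little-endian, but
   the most significant word comes first.  For the value 1.0 (IEEE
   bits 0x3ff0000000000000) the two layouts are:

     FPA / softfpa (mixed-endian):  00 00 f0 3f  00 00 00 00
     VFP / softvfp (pure IEEE LE):  00 00 00 00  00 00 f0 3f

   On a little-endian host the mixed form is just the native form
   with its two 32-bit words swapped:

     double one = 1.0;
     unsigned char ieee[8], fpa[8];
     memcpy (ieee, &one, 8);
     memcpy (fpa, ieee + 4, 4);
     memcpy (fpa + 4, ieee, 4);

   which is why the double and long double formats are overridden
   here rather than left at the plain little-endian default.  */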
7536
7537 if (have_vfp_pseudos)
7538 {
7539 /* NOTE: These are the only pseudo registers used by
7540 the ARM target at the moment. If more are added, a
7541 little more care in numbering will be needed. */
7542
7543 int num_pseudos = 32;
7544 if (have_neon_pseudos)
7545 num_pseudos += 16;
7546 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
7547 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
7548 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
7549 }
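/* For illustration only, a sketch that is not part of the original
   source: the pseudo registers are numbered after all of the raw
   registers, s0-s31 first and then (with NEON) q0-q15.  A name for a
   pseudo index IDX, counted from the first pseudo, could therefore
   be formed as:

     char name[8];
     if (idx < 32)
       snprintf (name, sizeof (name), "s%d", idx);
     else
       snprintf (name, sizeof (name), "q%d", idx - 32);

   The real naming is done by arm_register_name, installed just below
   as the target-description pseudo-register name hook.  */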
7550
7551 if (tdesc_data)
7552 {
7553 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
7554
7555 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
7556
7557 /* Override tdesc_register_type to adjust the types of VFP
7558 registers for NEON. */
7559 set_gdbarch_register_type (gdbarch, arm_register_type);
7560 }
7561
7562 /* Add standard register aliases. We add aliases even for those
7563 names which are used by the current architecture - it's simpler,
7564 and does no harm, since nothing ever lists user registers. */
7565 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
7566 user_reg_add (gdbarch, arm_register_aliases[i].name,
7567 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
7568
7569 return gdbarch;
7570 }
7571
7572 static void
7573 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
7574 {
7575 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7576
7577 if (tdep == NULL)
7578 return;
7579
7580 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
7581 (unsigned long) tdep->lowest_pc);
7582 }
7583
7584 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
7585
7586 void
7587 _initialize_arm_tdep (void)
7588 {
7589 struct ui_file *stb;
7590 long length;
7591 struct cmd_list_element *new_set, *new_show;
7592 const char *setname;
7593 const char *setdesc;
7594 const char *const *regnames;
7595 int numregs, i, j;
7596 static char *helptext;
7597 char regdesc[1024], *rdptr = regdesc;
7598 size_t rest = sizeof (regdesc);
7599
7600 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
7601
7602 arm_objfile_data_key
7603 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
7604
7605 /* Register an ELF OS ABI sniffer for ARM binaries. */
7606 gdbarch_register_osabi_sniffer (bfd_arch_arm,
7607 bfd_target_elf_flavour,
7608 arm_elf_osabi_sniffer);
7609
7610 /* Initialize the standard target descriptions. */
7611 initialize_tdesc_arm_with_m ();
7612
7613 /* Get the number of possible sets of register names defined in opcodes. */
7614 num_disassembly_options = get_arm_regname_num_options ();
7615
7616 /* Add root prefix command for all "set arm"/"show arm" commands. */
7617 add_prefix_cmd ("arm", no_class, set_arm_command,
7618 _("Various ARM-specific commands."),
7619 &setarmcmdlist, "set arm ", 0, &setlist);
7620
7621 add_prefix_cmd ("arm", no_class, show_arm_command,
7622 _("Various ARM-specific commands."),
7623 &showarmcmdlist, "show arm ", 0, &showlist);
7624
7625 /* Sync the opcode insn printer with our register viewer. */
7626 parse_arm_disassembler_option ("reg-names-std");
7627
7628 /* Initialize the array that will be passed to
7629 add_setshow_enum_cmd(). */
7630 valid_disassembly_styles
7631 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
7632 for (i = 0; i < num_disassembly_options; i++)
7633 {
7634 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
7635 valid_disassembly_styles[i] = setname;
7636 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
7637 rdptr += length;
7638 rest -= length;
7639 /* When we find the default names, tell the disassembler to use
7640 them. */
7641 if (!strcmp (setname, "std"))
7642 {
7643 disassembly_style = setname;
7644 set_arm_regname_option (i);
7645 }
7646 }
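/* For illustration only, a note that is not part of the original
   source: the rdptr/rest accumulation above assumes every
   description fits in the remaining space.  C99 snprintf returns the
   length that would have been written, so a defensive variant of the
   loop body would clamp the result before advancing:

     int n = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
     if (n < 0)
       n = 0;
     else if ((size_t) n >= rest)
       n = rest ? rest - 1 : 0;
     rdptr += n;
     rest -= n;
*/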
7647 /* Mark the end of valid options. */
7648 valid_disassembly_styles[num_disassembly_options] = NULL;
7649
7650 /* Create the help text. */
7651 stb = mem_fileopen ();
7652 fprintf_unfiltered (stb, "%s%s%s",
7653 _("The valid values are:\n"),
7654 regdesc,
7655 _("The default is \"std\"."));
7656 helptext = ui_file_xstrdup (stb, NULL);
7657 ui_file_delete (stb);
7658
7659 add_setshow_enum_cmd ("disassembler", no_class,
7660 valid_disassembly_styles, &disassembly_style,
7661 _("Set the disassembly style."),
7662 _("Show the disassembly style."),
7663 helptext,
7664 set_disassembly_style_sfunc,
7665 NULL, /* FIXME: i18n: The disassembly style is \"%s\". */
7666 &setarmcmdlist, &showarmcmdlist);
7667
7668 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
7669 _("Set usage of ARM 32-bit mode."),
7670 _("Show usage of ARM 32-bit mode."),
7671 _("When off, a 26-bit PC will be used."),
7672 NULL,
7673 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
7674 &setarmcmdlist, &showarmcmdlist);
7675
7676 /* Add a command to allow the user to force the FPU model. */
7677 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
7678 _("Set the floating point type."),
7679 _("Show the floating point type."),
7680 _("auto - Determine the FP typefrom the OS-ABI.\n\
7681 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7682 fpa - FPA co-processor (GCC compiled).\n\
7683 softvfp - Software FP with pure-endian doubles.\n\
7684 vfp - VFP co-processor."),
7685 set_fp_model_sfunc, show_fp_model,
7686 &setarmcmdlist, &showarmcmdlist);
7687
7688 /* Add a command to allow the user to force the ABI. */
7689 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
7690 _("Set the ABI."),
7691 _("Show the ABI."),
7692 NULL, arm_set_abi, arm_show_abi,
7693 &setarmcmdlist, &showarmcmdlist);
7694
7695 /* Add two commands to allow the user to force the assumed
7696 execution mode. */
7697 add_setshow_enum_cmd ("fallback-mode", class_support,
7698 arm_mode_strings, &arm_fallback_mode_string,
7699 _("Set the mode assumed when symbols are unavailable."),
7700 _("Show the mode assumed when symbols are unavailable."),
7701 NULL, NULL, arm_show_fallback_mode,
7702 &setarmcmdlist, &showarmcmdlist);
7703 add_setshow_enum_cmd ("force-mode", class_support,
7704 arm_mode_strings, &arm_force_mode_string,
7705 _("Set the mode assumed even when symbols are available."),
7706 _("Show the mode assumed even when symbols are available."),
7707 NULL, NULL, arm_show_force_mode,
7708 &setarmcmdlist, &showarmcmdlist);
7709
7710 /* Debugging flag. */
7711 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
7712 _("Set ARM debugging."),
7713 _("Show ARM debugging."),
7714 _("When on, arm-specific debugging is enabled."),
7715 NULL,
7716 NULL, /* FIXME: i18n: "ARM debugging is %s." */
7717 &setdebuglist, &showdebuglist);
7718 }