1 /* Common target dependent code for GDB on ARM systems.
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include <ctype.h> /* XXX for isupper (). */
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
32 #include "reggroups.h"
35 #include "arch-utils.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
41 #include "dwarf2-frame.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
49 #include "gdb/sim-arm.h"
52 #include "coff/internal.h"
55 #include "gdb_assert.h"
58 #include "features/arm-with-m.c"
59 #include "features/arm-with-iwmmxt.c"
60 #include "features/arm-with-vfpv2.c"
61 #include "features/arm-with-vfpv3.c"
62 #include "features/arm-with-neon.c"
66 /* Macros for setting and testing a bit in a minimal symbol that marks
67 it as Thumb function. The MSB of the minimal symbol's "info" field
68 is used for this purpose.
70 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
71 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
73 #define MSYMBOL_SET_SPECIAL(msym) \
74 MSYMBOL_TARGET_FLAG_1 (msym) = 1
/* Tests the same target flag that MSYMBOL_SET_SPECIAL sets; nonzero
   means the minimal symbol was marked as a Thumb function.  */
76 #define MSYMBOL_IS_SPECIAL(msym) \
77 MSYMBOL_TARGET_FLAG_1 (msym)
79 /* Per-objfile data used for mapping symbols. */
/* Key for looking up the per-objfile arm_per_objfile data below.  */
80 static const struct objfile_data
*arm_objfile_data_key
;
/* One ELF mapping symbol ($a/$t/$d).  NOTE(review): the struct members
   (section-relative value and type) appear to have been dropped by the
   extraction — verify against the original file.  */
82 struct arm_mapping_symbol
87 typedef struct arm_mapping_symbol arm_mapping_symbol_s
;
88 DEF_VEC_O(arm_mapping_symbol_s
);
/* Per-objfile table: one sorted vector of mapping symbols per BFD
   section, indexed by section index.  NOTE(review): remaining members,
   if any, are not visible here.  */
90 struct arm_per_objfile
92 VEC(arm_mapping_symbol_s
) **section_maps
;
95 /* The list of available "set arm ..." and "show arm ..." commands. */
96 static struct cmd_list_element
*setarmcmdlist
= NULL
;
97 static struct cmd_list_element
*showarmcmdlist
= NULL
;
99 /* The type of floating-point to use. Keep this in sync with enum
100 arm_float_model, and the help string in _initialize_arm_tdep. */
/* NOTE(review): the string-array initializer is missing from this
   extraction — confirm against the original file.  */
101 static const char *fp_model_strings
[] =
111 /* A variable that can be configured by the user. */
112 static enum arm_float_model arm_fp_model
= ARM_FLOAT_AUTO
;
113 static const char *current_fp_model
= "auto";
115 /* The ABI to use. Keep this in sync with arm_abi_kind. */
116 static const char *arm_abi_strings
[] =
124 /* A variable that can be configured by the user. */
125 static enum arm_abi_kind arm_abi_global
= ARM_ABI_AUTO
;
126 static const char *arm_abi_string
= "auto";
128 /* The execution mode to assume. */
129 static const char *arm_mode_strings
[] =
137 static const char *arm_fallback_mode_string
= "auto";
138 static const char *arm_force_mode_string
= "auto";
140 /* Internal override of the execution mode. -1 means no override,
141 0 means override to ARM mode, 1 means override to Thumb mode.
142 The effect is the same as if arm_force_mode has been set by the
143 user (except the internal override has precedence over a user's
144 arm_force_mode override). */
145 static int arm_override_mode
= -1;
147 /* Number of different reg name sets (options). */
148 static int num_disassembly_options
;
150 /* The standard register names, and all the valid aliases for them. Note
151 that `fp', `sp' and `pc' are not added in this alias list, because they
152 have been added as builtin user registers in
153 std-regs.c:_initialize_frame_reg. */
/* NOTE(review): the alias table's element type and entries are missing
   from this extraction; only the closing brace and category comments
   survive.  */
158 } arm_register_aliases
[] = {
159 /* Basic register numbers. */
176 /* Synonyms (argument and variable registers). */
189 /* Other platform-specific names for r9. */
195 /* Names used by GCC (not listed in the ARM EABI). */
197 /* A special name from the older ATPCS. */
/* Canonical GDB register names, indexed by ARM register number.  */
201 static const char *const arm_register_names
[] =
202 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
203 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
204 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
205 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
206 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
207 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
208 "fps", "cpsr" }; /* 24 25 */
210 /* Valid register name styles. */
211 static const char **valid_disassembly_styles
;
213 /* Disassembly style to use. Default to "std" register names. */
214 static const char *disassembly_style
;
216 /* This is used to keep the bfd arch_info in sync with the disassembly
/* Forward declarations for helpers defined later in the file.  */
218 static void set_disassembly_style_sfunc(char *, int,
219 struct cmd_list_element
*);
220 static void set_disassembly_style (void);
222 static void convert_from_extended (const struct floatformat
*, const void *,
224 static void convert_to_extended (const struct floatformat
*, void *,
227 static enum register_status
arm_neon_quad_read (struct gdbarch
*gdbarch
,
228 struct regcache
*regcache
,
229 int regnum
, gdb_byte
*buf
);
230 static void arm_neon_quad_write (struct gdbarch
*gdbarch
,
231 struct regcache
*regcache
,
232 int regnum
, const gdb_byte
*buf
);
234 static int thumb_insn_size (unsigned short inst1
);
/* Cache filled in by the prologue analyzers and consumed by the frame
   unwinders.  NOTE(review): several members (prev_sp, framesize,
   framereg) are missing from this extraction — only their comments and
   saved_regs survive.  */
236 struct arm_prologue_cache
238 /* The stack pointer at the time this frame was created; i.e. the
239 caller's stack pointer when this function was called. It is used
240 to identify this frame. */
243 /* The frame base for this frame is just prev_sp - frame size.
244 FRAMESIZE is the distance from the frame pointer to the
245 initial stack pointer. */
249 /* The register used to hold the frame pointer for this frame. */
252 /* Saved register offsets. */
253 struct trad_frame_saved_reg
*saved_regs
;
256 static CORE_ADDR
arm_analyze_prologue (struct gdbarch
*gdbarch
,
257 CORE_ADDR prologue_start
,
258 CORE_ADDR prologue_end
,
259 struct arm_prologue_cache
*cache
);
261 /* Architecture version for displaced stepping. This effects the behaviour of
262 certain instructions, and really should not be hard-wired. */
264 #define DISPLACED_STEPPING_ARCH_VERSION 5
266 /* Addresses for calling Thumb functions have the bit 0 set.
267 Here are some macros to test, set, or clear bit 0 of addresses. */
268 #define IS_THUMB_ADDR(addr) ((addr) & 1)
269 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
270 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
272 /* Set to true if the 32-bit mode is in use. */
276 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
279 arm_psr_thumb_bit (struct gdbarch
*gdbarch
)
281 if (gdbarch_tdep (gdbarch
)->is_m
)
287 /* Determine if FRAME is executing in Thumb mode. */
290 arm_frame_is_thumb (struct frame_info
*frame
)
293 ULONGEST t_bit
= arm_psr_thumb_bit (get_frame_arch (frame
));
295 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
296 directly (from a signal frame or dummy frame) or by interpreting
297 the saved LR (from a prologue or DWARF frame). So consult it and
298 trust the unwinders. */
299 cpsr
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
301 return (cpsr
& t_bit
) != 0;
304 /* Callback for VEC_lower_bound. */
307 arm_compare_mapping_symbols (const struct arm_mapping_symbol
*lhs
,
308 const struct arm_mapping_symbol
*rhs
)
310 return lhs
->value
< rhs
->value
;
313 /* Search for the mapping symbol covering MEMADDR. If one is found,
314 return its type. Otherwise, return 0. If START is non-NULL,
315 set *START to the location of the mapping symbol. */
318 arm_find_mapping_symbol (CORE_ADDR memaddr
, CORE_ADDR
*start
)
320 struct obj_section
*sec
;
322 /* If there are mapping symbols, consult them. */
323 sec
= find_pc_section (memaddr
);
326 struct arm_per_objfile
*data
;
327 VEC(arm_mapping_symbol_s
) *map
;
328 struct arm_mapping_symbol map_key
= { memaddr
- obj_section_addr (sec
),
332 data
= objfile_data (sec
->objfile
, arm_objfile_data_key
);
335 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
336 if (!VEC_empty (arm_mapping_symbol_s
, map
))
338 struct arm_mapping_symbol
*map_sym
;
340 idx
= VEC_lower_bound (arm_mapping_symbol_s
, map
, &map_key
,
341 arm_compare_mapping_symbols
);
343 /* VEC_lower_bound finds the earliest ordered insertion
344 point. If the following symbol starts at this exact
345 address, we use that; otherwise, the preceding
346 mapping symbol covers this address. */
347 if (idx
< VEC_length (arm_mapping_symbol_s
, map
))
349 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
);
350 if (map_sym
->value
== map_key
.value
)
353 *start
= map_sym
->value
+ obj_section_addr (sec
);
354 return map_sym
->type
;
360 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
- 1);
362 *start
= map_sym
->value
+ obj_section_addr (sec
);
363 return map_sym
->type
;
372 /* Determine if the program counter specified in MEMADDR is in a Thumb
373 function. This function should be called for addresses unrelated to
374 any executing frame; otherwise, prefer arm_frame_is_thumb. */
377 arm_pc_is_thumb (struct gdbarch
*gdbarch
, CORE_ADDR memaddr
)
379 struct obj_section
*sec
;
380 struct minimal_symbol
*sym
;
382 struct displaced_step_closure
* dsc
383 = get_displaced_step_closure_by_addr(memaddr
);
385 /* If checking the mode of displaced instruction in copy area, the mode
386 should be determined by instruction on the original address. */
390 fprintf_unfiltered (gdb_stdlog
,
391 "displaced: check mode of %.8lx instead of %.8lx\n",
392 (unsigned long) dsc
->insn_addr
,
393 (unsigned long) memaddr
);
394 memaddr
= dsc
->insn_addr
;
397 /* If bit 0 of the address is set, assume this is a Thumb address. */
398 if (IS_THUMB_ADDR (memaddr
))
401 /* Respect internal mode override if active. */
402 if (arm_override_mode
!= -1)
403 return arm_override_mode
;
405 /* If the user wants to override the symbol table, let him. */
406 if (strcmp (arm_force_mode_string
, "arm") == 0)
408 if (strcmp (arm_force_mode_string
, "thumb") == 0)
411 /* ARM v6-M and v7-M are always in Thumb mode. */
412 if (gdbarch_tdep (gdbarch
)->is_m
)
415 /* If there are mapping symbols, consult them. */
416 type
= arm_find_mapping_symbol (memaddr
, NULL
);
420 /* Thumb functions have a "special" bit set in minimal symbols. */
421 sym
= lookup_minimal_symbol_by_pc (memaddr
);
423 return (MSYMBOL_IS_SPECIAL (sym
));
425 /* If the user wants to override the fallback mode, let them. */
426 if (strcmp (arm_fallback_mode_string
, "arm") == 0)
428 if (strcmp (arm_fallback_mode_string
, "thumb") == 0)
431 /* If we couldn't find any symbol, but we're talking to a running
432 target, then trust the current value of $cpsr. This lets
433 "display/i $pc" always show the correct mode (though if there is
434 a symbol table we will not reach here, so it still may not be
435 displayed in the mode it will be executed). */
436 if (target_has_registers
)
437 return arm_frame_is_thumb (get_current_frame ());
439 /* Otherwise we're out of luck; we assume ARM. */
443 /* Remove useless bits from addresses in a running program. */
445 arm_addr_bits_remove (struct gdbarch
*gdbarch
, CORE_ADDR val
)
448 return UNMAKE_THUMB_ADDR (val
);
450 return (val
& 0x03fffffc);
453 /* When reading symbols, we need to zap the low bit of the address,
454 which may be set to 1 for Thumb functions. */
456 arm_smash_text_address (struct gdbarch
*gdbarch
, CORE_ADDR val
)
461 /* Return 1 if PC is the start of a compiler helper function which
462 can be safely ignored during prologue skipping. IS_THUMB is true
463 if the function is known to be a Thumb function due to the way it
466 skip_prologue_function (struct gdbarch
*gdbarch
, CORE_ADDR pc
, int is_thumb
)
468 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
469 struct minimal_symbol
*msym
;
471 msym
= lookup_minimal_symbol_by_pc (pc
);
473 && SYMBOL_VALUE_ADDRESS (msym
) == pc
474 && SYMBOL_LINKAGE_NAME (msym
) != NULL
)
476 const char *name
= SYMBOL_LINKAGE_NAME (msym
);
478 /* The GNU linker's Thumb call stub to foo is named
480 if (strstr (name
, "_from_thumb") != NULL
)
483 /* On soft-float targets, __truncdfsf2 is called to convert promoted
484 arguments to their argument types in non-prototyped
486 if (strncmp (name
, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
488 if (strncmp (name
, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
491 /* Internal functions related to thread-local storage. */
492 if (strncmp (name
, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
494 if (strncmp (name
, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
499 /* If we run against a stripped glibc, we may be unable to identify
500 special functions by name. Check for one important case,
501 __aeabi_read_tp, by comparing the *code* against the default
502 implementation (this is hand-written ARM assembler in glibc). */
505 && read_memory_unsigned_integer (pc
, 4, byte_order_for_code
)
506 == 0xe3e00a0f /* mov r0, #0xffff0fff */
507 && read_memory_unsigned_integer (pc
+ 4, 4, byte_order_for_code
)
508 == 0xe240f01f) /* sub pc, r0, #31 */
515 /* Support routines for instruction parsing. */
/* submask(x): mask covering bits 0..x inclusive.  */
516 #define submask(x) ((1L << ((x) + 1)) - 1)
/* bit(obj,st): extract single bit ST of OBJ.  */
517 #define bit(obj,st) (((obj) >> (st)) & 1)
/* bits(obj,st,fn): extract the unsigned bit-field [st..fn] of OBJ.  */
518 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* sbits(obj,st,fn): extract bit-field [st..fn] and sign-extend from
   bit FN.  */
519 #define sbits(obj,st,fn) \
520 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* BranchDest: target of an ARM branch — pipeline offset of 8 plus the
   sign-extended 24-bit immediate scaled by 4.  */
521 #define BranchDest(addr,instr) \
522 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
524 /* Extract the immediate from instruction movw/movt of encoding T. INSN1 is
525 the first 16-bit of instruction, and INSN2 is the second 16-bit of
527 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
528 ((bits ((insn1), 0, 3) << 12) \
529 | (bits ((insn1), 10, 10) << 11) \
530 | (bits ((insn2), 12, 14) << 8) \
531 | bits ((insn2), 0, 7))
533 /* Extract the immediate from instruction movw/movt of encoding A. INSN is
534 the 32-bit instruction. */
535 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
536 ((bits ((insn), 16, 19) << 12) \
537 | bits ((insn), 0, 11))
/* Decode a 12-bit modified immediate; implements the architecture's
   ThumbExpandImmediate pseudo-op.  COUNT (the top five bits) selects
   either one of four byte-replication patterns or a rotated 8-bit
   value.  (Reconstructed: the extraction dropped the switch
   skeleton.)  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int count = imm >> 7;

  if (count < 8)
    switch (count / 2)
      {
      case 0:
	/* 00000000 00000000 00000000 abcdefgh  */
	return imm & 0xff;
      case 1:
	/* 00000000 abcdefgh 00000000 abcdefgh  */
	return (imm & 0xff) | ((imm & 0xff) << 16);
      case 2:
	/* abcdefgh 00000000 abcdefgh 00000000  */
	return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
      case 3:
	/* abcdefgh abcdefgh abcdefgh abcdefgh  */
	return (imm & 0xff) | ((imm & 0xff) << 8)
	       | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
      }

  /* 1bcdefgh rotated right by COUNT bit positions.  */
  return (0x80 | (imm & 0x7f)) << (32 - count);
}
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  (Reconstructed: the extraction dropped
   the return statements.)  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  if ((inst & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    return 1;

  if ((inst & 0xf000) == 0xd000)	/* conditional branch */
    return 1;

  if ((inst & 0xf800) == 0xe000)	/* unconditional branch */
    return 1;

  if ((inst & 0xff00) == 0x4700)	/* bx REG, blx REG */
    return 1;

  if ((inst & 0xff87) == 0x4687)	/* mov pc, REG */
    return 1;

  if ((inst & 0xf500) == 0xb100)	/* CBNZ or CBZ.  */
    return 1;

  return 0;
}
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  (Reconstructed: the
   extraction dropped the returns and branch bodies.)  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP — changes the PC iff PC is in the list.  */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB — likewise.  */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA.  */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR with destination PC.  */
      if (bits (inst1, 0, 3) == 15)
	return 1;
      if (bit (inst1, 7))
	return 1;
      if (bit (inst2, 11))
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  return 0;
}
685 /* Analyze a Thumb prologue, looking for a recognizable stack frame
686 and frame pointer. Scan until we encounter a store that could
687 clobber the stack frame unexpectedly, or an unknown instruction.
688 Return the last address which is definitely safe to skip for an
689 initial breakpoint. */
692 thumb_analyze_prologue (struct gdbarch
*gdbarch
,
693 CORE_ADDR start
, CORE_ADDR limit
,
694 struct arm_prologue_cache
*cache
)
696 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
697 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
700 struct pv_area
*stack
;
701 struct cleanup
*back_to
;
703 CORE_ADDR unrecognized_pc
= 0;
705 for (i
= 0; i
< 16; i
++)
706 regs
[i
] = pv_register (i
, 0);
707 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
708 back_to
= make_cleanup_free_pv_area (stack
);
710 while (start
< limit
)
714 insn
= read_memory_unsigned_integer (start
, 2, byte_order_for_code
);
716 if ((insn
& 0xfe00) == 0xb400) /* push { rlist } */
721 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
724 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
725 whether to save LR (R14). */
726 mask
= (insn
& 0xff) | ((insn
& 0x100) << 6);
728 /* Calculate offsets of saved R0-R7 and LR. */
729 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
730 if (mask
& (1 << regno
))
732 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
734 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
737 else if ((insn
& 0xff00) == 0xb000) /* add sp, #simm OR
740 offset
= (insn
& 0x7f) << 2; /* get scaled offset */
741 if (insn
& 0x80) /* Check for SUB. */
742 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
745 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
748 else if ((insn
& 0xf800) == 0xa800) /* add Rd, sp, #imm */
749 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[ARM_SP_REGNUM
],
751 else if ((insn
& 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
752 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
753 regs
[bits (insn
, 0, 2)] = pv_add_constant (regs
[bits (insn
, 3, 5)],
755 else if ((insn
& 0xf800) == 0x3000 /* add Rd, #imm */
756 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
757 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[bits (insn
, 8, 10)],
759 else if ((insn
& 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
760 && pv_is_register (regs
[bits (insn
, 6, 8)], ARM_SP_REGNUM
)
761 && pv_is_constant (regs
[bits (insn
, 3, 5)]))
762 regs
[bits (insn
, 0, 2)] = pv_add (regs
[bits (insn
, 3, 5)],
763 regs
[bits (insn
, 6, 8)]);
764 else if ((insn
& 0xff00) == 0x4400 /* add Rd, Rm */
765 && pv_is_constant (regs
[bits (insn
, 3, 6)]))
767 int rd
= (bit (insn
, 7) << 3) + bits (insn
, 0, 2);
768 int rm
= bits (insn
, 3, 6);
769 regs
[rd
] = pv_add (regs
[rd
], regs
[rm
]);
771 else if ((insn
& 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
773 int dst_reg
= (insn
& 0x7) + ((insn
& 0x80) >> 4);
774 int src_reg
= (insn
& 0x78) >> 3;
775 regs
[dst_reg
] = regs
[src_reg
];
777 else if ((insn
& 0xf800) == 0x9000) /* str rd, [sp, #off] */
779 /* Handle stores to the stack. Normally pushes are used,
780 but with GCC -mtpcs-frame, there may be other stores
781 in the prologue to create the frame. */
782 int regno
= (insn
>> 8) & 0x7;
785 offset
= (insn
& 0xff) << 2;
786 addr
= pv_add_constant (regs
[ARM_SP_REGNUM
], offset
);
788 if (pv_area_store_would_trash (stack
, addr
))
791 pv_area_store (stack
, addr
, 4, regs
[regno
]);
793 else if ((insn
& 0xf800) == 0x6000) /* str rd, [rn, #off] */
795 int rd
= bits (insn
, 0, 2);
796 int rn
= bits (insn
, 3, 5);
799 offset
= bits (insn
, 6, 10) << 2;
800 addr
= pv_add_constant (regs
[rn
], offset
);
802 if (pv_area_store_would_trash (stack
, addr
))
805 pv_area_store (stack
, addr
, 4, regs
[rd
]);
807 else if (((insn
& 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
808 || (insn
& 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
809 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
810 /* Ignore stores of argument registers to the stack. */
812 else if ((insn
& 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
813 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
814 /* Ignore block loads from the stack, potentially copying
815 parameters from memory. */
817 else if ((insn
& 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
818 || ((insn
& 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
819 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
)))
820 /* Similarly ignore single loads from the stack. */
822 else if ((insn
& 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
823 || (insn
& 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
824 /* Skip register copies, i.e. saves to another register
825 instead of the stack. */
827 else if ((insn
& 0xf800) == 0x2000) /* movs Rd, #imm */
828 /* Recognize constant loads; even with small stacks these are necessary
830 regs
[bits (insn
, 8, 10)] = pv_constant (bits (insn
, 0, 7));
831 else if ((insn
& 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
833 /* Constant pool loads, for the same reason. */
834 unsigned int constant
;
837 loc
= start
+ 4 + bits (insn
, 0, 7) * 4;
838 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
839 regs
[bits (insn
, 8, 10)] = pv_constant (constant
);
841 else if (thumb_insn_size (insn
) == 4) /* 32-bit Thumb-2 instructions. */
843 unsigned short inst2
;
845 inst2
= read_memory_unsigned_integer (start
+ 2, 2,
846 byte_order_for_code
);
848 if ((insn
& 0xf800) == 0xf000 && (inst2
& 0xe800) == 0xe800)
850 /* BL, BLX. Allow some special function calls when
851 skipping the prologue; GCC generates these before
852 storing arguments to the stack. */
854 int j1
, j2
, imm1
, imm2
;
856 imm1
= sbits (insn
, 0, 10);
857 imm2
= bits (inst2
, 0, 10);
858 j1
= bit (inst2
, 13);
859 j2
= bit (inst2
, 11);
861 offset
= ((imm1
<< 12) + (imm2
<< 1));
862 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
864 nextpc
= start
+ 4 + offset
;
865 /* For BLX make sure to clear the low bits. */
866 if (bit (inst2
, 12) == 0)
867 nextpc
= nextpc
& 0xfffffffc;
869 if (!skip_prologue_function (gdbarch
, nextpc
,
870 bit (inst2
, 12) != 0))
874 else if ((insn
& 0xffd0) == 0xe900 /* stmdb Rn{!},
876 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
878 pv_t addr
= regs
[bits (insn
, 0, 3)];
881 if (pv_area_store_would_trash (stack
, addr
))
884 /* Calculate offsets of saved registers. */
885 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
886 if (inst2
& (1 << regno
))
888 addr
= pv_add_constant (addr
, -4);
889 pv_area_store (stack
, addr
, 4, regs
[regno
]);
893 regs
[bits (insn
, 0, 3)] = addr
;
896 else if ((insn
& 0xff50) == 0xe940 /* strd Rt, Rt2,
898 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
900 int regno1
= bits (inst2
, 12, 15);
901 int regno2
= bits (inst2
, 8, 11);
902 pv_t addr
= regs
[bits (insn
, 0, 3)];
904 offset
= inst2
& 0xff;
906 addr
= pv_add_constant (addr
, offset
);
908 addr
= pv_add_constant (addr
, -offset
);
910 if (pv_area_store_would_trash (stack
, addr
))
913 pv_area_store (stack
, addr
, 4, regs
[regno1
]);
914 pv_area_store (stack
, pv_add_constant (addr
, 4),
918 regs
[bits (insn
, 0, 3)] = addr
;
921 else if ((insn
& 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
922 && (inst2
& 0x0c00) == 0x0c00
923 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
925 int regno
= bits (inst2
, 12, 15);
926 pv_t addr
= regs
[bits (insn
, 0, 3)];
928 offset
= inst2
& 0xff;
930 addr
= pv_add_constant (addr
, offset
);
932 addr
= pv_add_constant (addr
, -offset
);
934 if (pv_area_store_would_trash (stack
, addr
))
937 pv_area_store (stack
, addr
, 4, regs
[regno
]);
940 regs
[bits (insn
, 0, 3)] = addr
;
943 else if ((insn
& 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
944 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
946 int regno
= bits (inst2
, 12, 15);
949 offset
= inst2
& 0xfff;
950 addr
= pv_add_constant (regs
[bits (insn
, 0, 3)], offset
);
952 if (pv_area_store_would_trash (stack
, addr
))
955 pv_area_store (stack
, addr
, 4, regs
[regno
]);
958 else if ((insn
& 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
959 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
960 /* Ignore stores of argument registers to the stack. */
963 else if ((insn
& 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
964 && (inst2
& 0x0d00) == 0x0c00
965 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
966 /* Ignore stores of argument registers to the stack. */
969 else if ((insn
& 0xffd0) == 0xe890 /* ldmia Rn[!],
971 && (inst2
& 0x8000) == 0x0000
972 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
973 /* Ignore block loads from the stack, potentially copying
974 parameters from memory. */
977 else if ((insn
& 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
979 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
980 /* Similarly ignore dual loads from the stack. */
983 else if ((insn
& 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
984 && (inst2
& 0x0d00) == 0x0c00
985 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
986 /* Similarly ignore single loads from the stack. */
989 else if ((insn
& 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
990 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
991 /* Similarly ignore single loads from the stack. */
994 else if ((insn
& 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
995 && (inst2
& 0x8000) == 0x0000)
997 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
998 | (bits (inst2
, 12, 14) << 8)
999 | bits (inst2
, 0, 7));
1001 regs
[bits (inst2
, 8, 11)]
1002 = pv_add_constant (regs
[bits (insn
, 0, 3)],
1003 thumb_expand_immediate (imm
));
1006 else if ((insn
& 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1007 && (inst2
& 0x8000) == 0x0000)
1009 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1010 | (bits (inst2
, 12, 14) << 8)
1011 | bits (inst2
, 0, 7));
1013 regs
[bits (inst2
, 8, 11)]
1014 = pv_add_constant (regs
[bits (insn
, 0, 3)], imm
);
1017 else if ((insn
& 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1018 && (inst2
& 0x8000) == 0x0000)
1020 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1021 | (bits (inst2
, 12, 14) << 8)
1022 | bits (inst2
, 0, 7));
1024 regs
[bits (inst2
, 8, 11)]
1025 = pv_add_constant (regs
[bits (insn
, 0, 3)],
1026 - (CORE_ADDR
) thumb_expand_immediate (imm
));
1029 else if ((insn
& 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1030 && (inst2
& 0x8000) == 0x0000)
1032 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1033 | (bits (inst2
, 12, 14) << 8)
1034 | bits (inst2
, 0, 7));
1036 regs
[bits (inst2
, 8, 11)]
1037 = pv_add_constant (regs
[bits (insn
, 0, 3)], - (CORE_ADDR
) imm
);
1040 else if ((insn
& 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1042 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1043 | (bits (inst2
, 12, 14) << 8)
1044 | bits (inst2
, 0, 7));
1046 regs
[bits (inst2
, 8, 11)]
1047 = pv_constant (thumb_expand_immediate (imm
));
1050 else if ((insn
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1053 = EXTRACT_MOVW_MOVT_IMM_T (insn
, inst2
);
1055 regs
[bits (inst2
, 8, 11)] = pv_constant (imm
);
1058 else if (insn
== 0xea5f /* mov.w Rd,Rm */
1059 && (inst2
& 0xf0f0) == 0)
1061 int dst_reg
= (inst2
& 0x0f00) >> 8;
1062 int src_reg
= inst2
& 0xf;
1063 regs
[dst_reg
] = regs
[src_reg
];
1066 else if ((insn
& 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1068 /* Constant pool loads. */
1069 unsigned int constant
;
1072 offset
= bits (insn
, 0, 11);
1074 loc
= start
+ 4 + offset
;
1076 loc
= start
+ 4 - offset
;
1078 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1079 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1082 else if ((insn
& 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1084 /* Constant pool loads. */
1085 unsigned int constant
;
1088 offset
= bits (insn
, 0, 7) << 2;
1090 loc
= start
+ 4 + offset
;
1092 loc
= start
+ 4 - offset
;
1094 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1095 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1097 constant
= read_memory_unsigned_integer (loc
+ 4, 4, byte_order
);
1098 regs
[bits (inst2
, 8, 11)] = pv_constant (constant
);
1101 else if (thumb2_instruction_changes_pc (insn
, inst2
))
1103 /* Don't scan past anything that might change control flow. */
1108 /* The optimizer might shove anything into the prologue,
1109 so we just skip what we don't recognize. */
1110 unrecognized_pc
= start
;
1115 else if (thumb_instruction_changes_pc (insn
))
1117 /* Don't scan past anything that might change control flow. */
1122 /* The optimizer might shove anything into the prologue,
1123 so we just skip what we don't recognize. */
1124 unrecognized_pc
= start
;
1131 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1132 paddress (gdbarch
, start
));
1134 if (unrecognized_pc
== 0)
1135 unrecognized_pc
= start
;
1139 do_cleanups (back_to
);
1140 return unrecognized_pc
;
1143 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1145 /* Frame pointer is fp. Frame size is constant. */
1146 cache
->framereg
= ARM_FP_REGNUM
;
1147 cache
->framesize
= -regs
[ARM_FP_REGNUM
].k
;
1149 else if (pv_is_register (regs
[THUMB_FP_REGNUM
], ARM_SP_REGNUM
))
1151 /* Frame pointer is r7. Frame size is constant. */
1152 cache
->framereg
= THUMB_FP_REGNUM
;
1153 cache
->framesize
= -regs
[THUMB_FP_REGNUM
].k
;
1157 /* Try the stack pointer... this is a bit desperate. */
1158 cache
->framereg
= ARM_SP_REGNUM
;
1159 cache
->framesize
= -regs
[ARM_SP_REGNUM
].k
;
1162 for (i
= 0; i
< 16; i
++)
1163 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
1164 cache
->saved_regs
[i
].addr
= offset
;
1166 do_cleanups (back_to
);
1167 return unrecognized_pc
;
1171 /* Try to analyze the instructions starting from PC, which load symbol
1172 __stack_chk_guard. Return the address of instruction after loading this
1173 symbol, set the dest register number to *BASEREG, and set the size of
1174 instructions for loading symbol in OFFSET. Return 0 if instructions are
1178 arm_analyze_load_stack_chk_guard(CORE_ADDR pc
, struct gdbarch
*gdbarch
,
1179 unsigned int *destreg
, int *offset
)
1181 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1182 int is_thumb
= arm_pc_is_thumb (gdbarch
, pc
);
1183 unsigned int low
, high
, address
;
1188 unsigned short insn1
1189 = read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
1191 if ((insn1
& 0xf800) == 0x4800) /* ldr Rd, #immed */
1193 *destreg
= bits (insn1
, 8, 10);
1195 address
= bits (insn1
, 0, 7);
1197 else if ((insn1
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1199 unsigned short insn2
1200 = read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
1202 low
= EXTRACT_MOVW_MOVT_IMM_T (insn1
, insn2
);
1205 = read_memory_unsigned_integer (pc
+ 4, 2, byte_order_for_code
);
1207 = read_memory_unsigned_integer (pc
+ 6, 2, byte_order_for_code
);
1209 /* movt Rd, #const */
1210 if ((insn1
& 0xfbc0) == 0xf2c0)
1212 high
= EXTRACT_MOVW_MOVT_IMM_T (insn1
, insn2
);
1213 *destreg
= bits (insn2
, 8, 11);
1215 address
= (high
<< 16 | low
);
1222 = read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
1224 if ((insn
& 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1226 address
= bits (insn
, 0, 11);
1227 *destreg
= bits (insn
, 12, 15);
1230 else if ((insn
& 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1232 low
= EXTRACT_MOVW_MOVT_IMM_A (insn
);
1235 = read_memory_unsigned_integer (pc
+ 4, 4, byte_order_for_code
);
1237 if ((insn
& 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1239 high
= EXTRACT_MOVW_MOVT_IMM_A (insn
);
1240 *destreg
= bits (insn
, 12, 15);
1242 address
= (high
<< 16 | low
);
1250 /* Try to skip a sequence of instructions used for stack protector. If PC
1251 points to the first instruction of this sequence, return the address of
1252 first instruction after this sequence, otherwise, return original PC.
1254 On arm, this sequence of instructions is composed of mainly three steps,
1255 Step 1: load symbol __stack_chk_guard,
1256 Step 2: load from address of __stack_chk_guard,
1257 Step 3: store it to somewhere else.
1259 Usually, instructions on step 2 and step 3 are the same on various ARM
1260 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1261 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1262 instructions in step 1 vary from different ARM architectures. On ARMv7,
1265 movw Rn, #:lower16:__stack_chk_guard
1266 movt Rn, #:upper16:__stack_chk_guard
1273 .word __stack_chk_guard
1275 Since ldr/str is a very popular instruction, we can't use them as
1276 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1277 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1278 stripped, as the 'fingerprint' of a stack protector cdoe sequence. */
1281 arm_skip_stack_protector(CORE_ADDR pc
, struct gdbarch
*gdbarch
)
1283 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1284 unsigned int address
, basereg
;
1285 struct minimal_symbol
*stack_chk_guard
;
1287 int is_thumb
= arm_pc_is_thumb (gdbarch
, pc
);
1290 /* Try to parse the instructions in Step 1. */
1291 addr
= arm_analyze_load_stack_chk_guard (pc
, gdbarch
,
1296 stack_chk_guard
= lookup_minimal_symbol_by_pc (addr
);
1297 /* If name of symbol doesn't start with '__stack_chk_guard', this
1298 instruction sequence is not for stack protector. If symbol is
1299 removed, we conservatively think this sequence is for stack protector. */
1301 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard
), "__stack_chk_guard",
1302 strlen ("__stack_chk_guard")) != 0)
1307 unsigned int destreg
;
1309 = read_memory_unsigned_integer (pc
+ offset
, 2, byte_order_for_code
);
1311 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1312 if ((insn
& 0xf800) != 0x6800)
1314 if (bits (insn
, 3, 5) != basereg
)
1316 destreg
= bits (insn
, 0, 2);
1318 insn
= read_memory_unsigned_integer (pc
+ offset
+ 2, 2,
1319 byte_order_for_code
);
1320 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1321 if ((insn
& 0xf800) != 0x6000)
1323 if (destreg
!= bits (insn
, 0, 2))
1328 unsigned int destreg
;
1330 = read_memory_unsigned_integer (pc
+ offset
, 4, byte_order_for_code
);
1332 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1333 if ((insn
& 0x0e500000) != 0x04100000)
1335 if (bits (insn
, 16, 19) != basereg
)
1337 destreg
= bits (insn
, 12, 15);
1338 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1339 insn
= read_memory_unsigned_integer (pc
+ offset
+ 4,
1340 4, byte_order_for_code
);
1341 if ((insn
& 0x0e500000) != 0x04000000)
1343 if (bits (insn
, 12, 15) != destreg
)
1346 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1349 return pc
+ offset
+ 4;
1351 return pc
+ offset
+ 8;
1354 /* Advance the PC across any function entry prologue instructions to
1355 reach some "real" code.
1357 The APCS (ARM Procedure Call Standard) defines the following
1361 [stmfd sp!, {a1,a2,a3,a4}]
1362 stmfd sp!, {...,fp,ip,lr,pc}
1363 [stfe f7, [sp, #-12]!]
1364 [stfe f6, [sp, #-12]!]
1365 [stfe f5, [sp, #-12]!]
1366 [stfe f4, [sp, #-12]!]
1367 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1370 arm_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
1372 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1375 CORE_ADDR func_addr
, limit_pc
;
1376 struct symtab_and_line sal
;
1378 /* See if we can determine the end of the prologue via the symbol table.
1379 If so, then return either PC, or the PC after the prologue, whichever
1381 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
1383 CORE_ADDR post_prologue_pc
1384 = skip_prologue_using_sal (gdbarch
, func_addr
);
1385 struct symtab
*s
= find_pc_symtab (func_addr
);
1387 if (post_prologue_pc
)
1389 = arm_skip_stack_protector (post_prologue_pc
, gdbarch
);
1392 /* GCC always emits a line note before the prologue and another
1393 one after, even if the two are at the same address or on the
1394 same line. Take advantage of this so that we do not need to
1395 know every instruction that might appear in the prologue. We
1396 will have producer information for most binaries; if it is
1397 missing (e.g. for -gstabs), assuming the GNU tools. */
1398 if (post_prologue_pc
1400 || s
->producer
== NULL
1401 || strncmp (s
->producer
, "GNU ", sizeof ("GNU ") - 1) == 0))
1402 return post_prologue_pc
;
1404 if (post_prologue_pc
!= 0)
1406 CORE_ADDR analyzed_limit
;
1408 /* For non-GCC compilers, make sure the entire line is an
1409 acceptable prologue; GDB will round this function's
1410 return value up to the end of the following line so we
1411 can not skip just part of a line (and we do not want to).
1413 RealView does not treat the prologue specially, but does
1414 associate prologue code with the opening brace; so this
1415 lets us skip the first line if we think it is the opening
1417 if (arm_pc_is_thumb (gdbarch
, func_addr
))
1418 analyzed_limit
= thumb_analyze_prologue (gdbarch
, func_addr
,
1419 post_prologue_pc
, NULL
);
1421 analyzed_limit
= arm_analyze_prologue (gdbarch
, func_addr
,
1422 post_prologue_pc
, NULL
);
1424 if (analyzed_limit
!= post_prologue_pc
)
1427 return post_prologue_pc
;
1431 /* Can't determine prologue from the symbol table, need to examine
1434 /* Find an upper limit on the function prologue using the debug
1435 information. If the debug information could not be used to provide
1436 that bound, then use an arbitrary large number as the upper bound. */
1437 /* Like arm_scan_prologue, stop no later than pc + 64. */
1438 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
1440 limit_pc
= pc
+ 64; /* Magic. */
1443 /* Check if this is Thumb code. */
1444 if (arm_pc_is_thumb (gdbarch
, pc
))
1445 return thumb_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
1447 for (skip_pc
= pc
; skip_pc
< limit_pc
; skip_pc
+= 4)
1449 inst
= read_memory_unsigned_integer (skip_pc
, 4, byte_order_for_code
);
1451 /* "mov ip, sp" is no longer a required part of the prologue. */
1452 if (inst
== 0xe1a0c00d) /* mov ip, sp */
1455 if ((inst
& 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1458 if ((inst
& 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1461 /* Some prologues begin with "str lr, [sp, #-4]!". */
1462 if (inst
== 0xe52de004) /* str lr, [sp, #-4]! */
1465 if ((inst
& 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1468 if ((inst
& 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1471 /* Any insns after this point may float into the code, if it makes
1472 for better instruction scheduling, so we skip them only if we
1473 find them, but still consider the function to be frame-ful. */
1475 /* We may have either one sfmfd instruction here, or several stfe
1476 insns, depending on the version of floating point code we
1478 if ((inst
& 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1481 if ((inst
& 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1484 if ((inst
& 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1487 if ((inst
& 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1490 if ((inst
& 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1491 || (inst
& 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1492 || (inst
& 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1495 if ((inst
& 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1496 || (inst
& 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1497 || (inst
& 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1500 /* Un-recognized instruction; stop scanning. */
1504 return skip_pc
; /* End of prologue. */
1508 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1509 This function decodes a Thumb function prologue to determine:
1510 1) the size of the stack frame
1511 2) which registers are saved on it
1512 3) the offsets of saved regs
1513 4) the offset from the stack pointer to the frame pointer
1515 A typical Thumb function prologue would create this stack frame
1516 (offsets relative to FP)
1517 old SP -> 24 stack parameters
1520 R7 -> 0 local variables (16 bytes)
1521 SP -> -12 additional stack space (12 bytes)
1522 The frame size would thus be 36 bytes, and the frame offset would be
1523 12 bytes. The frame register is R7.
1525 The comments for thumb_skip_prolog() describe the algorithm we use
1526 to detect the end of the prolog. */
1530 thumb_scan_prologue (struct gdbarch
*gdbarch
, CORE_ADDR prev_pc
,
1531 CORE_ADDR block_addr
, struct arm_prologue_cache
*cache
)
1533 CORE_ADDR prologue_start
;
1534 CORE_ADDR prologue_end
;
1535 CORE_ADDR current_pc
;
1537 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1540 /* See comment in arm_scan_prologue for an explanation of
1542 if (prologue_end
> prologue_start
+ 64)
1544 prologue_end
= prologue_start
+ 64;
1548 /* We're in the boondocks: we have no idea where the start of the
1552 prologue_end
= min (prologue_end
, prev_pc
);
1554 thumb_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
1557 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1560 arm_instruction_changes_pc (uint32_t this_instr
)
1562 if (bits (this_instr
, 28, 31) == INST_NV
)
1563 /* Unconditional instructions. */
1564 switch (bits (this_instr
, 24, 27))
1568 /* Branch with Link and change to Thumb. */
1573 /* Coprocessor register transfer. */
1574 if (bits (this_instr
, 12, 15) == 15)
1575 error (_("Invalid update to pc in instruction"));
1581 switch (bits (this_instr
, 25, 27))
1584 if (bits (this_instr
, 23, 24) == 2 && bit (this_instr
, 20) == 0)
1586 /* Multiplies and extra load/stores. */
1587 if (bit (this_instr
, 4) == 1 && bit (this_instr
, 7) == 1)
1588 /* Neither multiplies nor extension load/stores are allowed
1592 /* Otherwise, miscellaneous instructions. */
1594 /* BX <reg>, BXJ <reg>, BLX <reg> */
1595 if (bits (this_instr
, 4, 27) == 0x12fff1
1596 || bits (this_instr
, 4, 27) == 0x12fff2
1597 || bits (this_instr
, 4, 27) == 0x12fff3)
1600 /* Other miscellaneous instructions are unpredictable if they
1604 /* Data processing instruction. Fall through. */
1607 if (bits (this_instr
, 12, 15) == 15)
1614 /* Media instructions and architecturally undefined instructions. */
1615 if (bits (this_instr
, 25, 27) == 3 && bit (this_instr
, 4) == 1)
1619 if (bit (this_instr
, 20) == 0)
1623 if (bits (this_instr
, 12, 15) == ARM_PC_REGNUM
)
1629 /* Load/store multiple. */
1630 if (bit (this_instr
, 20) == 1 && bit (this_instr
, 15) == 1)
1636 /* Branch and branch with link. */
1641 /* Coprocessor transfers or SWIs can not affect PC. */
1645 internal_error (__FILE__
, __LINE__
, _("bad value in switch"));
1649 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1650 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1651 fill it in. Return the first address not recognized as a prologue
1654 We recognize all the instructions typically found in ARM prologues,
1655 plus harmless instructions which can be skipped (either for analysis
1656 purposes, or a more restrictive set that can be skipped when finding
1657 the end of the prologue). */
1660 arm_analyze_prologue (struct gdbarch
*gdbarch
,
1661 CORE_ADDR prologue_start
, CORE_ADDR prologue_end
,
1662 struct arm_prologue_cache
*cache
)
1664 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1665 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1667 CORE_ADDR offset
, current_pc
;
1668 pv_t regs
[ARM_FPS_REGNUM
];
1669 struct pv_area
*stack
;
1670 struct cleanup
*back_to
;
1671 int framereg
, framesize
;
1672 CORE_ADDR unrecognized_pc
= 0;
1674 /* Search the prologue looking for instructions that set up the
1675 frame pointer, adjust the stack pointer, and save registers.
1677 Be careful, however, and if it doesn't look like a prologue,
1678 don't try to scan it. If, for instance, a frameless function
1679 begins with stmfd sp!, then we will tell ourselves there is
1680 a frame, which will confuse stack traceback, as well as "finish"
1681 and other operations that rely on a knowledge of the stack
1684 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1685 regs
[regno
] = pv_register (regno
, 0);
1686 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
1687 back_to
= make_cleanup_free_pv_area (stack
);
1689 for (current_pc
= prologue_start
;
1690 current_pc
< prologue_end
;
1694 = read_memory_unsigned_integer (current_pc
, 4, byte_order_for_code
);
1696 if (insn
== 0xe1a0c00d) /* mov ip, sp */
1698 regs
[ARM_IP_REGNUM
] = regs
[ARM_SP_REGNUM
];
1701 else if ((insn
& 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1702 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1704 unsigned imm
= insn
& 0xff; /* immediate value */
1705 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1706 int rd
= bits (insn
, 12, 15);
1707 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1708 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], imm
);
1711 else if ((insn
& 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1712 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1714 unsigned imm
= insn
& 0xff; /* immediate value */
1715 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1716 int rd
= bits (insn
, 12, 15);
1717 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1718 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], -imm
);
1721 else if ((insn
& 0xffff0fff) == 0xe52d0004) /* str Rd,
1724 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1726 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1727 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4,
1728 regs
[bits (insn
, 12, 15)]);
1731 else if ((insn
& 0xffff0000) == 0xe92d0000)
1732 /* stmfd sp!, {..., fp, ip, lr, pc}
1734 stmfd sp!, {a1, a2, a3, a4} */
1736 int mask
= insn
& 0xffff;
1738 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1741 /* Calculate offsets of saved registers. */
1742 for (regno
= ARM_PC_REGNUM
; regno
>= 0; regno
--)
1743 if (mask
& (1 << regno
))
1746 = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1747 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
1750 else if ((insn
& 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1751 || (insn
& 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1752 || (insn
& 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1754 /* No need to add this to saved_regs -- it's just an arg reg. */
1757 else if ((insn
& 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1758 || (insn
& 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1759 || (insn
& 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1761 /* No need to add this to saved_regs -- it's just an arg reg. */
1764 else if ((insn
& 0xfff00000) == 0xe8800000 /* stm Rn,
1766 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1768 /* No need to add this to saved_regs -- it's just arg regs. */
1771 else if ((insn
& 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1773 unsigned imm
= insn
& 0xff; /* immediate value */
1774 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1775 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1776 regs
[ARM_FP_REGNUM
] = pv_add_constant (regs
[ARM_IP_REGNUM
], -imm
);
1778 else if ((insn
& 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1780 unsigned imm
= insn
& 0xff; /* immediate value */
1781 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1782 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1783 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -imm
);
1785 else if ((insn
& 0xffff7fff) == 0xed6d0103 /* stfe f?,
1787 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1789 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1792 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1793 regno
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x07);
1794 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12, regs
[regno
]);
1796 else if ((insn
& 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1798 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1800 int n_saved_fp_regs
;
1801 unsigned int fp_start_reg
, fp_bound_reg
;
1803 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1806 if ((insn
& 0x800) == 0x800) /* N0 is set */
1808 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1809 n_saved_fp_regs
= 3;
1811 n_saved_fp_regs
= 1;
1815 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1816 n_saved_fp_regs
= 2;
1818 n_saved_fp_regs
= 4;
1821 fp_start_reg
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x7);
1822 fp_bound_reg
= fp_start_reg
+ n_saved_fp_regs
;
1823 for (; fp_start_reg
< fp_bound_reg
; fp_start_reg
++)
1825 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1826 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12,
1827 regs
[fp_start_reg
++]);
1830 else if ((insn
& 0xff000000) == 0xeb000000 && cache
== NULL
) /* bl */
1832 /* Allow some special function calls when skipping the
1833 prologue; GCC generates these before storing arguments to
1835 CORE_ADDR dest
= BranchDest (current_pc
, insn
);
1837 if (skip_prologue_function (gdbarch
, dest
, 0))
1842 else if ((insn
& 0xf0000000) != 0xe0000000)
1843 break; /* Condition not true, exit early. */
1844 else if (arm_instruction_changes_pc (insn
))
1845 /* Don't scan past anything that might change control flow. */
1847 else if ((insn
& 0xfe500000) == 0xe8100000 /* ldm */
1848 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1849 /* Ignore block loads from the stack, potentially copying
1850 parameters from memory. */
1852 else if ((insn
& 0xfc500000) == 0xe4100000
1853 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1854 /* Similarly ignore single loads from the stack. */
1856 else if ((insn
& 0xffff0ff0) == 0xe1a00000)
1857 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1858 register instead of the stack. */
1862 /* The optimizer might shove anything into the prologue,
1863 so we just skip what we don't recognize. */
1864 unrecognized_pc
= current_pc
;
1869 if (unrecognized_pc
== 0)
1870 unrecognized_pc
= current_pc
;
1872 /* The frame size is just the distance from the frame register
1873 to the original stack pointer. */
1874 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1876 /* Frame pointer is fp. */
1877 framereg
= ARM_FP_REGNUM
;
1878 framesize
= -regs
[ARM_FP_REGNUM
].k
;
1882 /* Try the stack pointer... this is a bit desperate. */
1883 framereg
= ARM_SP_REGNUM
;
1884 framesize
= -regs
[ARM_SP_REGNUM
].k
;
1889 cache
->framereg
= framereg
;
1890 cache
->framesize
= framesize
;
1892 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1893 if (pv_area_find_reg (stack
, gdbarch
, regno
, &offset
))
1894 cache
->saved_regs
[regno
].addr
= offset
;
1898 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1899 paddress (gdbarch
, unrecognized_pc
));
1901 do_cleanups (back_to
);
1902 return unrecognized_pc
;
1906 arm_scan_prologue (struct frame_info
*this_frame
,
1907 struct arm_prologue_cache
*cache
)
1909 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1910 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1912 CORE_ADDR prologue_start
, prologue_end
, current_pc
;
1913 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
1914 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
1915 pv_t regs
[ARM_FPS_REGNUM
];
1916 struct pv_area
*stack
;
1917 struct cleanup
*back_to
;
1920 /* Assume there is no frame until proven otherwise. */
1921 cache
->framereg
= ARM_SP_REGNUM
;
1922 cache
->framesize
= 0;
1924 /* Check for Thumb prologue. */
1925 if (arm_frame_is_thumb (this_frame
))
1927 thumb_scan_prologue (gdbarch
, prev_pc
, block_addr
, cache
);
1931 /* Find the function prologue. If we can't find the function in
1932 the symbol table, peek in the stack frame to find the PC. */
1933 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1936 /* One way to find the end of the prologue (which works well
1937 for unoptimized code) is to do the following:
1939 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1942 prologue_end = prev_pc;
1943 else if (sal.end < prologue_end)
1944 prologue_end = sal.end;
1946 This mechanism is very accurate so long as the optimizer
1947 doesn't move any instructions from the function body into the
1948 prologue. If this happens, sal.end will be the last
1949 instruction in the first hunk of prologue code just before
1950 the first instruction that the scheduler has moved from
1951 the body to the prologue.
1953 In order to make sure that we scan all of the prologue
1954 instructions, we use a slightly less accurate mechanism which
1955 may scan more than necessary. To help compensate for this
1956 lack of accuracy, the prologue scanning loop below contains
1957 several clauses which'll cause the loop to terminate early if
1958 an implausible prologue instruction is encountered.
1964 is a suitable endpoint since it accounts for the largest
1965 possible prologue plus up to five instructions inserted by
1968 if (prologue_end
> prologue_start
+ 64)
1970 prologue_end
= prologue_start
+ 64; /* See above. */
1975 /* We have no symbol information. Our only option is to assume this
1976 function has a standard stack frame and the normal frame register.
1977 Then, we can find the value of our frame pointer on entrance to
1978 the callee (or at the present moment if this is the innermost frame).
1979 The value stored there should be the address of the stmfd + 8. */
1980 CORE_ADDR frame_loc
;
1981 LONGEST return_value
;
1983 frame_loc
= get_frame_register_unsigned (this_frame
, ARM_FP_REGNUM
);
1984 if (!safe_read_memory_integer (frame_loc
, 4, byte_order
, &return_value
))
1988 prologue_start
= gdbarch_addr_bits_remove
1989 (gdbarch
, return_value
) - 8;
1990 prologue_end
= prologue_start
+ 64; /* See above. */
1994 if (prev_pc
< prologue_end
)
1995 prologue_end
= prev_pc
;
1997 arm_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
2000 static struct arm_prologue_cache
*
2001 arm_make_prologue_cache (struct frame_info
*this_frame
)
2004 struct arm_prologue_cache
*cache
;
2005 CORE_ADDR unwound_fp
;
2007 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2008 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2010 arm_scan_prologue (this_frame
, cache
);
2012 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
2013 if (unwound_fp
== 0)
2016 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
2018 /* Calculate actual addresses of saved registers using offsets
2019 determined by arm_scan_prologue. */
2020 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
2021 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
2022 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
2027 /* Our frame ID for a normal frame is the current function's starting PC
2028 and the caller's SP when we were called. */
2031 arm_prologue_this_id (struct frame_info
*this_frame
,
2033 struct frame_id
*this_id
)
2035 struct arm_prologue_cache
*cache
;
2039 if (*this_cache
== NULL
)
2040 *this_cache
= arm_make_prologue_cache (this_frame
);
2041 cache
= *this_cache
;
2043 /* This is meant to halt the backtrace at "_start". */
2044 pc
= get_frame_pc (this_frame
);
2045 if (pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
2048 /* If we've hit a wall, stop. */
2049 if (cache
->prev_sp
== 0)
2052 /* Use function start address as part of the frame ID. If we cannot
2053 identify the start address (due to missing symbol information),
2054 fall back to just using the current PC. */
2055 func
= get_frame_func (this_frame
);
2059 id
= frame_id_build (cache
->prev_sp
, func
);
2063 static struct value
*
2064 arm_prologue_prev_register (struct frame_info
*this_frame
,
2068 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2069 struct arm_prologue_cache
*cache
;
2071 if (*this_cache
== NULL
)
2072 *this_cache
= arm_make_prologue_cache (this_frame
);
2073 cache
= *this_cache
;
2075 /* If we are asked to unwind the PC, then we need to return the LR
2076 instead. The prologue may save PC, but it will point into this
2077 frame's prologue, not the next frame's resume location. Also
2078 strip the saved T bit. A valid LR may have the low bit set, but
2079 a valid PC never does. */
2080 if (prev_regnum
== ARM_PC_REGNUM
)
2084 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2085 return frame_unwind_got_constant (this_frame
, prev_regnum
,
2086 arm_addr_bits_remove (gdbarch
, lr
));
2089 /* SP is generally not saved to the stack, but this frame is
2090 identified by the next frame's stack pointer at the time of the call.
2091 The value was already reconstructed into PREV_SP. */
2092 if (prev_regnum
== ARM_SP_REGNUM
)
2093 return frame_unwind_got_constant (this_frame
, prev_regnum
, cache
->prev_sp
);
2095 /* The CPSR may have been changed by the call instruction and by the
2096 called function. The only bit we can reconstruct is the T bit,
2097 by checking the low bit of LR as of the call. This is a reliable
2098 indicator of Thumb-ness except for some ARM v4T pre-interworking
2099 Thumb code, which could get away with a clear low bit as long as
2100 the called function did not use bx. Guess that all other
2101 bits are unchanged; the condition flags are presumably lost,
2102 but the processor status is likely valid. */
2103 if (prev_regnum
== ARM_PS_REGNUM
)
2106 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
2108 cpsr
= get_frame_register_unsigned (this_frame
, prev_regnum
);
2109 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2110 if (IS_THUMB_ADDR (lr
))
2114 return frame_unwind_got_constant (this_frame
, prev_regnum
, cpsr
);
2117 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
2121 struct frame_unwind arm_prologue_unwind
= {
2123 default_frame_unwind_stop_reason
,
2124 arm_prologue_this_id
,
2125 arm_prologue_prev_register
,
2127 default_frame_sniffer
2130 /* Maintain a list of ARM exception table entries per objfile, similar to the
2131 list of mapping symbols. We only cache entries for standard ARM-defined
2132 personality routines; the cache will contain only the frame unwinding
2133 instructions associated with the entry (not the descriptors). */
2135 static const struct objfile_data
*arm_exidx_data_key
;
2137 struct arm_exidx_entry
2142 typedef struct arm_exidx_entry arm_exidx_entry_s
;
2143 DEF_VEC_O(arm_exidx_entry_s
);
2145 struct arm_exidx_data
2147 VEC(arm_exidx_entry_s
) **section_maps
;
2151 arm_exidx_data_free (struct objfile
*objfile
, void *arg
)
2153 struct arm_exidx_data
*data
= arg
;
2156 for (i
= 0; i
< objfile
->obfd
->section_count
; i
++)
2157 VEC_free (arm_exidx_entry_s
, data
->section_maps
[i
]);
2161 arm_compare_exidx_entries (const struct arm_exidx_entry
*lhs
,
2162 const struct arm_exidx_entry
*rhs
)
2164 return lhs
->addr
< rhs
->addr
;
2167 static struct obj_section
*
2168 arm_obj_section_from_vma (struct objfile
*objfile
, bfd_vma vma
)
2170 struct obj_section
*osect
;
2172 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
2173 if (bfd_get_section_flags (objfile
->obfd
,
2174 osect
->the_bfd_section
) & SEC_ALLOC
)
2176 bfd_vma start
, size
;
2177 start
= bfd_get_section_vma (objfile
->obfd
, osect
->the_bfd_section
);
2178 size
= bfd_get_section_size (osect
->the_bfd_section
);
2180 if (start
<= vma
&& vma
< start
+ size
)
2187 /* Parse contents of exception table and exception index sections
2188 of OBJFILE, and fill in the exception table entry cache.
2190 For each entry that refers to a standard ARM-defined personality
2191 routine, extract the frame unwinding instructions (from either
2192 the index or the table section). The unwinding instructions
2194 - extracting them from the rest of the table data
2195 - converting to host endianness
2196 - appending the implicit 0xb0 ("Finish") code
2198 The extracted and normalized instructions are stored for later
2199 retrieval by the arm_find_exidx_entry routine. */
2202 arm_exidx_new_objfile (struct objfile
*objfile
)
2204 struct cleanup
*cleanups
;
2205 struct arm_exidx_data
*data
;
2206 asection
*exidx
, *extab
;
2207 bfd_vma exidx_vma
= 0, extab_vma
= 0;
2208 bfd_size_type exidx_size
= 0, extab_size
= 0;
2209 gdb_byte
*exidx_data
= NULL
, *extab_data
= NULL
;
2212 /* If we've already touched this file, do nothing. */
2213 if (!objfile
|| objfile_data (objfile
, arm_exidx_data_key
) != NULL
)
2215 cleanups
= make_cleanup (null_cleanup
, NULL
);
2217 /* Read contents of exception table and index. */
2218 exidx
= bfd_get_section_by_name (objfile
->obfd
, ".ARM.exidx");
2221 exidx_vma
= bfd_section_vma (objfile
->obfd
, exidx
);
2222 exidx_size
= bfd_get_section_size (exidx
);
2223 exidx_data
= xmalloc (exidx_size
);
2224 make_cleanup (xfree
, exidx_data
);
2226 if (!bfd_get_section_contents (objfile
->obfd
, exidx
,
2227 exidx_data
, 0, exidx_size
))
2229 do_cleanups (cleanups
);
2234 extab
= bfd_get_section_by_name (objfile
->obfd
, ".ARM.extab");
2237 extab_vma
= bfd_section_vma (objfile
->obfd
, extab
);
2238 extab_size
= bfd_get_section_size (extab
);
2239 extab_data
= xmalloc (extab_size
);
2240 make_cleanup (xfree
, extab_data
);
2242 if (!bfd_get_section_contents (objfile
->obfd
, extab
,
2243 extab_data
, 0, extab_size
))
2245 do_cleanups (cleanups
);
2250 /* Allocate exception table data structure. */
2251 data
= OBSTACK_ZALLOC (&objfile
->objfile_obstack
, struct arm_exidx_data
);
2252 set_objfile_data (objfile
, arm_exidx_data_key
, data
);
2253 data
->section_maps
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
2254 objfile
->obfd
->section_count
,
2255 VEC(arm_exidx_entry_s
) *);
2257 /* Fill in exception table. */
2258 for (i
= 0; i
< exidx_size
/ 8; i
++)
2260 struct arm_exidx_entry new_exidx_entry
;
2261 bfd_vma idx
= bfd_h_get_32 (objfile
->obfd
, exidx_data
+ i
* 8);
2262 bfd_vma val
= bfd_h_get_32 (objfile
->obfd
, exidx_data
+ i
* 8 + 4);
2263 bfd_vma addr
= 0, word
= 0;
2264 int n_bytes
= 0, n_words
= 0;
2265 struct obj_section
*sec
;
2266 gdb_byte
*entry
= NULL
;
2268 /* Extract address of start of function. */
2269 idx
= ((idx
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2270 idx
+= exidx_vma
+ i
* 8;
2272 /* Find section containing function and compute section offset. */
2273 sec
= arm_obj_section_from_vma (objfile
, idx
);
2276 idx
-= bfd_get_section_vma (objfile
->obfd
, sec
->the_bfd_section
);
2278 /* Determine address of exception table entry. */
2281 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2283 else if ((val
& 0xff000000) == 0x80000000)
2285 /* Exception table entry embedded in .ARM.exidx
2286 -- must be short form. */
2290 else if (!(val
& 0x80000000))
2292 /* Exception table entry in .ARM.extab. */
2293 addr
= ((val
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2294 addr
+= exidx_vma
+ i
* 8 + 4;
2296 if (addr
>= extab_vma
&& addr
+ 4 <= extab_vma
+ extab_size
)
2298 word
= bfd_h_get_32 (objfile
->obfd
,
2299 extab_data
+ addr
- extab_vma
);
2302 if ((word
& 0xff000000) == 0x80000000)
2307 else if ((word
& 0xff000000) == 0x81000000
2308 || (word
& 0xff000000) == 0x82000000)
2312 n_words
= ((word
>> 16) & 0xff);
2314 else if (!(word
& 0x80000000))
2317 struct obj_section
*pers_sec
;
2318 int gnu_personality
= 0;
2320 /* Custom personality routine. */
2321 pers
= ((word
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2322 pers
= UNMAKE_THUMB_ADDR (pers
+ addr
- 4);
2324 /* Check whether we've got one of the variants of the
2325 GNU personality routines. */
2326 pers_sec
= arm_obj_section_from_vma (objfile
, pers
);
2329 static const char *personality
[] =
2331 "__gcc_personality_v0",
2332 "__gxx_personality_v0",
2333 "__gcj_personality_v0",
2334 "__gnu_objc_personality_v0",
2338 CORE_ADDR pc
= pers
+ obj_section_offset (pers_sec
);
2341 for (k
= 0; personality
[k
]; k
++)
2342 if (lookup_minimal_symbol_by_pc_name
2343 (pc
, personality
[k
], objfile
))
2345 gnu_personality
= 1;
2350 /* If so, the next word contains a word count in the high
2351 byte, followed by the same unwind instructions as the
2352 pre-defined forms. */
2354 && addr
+ 4 <= extab_vma
+ extab_size
)
2356 word
= bfd_h_get_32 (objfile
->obfd
,
2357 extab_data
+ addr
- extab_vma
);
2360 n_words
= ((word
>> 24) & 0xff);
2366 /* Sanity check address. */
2368 if (addr
< extab_vma
|| addr
+ 4 * n_words
> extab_vma
+ extab_size
)
2369 n_words
= n_bytes
= 0;
2371 /* The unwind instructions reside in WORD (only the N_BYTES least
2372 significant bytes are valid), followed by N_WORDS words in the
2373 extab section starting at ADDR. */
2374 if (n_bytes
|| n_words
)
2376 gdb_byte
*p
= entry
= obstack_alloc (&objfile
->objfile_obstack
,
2377 n_bytes
+ n_words
* 4 + 1);
2380 *p
++ = (gdb_byte
) ((word
>> (8 * n_bytes
)) & 0xff);
2384 word
= bfd_h_get_32 (objfile
->obfd
,
2385 extab_data
+ addr
- extab_vma
);
2388 *p
++ = (gdb_byte
) ((word
>> 24) & 0xff);
2389 *p
++ = (gdb_byte
) ((word
>> 16) & 0xff);
2390 *p
++ = (gdb_byte
) ((word
>> 8) & 0xff);
2391 *p
++ = (gdb_byte
) (word
& 0xff);
2394 /* Implied "Finish" to terminate the list. */
2398 /* Push entry onto vector. They are guaranteed to always
2399 appear in order of increasing addresses. */
2400 new_exidx_entry
.addr
= idx
;
2401 new_exidx_entry
.entry
= entry
;
2402 VEC_safe_push (arm_exidx_entry_s
,
2403 data
->section_maps
[sec
->the_bfd_section
->index
],
2407 do_cleanups (cleanups
);
2410 /* Search for the exception table entry covering MEMADDR. If one is found,
2411 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2412 set *START to the start of the region covered by this entry. */
2415 arm_find_exidx_entry (CORE_ADDR memaddr
, CORE_ADDR
*start
)
2417 struct obj_section
*sec
;
2419 sec
= find_pc_section (memaddr
);
2422 struct arm_exidx_data
*data
;
2423 VEC(arm_exidx_entry_s
) *map
;
2424 struct arm_exidx_entry map_key
= { memaddr
- obj_section_addr (sec
), 0 };
2427 data
= objfile_data (sec
->objfile
, arm_exidx_data_key
);
2430 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
2431 if (!VEC_empty (arm_exidx_entry_s
, map
))
2433 struct arm_exidx_entry
*map_sym
;
2435 idx
= VEC_lower_bound (arm_exidx_entry_s
, map
, &map_key
,
2436 arm_compare_exidx_entries
);
2438 /* VEC_lower_bound finds the earliest ordered insertion
2439 point. If the following symbol starts at this exact
2440 address, we use that; otherwise, the preceding
2441 exception table entry covers this address. */
2442 if (idx
< VEC_length (arm_exidx_entry_s
, map
))
2444 map_sym
= VEC_index (arm_exidx_entry_s
, map
, idx
);
2445 if (map_sym
->addr
== map_key
.addr
)
2448 *start
= map_sym
->addr
+ obj_section_addr (sec
);
2449 return map_sym
->entry
;
2455 map_sym
= VEC_index (arm_exidx_entry_s
, map
, idx
- 1);
2457 *start
= map_sym
->addr
+ obj_section_addr (sec
);
2458 return map_sym
->entry
;
2467 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2468 instruction list from the ARM exception table entry ENTRY, allocate and
2469 return a prologue cache structure describing how to unwind this frame.
2471 Return NULL if the unwinding instruction list contains a "spare",
2472 "reserved" or "refuse to unwind" instruction as defined in section
2473 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2474 for the ARM Architecture" document. */
2476 static struct arm_prologue_cache
*
2477 arm_exidx_fill_cache (struct frame_info
*this_frame
, gdb_byte
*entry
)
2482 struct arm_prologue_cache
*cache
;
2483 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2484 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2490 /* Whenever we reload SP, we actually have to retrieve its
2491 actual value in the current frame. */
2494 if (trad_frame_realreg_p (cache
->saved_regs
, ARM_SP_REGNUM
))
2496 int reg
= cache
->saved_regs
[ARM_SP_REGNUM
].realreg
;
2497 vsp
= get_frame_register_unsigned (this_frame
, reg
);
2501 CORE_ADDR addr
= cache
->saved_regs
[ARM_SP_REGNUM
].addr
;
2502 vsp
= get_frame_memory_unsigned (this_frame
, addr
, 4);
2508 /* Decode next unwind instruction. */
2511 if ((insn
& 0xc0) == 0)
2513 int offset
= insn
& 0x3f;
2514 vsp
+= (offset
<< 2) + 4;
2516 else if ((insn
& 0xc0) == 0x40)
2518 int offset
= insn
& 0x3f;
2519 vsp
-= (offset
<< 2) + 4;
2521 else if ((insn
& 0xf0) == 0x80)
2523 int mask
= ((insn
& 0xf) << 8) | *entry
++;
2526 /* The special case of an all-zero mask identifies
2527 "Refuse to unwind". We return NULL to fall back
2528 to the prologue analyzer. */
2532 /* Pop registers r4..r15 under mask. */
2533 for (i
= 0; i
< 12; i
++)
2534 if (mask
& (1 << i
))
2536 cache
->saved_regs
[4 + i
].addr
= vsp
;
2540 /* Special-case popping SP -- we need to reload vsp. */
2541 if (mask
& (1 << (ARM_SP_REGNUM
- 4)))
2544 else if ((insn
& 0xf0) == 0x90)
2546 int reg
= insn
& 0xf;
2548 /* Reserved cases. */
2549 if (reg
== ARM_SP_REGNUM
|| reg
== ARM_PC_REGNUM
)
2552 /* Set SP from another register and mark VSP for reload. */
2553 cache
->saved_regs
[ARM_SP_REGNUM
] = cache
->saved_regs
[reg
];
2556 else if ((insn
& 0xf0) == 0xa0)
2558 int count
= insn
& 0x7;
2559 int pop_lr
= (insn
& 0x8) != 0;
2562 /* Pop r4..r[4+count]. */
2563 for (i
= 0; i
<= count
; i
++)
2565 cache
->saved_regs
[4 + i
].addr
= vsp
;
2569 /* If indicated by flag, pop LR as well. */
2572 cache
->saved_regs
[ARM_LR_REGNUM
].addr
= vsp
;
2576 else if (insn
== 0xb0)
2578 /* We could only have updated PC by popping into it; if so, it
2579 will show up as address. Otherwise, copy LR into PC. */
2580 if (!trad_frame_addr_p (cache
->saved_regs
, ARM_PC_REGNUM
))
2581 cache
->saved_regs
[ARM_PC_REGNUM
]
2582 = cache
->saved_regs
[ARM_LR_REGNUM
];
2587 else if (insn
== 0xb1)
2589 int mask
= *entry
++;
2592 /* All-zero mask and mask >= 16 is "spare". */
2593 if (mask
== 0 || mask
>= 16)
2596 /* Pop r0..r3 under mask. */
2597 for (i
= 0; i
< 4; i
++)
2598 if (mask
& (1 << i
))
2600 cache
->saved_regs
[i
].addr
= vsp
;
2604 else if (insn
== 0xb2)
2606 ULONGEST offset
= 0;
2611 offset
|= (*entry
& 0x7f) << shift
;
2614 while (*entry
++ & 0x80);
2616 vsp
+= 0x204 + (offset
<< 2);
2618 else if (insn
== 0xb3)
2620 int start
= *entry
>> 4;
2621 int count
= (*entry
++) & 0xf;
2624 /* Only registers D0..D15 are valid here. */
2625 if (start
+ count
>= 16)
2628 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2629 for (i
= 0; i
<= count
; i
++)
2631 cache
->saved_regs
[ARM_D0_REGNUM
+ start
+ i
].addr
= vsp
;
2635 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2638 else if ((insn
& 0xf8) == 0xb8)
2640 int count
= insn
& 0x7;
2643 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2644 for (i
= 0; i
<= count
; i
++)
2646 cache
->saved_regs
[ARM_D0_REGNUM
+ 8 + i
].addr
= vsp
;
2650 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2653 else if (insn
== 0xc6)
2655 int start
= *entry
>> 4;
2656 int count
= (*entry
++) & 0xf;
2659 /* Only registers WR0..WR15 are valid. */
2660 if (start
+ count
>= 16)
2663 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2664 for (i
= 0; i
<= count
; i
++)
2666 cache
->saved_regs
[ARM_WR0_REGNUM
+ start
+ i
].addr
= vsp
;
2670 else if (insn
== 0xc7)
2672 int mask
= *entry
++;
2675 /* All-zero mask and mask >= 16 is "spare". */
2676 if (mask
== 0 || mask
>= 16)
2679 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2680 for (i
= 0; i
< 4; i
++)
2681 if (mask
& (1 << i
))
2683 cache
->saved_regs
[ARM_WCGR0_REGNUM
+ i
].addr
= vsp
;
2687 else if ((insn
& 0xf8) == 0xc0)
2689 int count
= insn
& 0x7;
2692 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2693 for (i
= 0; i
<= count
; i
++)
2695 cache
->saved_regs
[ARM_WR0_REGNUM
+ 10 + i
].addr
= vsp
;
2699 else if (insn
== 0xc8)
2701 int start
= *entry
>> 4;
2702 int count
= (*entry
++) & 0xf;
2705 /* Only registers D0..D31 are valid. */
2706 if (start
+ count
>= 16)
2709 /* Pop VFP double-precision registers
2710 D[16+start]..D[16+start+count]. */
2711 for (i
= 0; i
<= count
; i
++)
2713 cache
->saved_regs
[ARM_D0_REGNUM
+ 16 + start
+ i
].addr
= vsp
;
2717 else if (insn
== 0xc9)
2719 int start
= *entry
>> 4;
2720 int count
= (*entry
++) & 0xf;
2723 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2724 for (i
= 0; i
<= count
; i
++)
2726 cache
->saved_regs
[ARM_D0_REGNUM
+ start
+ i
].addr
= vsp
;
2730 else if ((insn
& 0xf8) == 0xd0)
2732 int count
= insn
& 0x7;
2735 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2736 for (i
= 0; i
<= count
; i
++)
2738 cache
->saved_regs
[ARM_D0_REGNUM
+ 8 + i
].addr
= vsp
;
2744 /* Everything else is "spare". */
2749 /* If we restore SP from a register, assume this was the frame register.
2750 Otherwise just fall back to SP as frame register. */
2751 if (trad_frame_realreg_p (cache
->saved_regs
, ARM_SP_REGNUM
))
2752 cache
->framereg
= cache
->saved_regs
[ARM_SP_REGNUM
].realreg
;
2754 cache
->framereg
= ARM_SP_REGNUM
;
2756 /* Determine offset to previous frame. */
2758 = vsp
- get_frame_register_unsigned (this_frame
, cache
->framereg
);
2760 /* We already got the previous SP. */
2761 cache
->prev_sp
= vsp
;
2766 /* Unwinding via ARM exception table entries. Note that the sniffer
2767 already computes a filled-in prologue cache, which is then used
2768 with the same arm_prologue_this_id and arm_prologue_prev_register
2769 routines also used for prologue-parsing based unwinding. */
2772 arm_exidx_unwind_sniffer (const struct frame_unwind
*self
,
2773 struct frame_info
*this_frame
,
2774 void **this_prologue_cache
)
2776 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2777 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2778 CORE_ADDR addr_in_block
, exidx_region
, func_start
;
2779 struct arm_prologue_cache
*cache
;
2782 /* See if we have an ARM exception table entry covering this address. */
2783 addr_in_block
= get_frame_address_in_block (this_frame
);
2784 entry
= arm_find_exidx_entry (addr_in_block
, &exidx_region
);
2788 /* The ARM exception table does not describe unwind information
2789 for arbitrary PC values, but is guaranteed to be correct only
2790 at call sites. We have to decide here whether we want to use
2791 ARM exception table information for this frame, or fall back
2792 to using prologue parsing. (Note that if we have DWARF CFI,
2793 this sniffer isn't even called -- CFI is always preferred.)
2795 Before we make this decision, however, we check whether we
2796 actually have *symbol* information for the current frame.
2797 If not, prologue parsing would not work anyway, so we might
2798 as well use the exception table and hope for the best. */
2799 if (find_pc_partial_function (addr_in_block
, NULL
, &func_start
, NULL
))
2803 /* If the next frame is "normal", we are at a call site in this
2804 frame, so exception information is guaranteed to be valid. */
2805 if (get_next_frame (this_frame
)
2806 && get_frame_type (get_next_frame (this_frame
)) == NORMAL_FRAME
)
2809 /* We also assume exception information is valid if we're currently
2810 blocked in a system call. The system library is supposed to
2811 ensure this, so that e.g. pthread cancellation works. */
2812 if (arm_frame_is_thumb (this_frame
))
2816 if (safe_read_memory_integer (get_frame_pc (this_frame
) - 2, 2,
2817 byte_order_for_code
, &insn
)
2818 && (insn
& 0xff00) == 0xdf00 /* svc */)
2825 if (safe_read_memory_integer (get_frame_pc (this_frame
) - 4, 4,
2826 byte_order_for_code
, &insn
)
2827 && (insn
& 0x0f000000) == 0x0f000000 /* svc */)
2831 /* Bail out if we don't know that exception information is valid. */
2835 /* The ARM exception index does not mark the *end* of the region
2836 covered by the entry, and some functions will not have any entry.
2837 To correctly recognize the end of the covered region, the linker
2838 should have inserted dummy records with a CANTUNWIND marker.
2840 Unfortunately, current versions of GNU ld do not reliably do
2841 this, and thus we may have found an incorrect entry above.
2842 As a (temporary) sanity check, we only use the entry if it
2843 lies *within* the bounds of the function. Note that this check
2844 might reject perfectly valid entries that just happen to cover
2845 multiple functions; therefore this check ought to be removed
2846 once the linker is fixed. */
2847 if (func_start
> exidx_region
)
2851 /* Decode the list of unwinding instructions into a prologue cache.
2852 Note that this may fail due to e.g. a "refuse to unwind" code. */
2853 cache
= arm_exidx_fill_cache (this_frame
, entry
);
2857 *this_prologue_cache
= cache
;
2861 struct frame_unwind arm_exidx_unwind
= {
2863 default_frame_unwind_stop_reason
,
2864 arm_prologue_this_id
,
2865 arm_prologue_prev_register
,
2867 arm_exidx_unwind_sniffer
2870 static struct arm_prologue_cache
*
2871 arm_make_stub_cache (struct frame_info
*this_frame
)
2873 struct arm_prologue_cache
*cache
;
2875 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2876 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2878 cache
->prev_sp
= get_frame_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2883 /* Our frame ID for a stub frame is the current SP and LR. */
2886 arm_stub_this_id (struct frame_info
*this_frame
,
2888 struct frame_id
*this_id
)
2890 struct arm_prologue_cache
*cache
;
2892 if (*this_cache
== NULL
)
2893 *this_cache
= arm_make_stub_cache (this_frame
);
2894 cache
= *this_cache
;
2896 *this_id
= frame_id_build (cache
->prev_sp
, get_frame_pc (this_frame
));
2900 arm_stub_unwind_sniffer (const struct frame_unwind
*self
,
2901 struct frame_info
*this_frame
,
2902 void **this_prologue_cache
)
2904 CORE_ADDR addr_in_block
;
2907 addr_in_block
= get_frame_address_in_block (this_frame
);
2908 if (in_plt_section (addr_in_block
, NULL
)
2909 /* We also use the stub winder if the target memory is unreadable
2910 to avoid having the prologue unwinder trying to read it. */
2911 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
2917 struct frame_unwind arm_stub_unwind
= {
2919 default_frame_unwind_stop_reason
,
2921 arm_prologue_prev_register
,
2923 arm_stub_unwind_sniffer
2927 arm_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
2929 struct arm_prologue_cache
*cache
;
2931 if (*this_cache
== NULL
)
2932 *this_cache
= arm_make_prologue_cache (this_frame
);
2933 cache
= *this_cache
;
2935 return cache
->prev_sp
- cache
->framesize
;
2938 struct frame_base arm_normal_base
= {
2939 &arm_prologue_unwind
,
2940 arm_normal_frame_base
,
2941 arm_normal_frame_base
,
2942 arm_normal_frame_base
2945 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2946 dummy frame. The frame ID's base needs to match the TOS value
2947 saved by save_dummy_frame_tos() and returned from
2948 arm_push_dummy_call, and the PC needs to match the dummy frame's
2951 static struct frame_id
2952 arm_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2954 return frame_id_build (get_frame_register_unsigned (this_frame
,
2956 get_frame_pc (this_frame
));
2959 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2960 be used to construct the previous frame's ID, after looking up the
2961 containing function). */
2964 arm_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2967 pc
= frame_unwind_register_unsigned (this_frame
, ARM_PC_REGNUM
);
2968 return arm_addr_bits_remove (gdbarch
, pc
);
2972 arm_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2974 return frame_unwind_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2977 static struct value
*
2978 arm_dwarf2_prev_register (struct frame_info
*this_frame
, void **this_cache
,
2981 struct gdbarch
* gdbarch
= get_frame_arch (this_frame
);
2983 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
2988 /* The PC is normally copied from the return column, which
2989 describes saves of LR. However, that version may have an
2990 extra bit set to indicate Thumb state. The bit is not
2992 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2993 return frame_unwind_got_constant (this_frame
, regnum
,
2994 arm_addr_bits_remove (gdbarch
, lr
));
2997 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2998 cpsr
= get_frame_register_unsigned (this_frame
, regnum
);
2999 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
3000 if (IS_THUMB_ADDR (lr
))
3004 return frame_unwind_got_constant (this_frame
, regnum
, cpsr
);
3007 internal_error (__FILE__
, __LINE__
,
3008 _("Unexpected register %d"), regnum
);
3013 arm_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
3014 struct dwarf2_frame_state_reg
*reg
,
3015 struct frame_info
*this_frame
)
3021 reg
->how
= DWARF2_FRAME_REG_FN
;
3022 reg
->loc
.fn
= arm_dwarf2_prev_register
;
3025 reg
->how
= DWARF2_FRAME_REG_CFA
;
3030 /* Return true if we are in the function's epilogue, i.e. after the
3031 instruction that destroyed the function's stack frame. */
3034 thumb_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3036 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3037 unsigned int insn
, insn2
;
3038 int found_return
= 0, found_stack_adjust
= 0;
3039 CORE_ADDR func_start
, func_end
;
3043 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
3046 /* The epilogue is a sequence of instructions along the following lines:
3048 - add stack frame size to SP or FP
3049 - [if frame pointer used] restore SP from FP
3050 - restore registers from SP [may include PC]
3051 - a return-type instruction [if PC wasn't already restored]
3053 In a first pass, we scan forward from the current PC and verify the
3054 instructions we find as compatible with this sequence, ending in a
3057 However, this is not sufficient to distinguish indirect function calls
3058 within a function from indirect tail calls in the epilogue in some cases.
3059 Therefore, if we didn't already find any SP-changing instruction during
3060 forward scan, we add a backward scanning heuristic to ensure we actually
3061 are in the epilogue. */
3064 while (scan_pc
< func_end
&& !found_return
)
3066 if (target_read_memory (scan_pc
, buf
, 2))
3070 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3072 if ((insn
& 0xff80) == 0x4700) /* bx <Rm> */
3074 else if (insn
== 0x46f7) /* mov pc, lr */
3076 else if (insn
== 0x46bd) /* mov sp, r7 */
3077 found_stack_adjust
= 1;
3078 else if ((insn
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3079 found_stack_adjust
= 1;
3080 else if ((insn
& 0xfe00) == 0xbc00) /* pop <registers> */
3082 found_stack_adjust
= 1;
3083 if (insn
& 0x0100) /* <registers> include PC. */
3086 else if (thumb_insn_size (insn
) == 4) /* 32-bit Thumb-2 instruction */
3088 if (target_read_memory (scan_pc
, buf
, 2))
3092 insn2
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3094 if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
3096 found_stack_adjust
= 1;
3097 if (insn2
& 0x8000) /* <registers> include PC. */
3100 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
3101 && (insn2
& 0x0fff) == 0x0b04)
3103 found_stack_adjust
= 1;
3104 if ((insn2
& 0xf000) == 0xf000) /* <Rt> is PC. */
3107 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
3108 && (insn2
& 0x0e00) == 0x0a00)
3109 found_stack_adjust
= 1;
3120 /* Since any instruction in the epilogue sequence, with the possible
3121 exception of return itself, updates the stack pointer, we need to
3122 scan backwards for at most one instruction. Try either a 16-bit or
3123 a 32-bit instruction. This is just a heuristic, so we do not worry
3124 too much about false positives. */
3126 if (!found_stack_adjust
)
3128 if (pc
- 4 < func_start
)
3130 if (target_read_memory (pc
- 4, buf
, 4))
3133 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3134 insn2
= extract_unsigned_integer (buf
+ 2, 2, byte_order_for_code
);
3136 if (insn2
== 0x46bd) /* mov sp, r7 */
3137 found_stack_adjust
= 1;
3138 else if ((insn2
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3139 found_stack_adjust
= 1;
3140 else if ((insn2
& 0xff00) == 0xbc00) /* pop <registers> without PC */
3141 found_stack_adjust
= 1;
3142 else if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
3143 found_stack_adjust
= 1;
3144 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
3145 && (insn2
& 0x0fff) == 0x0b04)
3146 found_stack_adjust
= 1;
3147 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
3148 && (insn2
& 0x0e00) == 0x0a00)
3149 found_stack_adjust
= 1;
3152 return found_stack_adjust
;
3155 /* Return true if we are in the function's epilogue, i.e. after the
3156 instruction that destroyed the function's stack frame. */
3159 arm_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3161 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3163 int found_return
, found_stack_adjust
;
3164 CORE_ADDR func_start
, func_end
;
3166 if (arm_pc_is_thumb (gdbarch
, pc
))
3167 return thumb_in_function_epilogue_p (gdbarch
, pc
);
3169 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
3172 /* We are in the epilogue if the previous instruction was a stack
3173 adjustment and the next instruction is a possible return (bx, mov
3174 pc, or pop). We could have to scan backwards to find the stack
3175 adjustment, or forwards to find the return, but this is a decent
3176 approximation. First scan forwards. */
3179 insn
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
3180 if (bits (insn
, 28, 31) != INST_NV
)
3182 if ((insn
& 0x0ffffff0) == 0x012fff10)
3185 else if ((insn
& 0x0ffffff0) == 0x01a0f000)
3188 else if ((insn
& 0x0fff0000) == 0x08bd0000
3189 && (insn
& 0x0000c000) != 0)
3190 /* POP (LDMIA), including PC or LR. */
3197 /* Scan backwards. This is just a heuristic, so do not worry about
3198 false positives from mode changes. */
3200 if (pc
< func_start
+ 4)
3203 found_stack_adjust
= 0;
3204 insn
= read_memory_unsigned_integer (pc
- 4, 4, byte_order_for_code
);
3205 if (bits (insn
, 28, 31) != INST_NV
)
3207 if ((insn
& 0x0df0f000) == 0x0080d000)
3208 /* ADD SP (register or immediate). */
3209 found_stack_adjust
= 1;
3210 else if ((insn
& 0x0df0f000) == 0x0040d000)
3211 /* SUB SP (register or immediate). */
3212 found_stack_adjust
= 1;
3213 else if ((insn
& 0x0ffffff0) == 0x01a0d000)
3215 found_stack_adjust
= 1;
3216 else if ((insn
& 0x0fff0000) == 0x08bd0000)
3218 found_stack_adjust
= 1;
3221 if (found_stack_adjust
)
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;			/* Size in bytes of DATA.  */
  struct stack_item *prev;	/* Next-older item on the stack.  */
  void *data;			/* Heap-allocated copy of the contents.  */
};
*
3239 push_stack_item (struct stack_item
*prev
, const void *contents
, int len
)
3241 struct stack_item
*si
;
3242 si
= xmalloc (sizeof (struct stack_item
));
3243 si
->data
= xmalloc (len
);
3246 memcpy (si
->data
, contents
, len
);
3250 static struct stack_item
*
3251 pop_stack_item (struct stack_item
*si
)
3253 struct stack_item
*dead
= si
;
3261 /* Return the alignment (in bytes) of the given type. */
3264 arm_type_align (struct type
*t
)
3270 t
= check_typedef (t
);
3271 switch (TYPE_CODE (t
))
3274 /* Should never happen. */
3275 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
3279 case TYPE_CODE_ENUM
:
3283 case TYPE_CODE_RANGE
:
3284 case TYPE_CODE_BITSTRING
:
3286 case TYPE_CODE_CHAR
:
3287 case TYPE_CODE_BOOL
:
3288 return TYPE_LENGTH (t
);
3290 case TYPE_CODE_ARRAY
:
3291 case TYPE_CODE_COMPLEX
:
3292 /* TODO: What about vector types? */
3293 return arm_type_align (TYPE_TARGET_TYPE (t
));
3295 case TYPE_CODE_STRUCT
:
3296 case TYPE_CODE_UNION
:
3298 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
3300 falign
= arm_type_align (TYPE_FIELD_TYPE (t
, n
));
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,
  VFP_CPRC_SINGLE,
  VFP_CPRC_DOUBLE,
  VFP_CPRC_VEC64,
  VFP_CPRC_VEC128
};
3320 /* The length of one element of base type B. */
3323 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b
)
3327 case VFP_CPRC_SINGLE
:
3329 case VFP_CPRC_DOUBLE
:
3331 case VFP_CPRC_VEC64
:
3333 case VFP_CPRC_VEC128
:
3336 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
3341 /* The character ('s', 'd' or 'q') for the type of VFP register used
3342 for passing base type B. */
3345 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b
)
3349 case VFP_CPRC_SINGLE
:
3351 case VFP_CPRC_DOUBLE
:
3353 case VFP_CPRC_VEC64
:
3355 case VFP_CPRC_VEC128
:
3358 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
3363 /* Determine whether T may be part of a candidate for passing and
3364 returning in VFP registers, ignoring the limit on the total number
3365 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3366 classification of the first valid component found; if it is not
3367 VFP_CPRC_UNKNOWN, all components must have the same classification
3368 as *BASE_TYPE. If it is found that T contains a type not permitted
3369 for passing and returning in VFP registers, a type differently
3370 classified from *BASE_TYPE, or two types differently classified
3371 from each other, return -1, otherwise return the total number of
3372 base-type elements found (possibly 0 in an empty structure or
3373 array). Vectors and complex types are not currently supported,
3374 matching the generic AAPCS support. */
3377 arm_vfp_cprc_sub_candidate (struct type
*t
,
3378 enum arm_vfp_cprc_base_type
*base_type
)
3380 t
= check_typedef (t
);
3381 switch (TYPE_CODE (t
))
3384 switch (TYPE_LENGTH (t
))
3387 if (*base_type
== VFP_CPRC_UNKNOWN
)
3388 *base_type
= VFP_CPRC_SINGLE
;
3389 else if (*base_type
!= VFP_CPRC_SINGLE
)
3394 if (*base_type
== VFP_CPRC_UNKNOWN
)
3395 *base_type
= VFP_CPRC_DOUBLE
;
3396 else if (*base_type
!= VFP_CPRC_DOUBLE
)
3405 case TYPE_CODE_ARRAY
:
3409 count
= arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t
), base_type
);
3412 if (TYPE_LENGTH (t
) == 0)
3414 gdb_assert (count
== 0);
3417 else if (count
== 0)
3419 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3420 gdb_assert ((TYPE_LENGTH (t
) % unitlen
) == 0);
3421 return TYPE_LENGTH (t
) / unitlen
;
3425 case TYPE_CODE_STRUCT
:
3430 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
3432 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
3434 if (sub_count
== -1)
3438 if (TYPE_LENGTH (t
) == 0)
3440 gdb_assert (count
== 0);
3443 else if (count
== 0)
3445 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3446 if (TYPE_LENGTH (t
) != unitlen
* count
)
3451 case TYPE_CODE_UNION
:
3456 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
3458 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
3460 if (sub_count
== -1)
3462 count
= (count
> sub_count
? count
: sub_count
);
3464 if (TYPE_LENGTH (t
) == 0)
3466 gdb_assert (count
== 0);
3469 else if (count
== 0)
3471 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3472 if (TYPE_LENGTH (t
) != unitlen
* count
)
3484 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3485 if passed to or returned from a non-variadic function with the VFP
3486 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3487 *BASE_TYPE to the base type for T and *COUNT to the number of
3488 elements of that base type before returning. */
3491 arm_vfp_call_candidate (struct type
*t
, enum arm_vfp_cprc_base_type
*base_type
,
3494 enum arm_vfp_cprc_base_type b
= VFP_CPRC_UNKNOWN
;
3495 int c
= arm_vfp_cprc_sub_candidate (t
, &b
);
3496 if (c
<= 0 || c
> 4)
3503 /* Return 1 if the VFP ABI should be used for passing arguments to and
3504 returning values from a function of type FUNC_TYPE, 0
3508 arm_vfp_abi_for_function (struct gdbarch
*gdbarch
, struct type
*func_type
)
3510 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3511 /* Variadic functions always use the base ABI. Assume that functions
3512 without debug info are not variadic. */
3513 if (func_type
&& TYPE_VARARGS (check_typedef (func_type
)))
3515 /* The VFP ABI is only supported as a variant of AAPCS. */
3516 if (tdep
->arm_abi
!= ARM_ABI_AAPCS
)
3518 return gdbarch_tdep (gdbarch
)->fp_model
== ARM_FLOAT_VFP
;
3521 /* We currently only support passing parameters in integer registers, which
3522 conforms with GCC's default model, and VFP argument passing following
3523 the VFP variant of AAPCS. Several other variants exist and
3524 we should probably support some of them based on the selected ABI. */
3527 arm_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
3528 struct regcache
*regcache
, CORE_ADDR bp_addr
, int nargs
,
3529 struct value
**args
, CORE_ADDR sp
, int struct_return
,
3530 CORE_ADDR struct_addr
)
3532 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3536 struct stack_item
*si
= NULL
;
3539 unsigned vfp_regs_free
= (1 << 16) - 1;
3541 /* Determine the type of this function and whether the VFP ABI
3543 ftype
= check_typedef (value_type (function
));
3544 if (TYPE_CODE (ftype
) == TYPE_CODE_PTR
)
3545 ftype
= check_typedef (TYPE_TARGET_TYPE (ftype
));
3546 use_vfp_abi
= arm_vfp_abi_for_function (gdbarch
, ftype
);
3548 /* Set the return address. For the ARM, the return breakpoint is
3549 always at BP_ADDR. */
3550 if (arm_pc_is_thumb (gdbarch
, bp_addr
))
3552 regcache_cooked_write_unsigned (regcache
, ARM_LR_REGNUM
, bp_addr
);
3554 /* Walk through the list of args and determine how large a temporary
3555 stack is required. Need to take care here as structs may be
3556 passed on the stack, and we have to push them. */
3559 argreg
= ARM_A1_REGNUM
;
3562 /* The struct_return pointer occupies the first parameter
3563 passing register. */
3567 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = %s\n",
3568 gdbarch_register_name (gdbarch
, argreg
),
3569 paddress (gdbarch
, struct_addr
));
3570 regcache_cooked_write_unsigned (regcache
, argreg
, struct_addr
);
3574 for (argnum
= 0; argnum
< nargs
; argnum
++)
3577 struct type
*arg_type
;
3578 struct type
*target_type
;
3579 enum type_code typecode
;
3580 const bfd_byte
*val
;
3582 enum arm_vfp_cprc_base_type vfp_base_type
;
3584 int may_use_core_reg
= 1;
3586 arg_type
= check_typedef (value_type (args
[argnum
]));
3587 len
= TYPE_LENGTH (arg_type
);
3588 target_type
= TYPE_TARGET_TYPE (arg_type
);
3589 typecode
= TYPE_CODE (arg_type
);
3590 val
= value_contents (args
[argnum
]);
3592 align
= arm_type_align (arg_type
);
3593 /* Round alignment up to a whole number of words. */
3594 align
= (align
+ INT_REGISTER_SIZE
- 1) & ~(INT_REGISTER_SIZE
- 1);
3595 /* Different ABIs have different maximum alignments. */
3596 if (gdbarch_tdep (gdbarch
)->arm_abi
== ARM_ABI_APCS
)
3598 /* The APCS ABI only requires word alignment. */
3599 align
= INT_REGISTER_SIZE
;
3603 /* The AAPCS requires at most doubleword alignment. */
3604 if (align
> INT_REGISTER_SIZE
* 2)
3605 align
= INT_REGISTER_SIZE
* 2;
3609 && arm_vfp_call_candidate (arg_type
, &vfp_base_type
,
3617 /* Because this is a CPRC it cannot go in a core register or
3618 cause a core register to be skipped for alignment.
3619 Either it goes in VFP registers and the rest of this loop
3620 iteration is skipped for this argument, or it goes on the
3621 stack (and the stack alignment code is correct for this
3623 may_use_core_reg
= 0;
3625 unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
3626 shift
= unit_length
/ 4;
3627 mask
= (1 << (shift
* vfp_base_count
)) - 1;
3628 for (regno
= 0; regno
< 16; regno
+= shift
)
3629 if (((vfp_regs_free
>> regno
) & mask
) == mask
)
3638 vfp_regs_free
&= ~(mask
<< regno
);
3639 reg_scaled
= regno
/ shift
;
3640 reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
3641 for (i
= 0; i
< vfp_base_count
; i
++)
3645 if (reg_char
== 'q')
3646 arm_neon_quad_write (gdbarch
, regcache
, reg_scaled
+ i
,
3647 val
+ i
* unit_length
);
3650 sprintf (name_buf
, "%c%d", reg_char
, reg_scaled
+ i
);
3651 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3653 regcache_cooked_write (regcache
, regnum
,
3654 val
+ i
* unit_length
);
3661 /* This CPRC could not go in VFP registers, so all VFP
3662 registers are now marked as used. */
3667 /* Push stack padding for doubleword alignment. */
3668 if (nstack
& (align
- 1))
3670 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
3671 nstack
+= INT_REGISTER_SIZE
;
3674 /* Doubleword aligned quantities must go in even register pairs. */
3675 if (may_use_core_reg
3676 && argreg
<= ARM_LAST_ARG_REGNUM
3677 && align
> INT_REGISTER_SIZE
3681 /* If the argument is a pointer to a function, and it is a
3682 Thumb function, create a LOCAL copy of the value and set
3683 the THUMB bit in it. */
3684 if (TYPE_CODE_PTR
== typecode
3685 && target_type
!= NULL
3686 && TYPE_CODE_FUNC
== TYPE_CODE (check_typedef (target_type
)))
3688 CORE_ADDR regval
= extract_unsigned_integer (val
, len
, byte_order
);
3689 if (arm_pc_is_thumb (gdbarch
, regval
))
3691 bfd_byte
*copy
= alloca (len
);
3692 store_unsigned_integer (copy
, len
, byte_order
,
3693 MAKE_THUMB_ADDR (regval
));
3698 /* Copy the argument to general registers or the stack in
3699 register-sized pieces. Large arguments are split between
3700 registers and stack. */
3703 int partial_len
= len
< INT_REGISTER_SIZE
? len
: INT_REGISTER_SIZE
;
3705 if (may_use_core_reg
&& argreg
<= ARM_LAST_ARG_REGNUM
)
3707 /* The argument is being passed in a general purpose
3710 = extract_unsigned_integer (val
, partial_len
, byte_order
);
3711 if (byte_order
== BFD_ENDIAN_BIG
)
3712 regval
<<= (INT_REGISTER_SIZE
- partial_len
) * 8;
3714 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
3716 gdbarch_register_name
3718 phex (regval
, INT_REGISTER_SIZE
));
3719 regcache_cooked_write_unsigned (regcache
, argreg
, regval
);
3724 /* Push the arguments onto the stack. */
3726 fprintf_unfiltered (gdb_stdlog
, "arg %d @ sp + %d\n",
3728 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
3729 nstack
+= INT_REGISTER_SIZE
;
3736 /* If we have an odd number of words to push, then decrement the stack
3737 by one word now, so first stack argument will be dword aligned. */
3744 write_memory (sp
, si
->data
, si
->len
);
3745 si
= pop_stack_item (si
);
3748 /* Finally, update the SP register. */
3749 regcache_cooked_write_unsigned (regcache
, ARM_SP_REGNUM
, sp
);
3755 /* Always align the frame to an 8-byte boundary. This is required on
3756 some platforms and harmless on the rest. */
3759 arm_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
3761 /* Align the stack to eight bytes. */
3762 return sp
& ~ (CORE_ADDR
) 7;
/* Print the FPA exception flag names encoded in the low five bits of
   FLAGS to stdout, one mnemonic per set bit, followed by a newline.  */

static void
print_fpu_flags (int flags)
{
  /* Bit 0 = invalid operation, 1 = divide by zero, 2 = overflow,
     3 = underflow, 4 = inexact.  */
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs (flag_names[i], stdout);
  putc ('\n', stdout);
}
3781 /* Print interesting information about the floating point processor
3782 (if present) or emulator. */
3784 arm_print_float_info (struct gdbarch
*gdbarch
, struct ui_file
*file
,
3785 struct frame_info
*frame
, const char *args
)
3787 unsigned long status
= get_frame_register_unsigned (frame
, ARM_FPS_REGNUM
);
3790 type
= (status
>> 24) & 127;
3791 if (status
& (1 << 31))
3792 printf (_("Hardware FPU type %d\n"), type
);
3794 printf (_("Software FPU type %d\n"), type
);
3795 /* i18n: [floating point unit] mask */
3796 fputs (_("mask: "), stdout
);
3797 print_fpu_flags (status
>> 16);
3798 /* i18n: [floating point unit] flags */
3799 fputs (_("flags: "), stdout
);
3800 print_fpu_flags (status
);
3803 /* Construct the ARM extended floating point type. */
3804 static struct type
*
3805 arm_ext_type (struct gdbarch
*gdbarch
)
3807 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3809 if (!tdep
->arm_ext_type
)
3811 = arch_float_type (gdbarch
, -1, "builtin_type_arm_ext",
3812 floatformats_arm_ext
);
3814 return tdep
->arm_ext_type
;
3817 static struct type
*
3818 arm_neon_double_type (struct gdbarch
*gdbarch
)
3820 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3822 if (tdep
->neon_double_type
== NULL
)
3824 struct type
*t
, *elem
;
3826 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_d",
3828 elem
= builtin_type (gdbarch
)->builtin_uint8
;
3829 append_composite_type_field (t
, "u8", init_vector_type (elem
, 8));
3830 elem
= builtin_type (gdbarch
)->builtin_uint16
;
3831 append_composite_type_field (t
, "u16", init_vector_type (elem
, 4));
3832 elem
= builtin_type (gdbarch
)->builtin_uint32
;
3833 append_composite_type_field (t
, "u32", init_vector_type (elem
, 2));
3834 elem
= builtin_type (gdbarch
)->builtin_uint64
;
3835 append_composite_type_field (t
, "u64", elem
);
3836 elem
= builtin_type (gdbarch
)->builtin_float
;
3837 append_composite_type_field (t
, "f32", init_vector_type (elem
, 2));
3838 elem
= builtin_type (gdbarch
)->builtin_double
;
3839 append_composite_type_field (t
, "f64", elem
);
3841 TYPE_VECTOR (t
) = 1;
3842 TYPE_NAME (t
) = "neon_d";
3843 tdep
->neon_double_type
= t
;
3846 return tdep
->neon_double_type
;
3849 /* FIXME: The vector types are not correctly ordered on big-endian
3850 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3851 bits of d0 - regardless of what unit size is being held in d0. So
3852 the offset of the first uint8 in d0 is 7, but the offset of the
3853 first float is 4. This code works as-is for little-endian
3856 static struct type
*
3857 arm_neon_quad_type (struct gdbarch
*gdbarch
)
3859 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3861 if (tdep
->neon_quad_type
== NULL
)
3863 struct type
*t
, *elem
;
3865 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_q",
3867 elem
= builtin_type (gdbarch
)->builtin_uint8
;
3868 append_composite_type_field (t
, "u8", init_vector_type (elem
, 16));
3869 elem
= builtin_type (gdbarch
)->builtin_uint16
;
3870 append_composite_type_field (t
, "u16", init_vector_type (elem
, 8));
3871 elem
= builtin_type (gdbarch
)->builtin_uint32
;
3872 append_composite_type_field (t
, "u32", init_vector_type (elem
, 4));
3873 elem
= builtin_type (gdbarch
)->builtin_uint64
;
3874 append_composite_type_field (t
, "u64", init_vector_type (elem
, 2));
3875 elem
= builtin_type (gdbarch
)->builtin_float
;
3876 append_composite_type_field (t
, "f32", init_vector_type (elem
, 4));
3877 elem
= builtin_type (gdbarch
)->builtin_double
;
3878 append_composite_type_field (t
, "f64", init_vector_type (elem
, 2));
3880 TYPE_VECTOR (t
) = 1;
3881 TYPE_NAME (t
) = "neon_q";
3882 tdep
->neon_quad_type
= t
;
3885 return tdep
->neon_quad_type
;
3888 /* Return the GDB type object for the "standard" data type of data in
3891 static struct type
*
3892 arm_register_type (struct gdbarch
*gdbarch
, int regnum
)
3894 int num_regs
= gdbarch_num_regs (gdbarch
);
3896 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
3897 && regnum
>= num_regs
&& regnum
< num_regs
+ 32)
3898 return builtin_type (gdbarch
)->builtin_float
;
3900 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
3901 && regnum
>= num_regs
+ 32 && regnum
< num_regs
+ 32 + 16)
3902 return arm_neon_quad_type (gdbarch
);
3904 /* If the target description has register information, we are only
3905 in this function so that we can override the types of
3906 double-precision registers for NEON. */
3907 if (tdesc_has_registers (gdbarch_target_desc (gdbarch
)))
3909 struct type
*t
= tdesc_register_type (gdbarch
, regnum
);
3911 if (regnum
>= ARM_D0_REGNUM
&& regnum
< ARM_D0_REGNUM
+ 32
3912 && TYPE_CODE (t
) == TYPE_CODE_FLT
3913 && gdbarch_tdep (gdbarch
)->have_neon
)
3914 return arm_neon_double_type (gdbarch
);
3919 if (regnum
>= ARM_F0_REGNUM
&& regnum
< ARM_F0_REGNUM
+ NUM_FREGS
)
3921 if (!gdbarch_tdep (gdbarch
)->have_fpa_registers
)
3922 return builtin_type (gdbarch
)->builtin_void
;
3924 return arm_ext_type (gdbarch
);
3926 else if (regnum
== ARM_SP_REGNUM
)
3927 return builtin_type (gdbarch
)->builtin_data_ptr
;
3928 else if (regnum
== ARM_PC_REGNUM
)
3929 return builtin_type (gdbarch
)->builtin_func_ptr
;
3930 else if (regnum
>= ARRAY_SIZE (arm_register_names
))
3931 /* These registers are only supported on targets which supply
3932 an XML description. */
3933 return builtin_type (gdbarch
)->builtin_int0
;
3935 return builtin_type (gdbarch
)->builtin_uint32
;
3938 /* Map a DWARF register REGNUM onto the appropriate GDB register
3942 arm_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
3944 /* Core integer regs. */
3945 if (reg
>= 0 && reg
<= 15)
3948 /* Legacy FPA encoding. These were once used in a way which
3949 overlapped with VFP register numbering, so their use is
3950 discouraged, but GDB doesn't support the ARM toolchain
3951 which used them for VFP. */
3952 if (reg
>= 16 && reg
<= 23)
3953 return ARM_F0_REGNUM
+ reg
- 16;
3955 /* New assignments for the FPA registers. */
3956 if (reg
>= 96 && reg
<= 103)
3957 return ARM_F0_REGNUM
+ reg
- 96;
3959 /* WMMX register assignments. */
3960 if (reg
>= 104 && reg
<= 111)
3961 return ARM_WCGR0_REGNUM
+ reg
- 104;
3963 if (reg
>= 112 && reg
<= 127)
3964 return ARM_WR0_REGNUM
+ reg
- 112;
3966 if (reg
>= 192 && reg
<= 199)
3967 return ARM_WC0_REGNUM
+ reg
- 192;
3969 /* VFP v2 registers. A double precision value is actually
3970 in d1 rather than s2, but the ABI only defines numbering
3971 for the single precision registers. This will "just work"
3972 in GDB for little endian targets (we'll read eight bytes,
3973 starting in s0 and then progressing to s1), but will be
3974 reversed on big endian targets with VFP. This won't
3975 be a problem for the new Neon quad registers; you're supposed
3976 to use DW_OP_piece for those. */
3977 if (reg
>= 64 && reg
<= 95)
3981 sprintf (name_buf
, "s%d", reg
- 64);
3982 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3986 /* VFP v3 / Neon registers. This range is also used for VFP v2
3987 registers, except that it now describes d0 instead of s0. */
3988 if (reg
>= 256 && reg
<= 287)
3992 sprintf (name_buf
, "d%d", reg
- 256);
3993 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
4000 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4002 arm_register_sim_regno (struct gdbarch
*gdbarch
, int regnum
)
4005 gdb_assert (reg
>= 0 && reg
< gdbarch_num_regs (gdbarch
));
4007 if (regnum
>= ARM_WR0_REGNUM
&& regnum
<= ARM_WR15_REGNUM
)
4008 return regnum
- ARM_WR0_REGNUM
+ SIM_ARM_IWMMXT_COP0R0_REGNUM
;
4010 if (regnum
>= ARM_WC0_REGNUM
&& regnum
<= ARM_WC7_REGNUM
)
4011 return regnum
- ARM_WC0_REGNUM
+ SIM_ARM_IWMMXT_COP1R0_REGNUM
;
4013 if (regnum
>= ARM_WCGR0_REGNUM
&& regnum
<= ARM_WCGR7_REGNUM
)
4014 return regnum
- ARM_WCGR0_REGNUM
+ SIM_ARM_IWMMXT_COP1R8_REGNUM
;
4016 if (reg
< NUM_GREGS
)
4017 return SIM_ARM_R0_REGNUM
+ reg
;
4020 if (reg
< NUM_FREGS
)
4021 return SIM_ARM_FP0_REGNUM
+ reg
;
4024 if (reg
< NUM_SREGS
)
4025 return SIM_ARM_FPS_REGNUM
+ reg
;
4028 internal_error (__FILE__
, __LINE__
, _("Bad REGNUM %d"), regnum
);
4031 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4032 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4033 It is thought that this is is the floating-point register format on
4034 little-endian systems. */
4037 convert_from_extended (const struct floatformat
*fmt
, const void *ptr
,
4038 void *dbl
, int endianess
)
4042 if (endianess
== BFD_ENDIAN_BIG
)
4043 floatformat_to_doublest (&floatformat_arm_ext_big
, ptr
, &d
);
4045 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword
,
4047 floatformat_from_doublest (fmt
, &d
, dbl
);
4051 convert_to_extended (const struct floatformat
*fmt
, void *dbl
, const void *ptr
,
4056 floatformat_to_doublest (fmt
, ptr
, &d
);
4057 if (endianess
== BFD_ENDIAN_BIG
)
4058 floatformat_from_doublest (&floatformat_arm_ext_big
, &d
, dbl
);
4060 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword
,
4065 condition_true (unsigned long cond
, unsigned long status_reg
)
4067 if (cond
== INST_AL
|| cond
== INST_NV
)
4073 return ((status_reg
& FLAG_Z
) != 0);
4075 return ((status_reg
& FLAG_Z
) == 0);
4077 return ((status_reg
& FLAG_C
) != 0);
4079 return ((status_reg
& FLAG_C
) == 0);
4081 return ((status_reg
& FLAG_N
) != 0);
4083 return ((status_reg
& FLAG_N
) == 0);
4085 return ((status_reg
& FLAG_V
) != 0);
4087 return ((status_reg
& FLAG_V
) == 0);
4089 return ((status_reg
& (FLAG_C
| FLAG_Z
)) == FLAG_C
);
4091 return ((status_reg
& (FLAG_C
| FLAG_Z
)) != FLAG_C
);
4093 return (((status_reg
& FLAG_N
) == 0) == ((status_reg
& FLAG_V
) == 0));
4095 return (((status_reg
& FLAG_N
) == 0) != ((status_reg
& FLAG_V
) == 0));
4097 return (((status_reg
& FLAG_Z
) == 0)
4098 && (((status_reg
& FLAG_N
) == 0)
4099 == ((status_reg
& FLAG_V
) == 0)));
4101 return (((status_reg
& FLAG_Z
) != 0)
4102 || (((status_reg
& FLAG_N
) == 0)
4103 != ((status_reg
& FLAG_V
) == 0)));
4108 static unsigned long
4109 shifted_reg_val (struct frame_info
*frame
, unsigned long inst
, int carry
,
4110 unsigned long pc_val
, unsigned long status_reg
)
4112 unsigned long res
, shift
;
4113 int rm
= bits (inst
, 0, 3);
4114 unsigned long shifttype
= bits (inst
, 5, 6);
4118 int rs
= bits (inst
, 8, 11);
4119 shift
= (rs
== 15 ? pc_val
+ 8
4120 : get_frame_register_unsigned (frame
, rs
)) & 0xFF;
4123 shift
= bits (inst
, 7, 11);
4125 res
= (rm
== ARM_PC_REGNUM
4126 ? (pc_val
+ (bit (inst
, 4) ? 12 : 8))
4127 : get_frame_register_unsigned (frame
, rm
));
4132 res
= shift
>= 32 ? 0 : res
<< shift
;
4136 res
= shift
>= 32 ? 0 : res
>> shift
;
4142 res
= ((res
& 0x80000000L
)
4143 ? ~((~res
) >> shift
) : res
>> shift
);
4146 case 3: /* ROR/RRX */
4149 res
= (res
>> 1) | (carry
? 0x80000000L
: 0);
4151 res
= (res
>> shift
) | (res
<< (32 - shift
));
4155 return res
& 0xffffffff;
/* Return number of 1-bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int count = 0;

  while (val != 0)
    {
      /* Kernighan's trick: clear the least-significant set bit.  */
      val &= val - 1;
      count++;
    }
  return count;
}
/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.  */

static int
thumb_insn_size (unsigned short inst1)
{
  /* A 32-bit Thumb-2 instruction starts with one of the halfword
     prefixes 0b11101, 0b11110 or 0b11111; everything else is a
     16-bit instruction.  */
  return ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0) ? 4 : 2;
}
4182 thumb_advance_itstate (unsigned int itstate
)
4184 /* Preserve IT[7:5], the first three bits of the condition. Shift
4185 the upcoming condition flags left by one bit. */
4186 itstate
= (itstate
& 0xe0) | ((itstate
<< 1) & 0x1f);
4188 /* If we have finished the IT block, clear the state. */
4189 if ((itstate
& 0x0f) == 0)
4195 /* Find the next PC after the current instruction executes. In some
4196 cases we can not statically determine the answer (see the IT state
4197 handling in this function); in that case, a breakpoint may be
4198 inserted in addition to the returned PC, which will be used to set
4199 another breakpoint by our caller. */
4202 thumb_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
)
4204 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4205 struct address_space
*aspace
= get_frame_address_space (frame
);
4206 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4207 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4208 unsigned long pc_val
= ((unsigned long) pc
) + 4; /* PC after prefetch */
4209 unsigned short inst1
;
4210 CORE_ADDR nextpc
= pc
+ 2; /* Default is next instruction. */
4211 unsigned long offset
;
4212 ULONGEST status
, itstate
;
4214 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4215 pc_val
= MAKE_THUMB_ADDR (pc_val
);
4217 inst1
= read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
4219 /* Thumb-2 conditional execution support. There are eight bits in
4220 the CPSR which describe conditional execution state. Once
4221 reconstructed (they're in a funny order), the low five bits
4222 describe the low bit of the condition for each instruction and
4223 how many instructions remain. The high three bits describe the
4224 base condition. One of the low four bits will be set if an IT
4225 block is active. These bits read as zero on earlier
4227 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4228 itstate
= ((status
>> 8) & 0xfc) | ((status
>> 25) & 0x3);
4230 /* If-Then handling. On GNU/Linux, where this routine is used, we
4231 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4232 can disable execution of the undefined instruction. So we might
4233 miss the breakpoint if we set it on a skipped conditional
4234 instruction. Because conditional instructions can change the
4235 flags, affecting the execution of further instructions, we may
4236 need to set two breakpoints. */
4238 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
!= NULL
)
4240 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
4242 /* An IT instruction. Because this instruction does not
4243 modify the flags, we can accurately predict the next
4244 executed instruction. */
4245 itstate
= inst1
& 0x00ff;
4246 pc
+= thumb_insn_size (inst1
);
4248 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
4250 inst1
= read_memory_unsigned_integer (pc
, 2,
4251 byte_order_for_code
);
4252 pc
+= thumb_insn_size (inst1
);
4253 itstate
= thumb_advance_itstate (itstate
);
4256 return MAKE_THUMB_ADDR (pc
);
4258 else if (itstate
!= 0)
4260 /* We are in a conditional block. Check the condition. */
4261 if (! condition_true (itstate
>> 4, status
))
4263 /* Advance to the next executed instruction. */
4264 pc
+= thumb_insn_size (inst1
);
4265 itstate
= thumb_advance_itstate (itstate
);
4267 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
4269 inst1
= read_memory_unsigned_integer (pc
, 2,
4270 byte_order_for_code
);
4271 pc
+= thumb_insn_size (inst1
);
4272 itstate
= thumb_advance_itstate (itstate
);
4275 return MAKE_THUMB_ADDR (pc
);
4277 else if ((itstate
& 0x0f) == 0x08)
4279 /* This is the last instruction of the conditional
4280 block, and it is executed. We can handle it normally
4281 because the following instruction is not conditional,
4282 and we must handle it normally because it is
4283 permitted to branch. Fall through. */
4289 /* There are conditional instructions after this one.
4290 If this instruction modifies the flags, then we can
4291 not predict what the next executed instruction will
4292 be. Fortunately, this instruction is architecturally
4293 forbidden to branch; we know it will fall through.
4294 Start by skipping past it. */
4295 pc
+= thumb_insn_size (inst1
);
4296 itstate
= thumb_advance_itstate (itstate
);
4298 /* Set a breakpoint on the following instruction. */
4299 gdb_assert ((itstate
& 0x0f) != 0);
4300 arm_insert_single_step_breakpoint (gdbarch
, aspace
,
4301 MAKE_THUMB_ADDR (pc
));
4302 cond_negated
= (itstate
>> 4) & 1;
4304 /* Skip all following instructions with the same
4305 condition. If there is a later instruction in the IT
4306 block with the opposite condition, set the other
4307 breakpoint there. If not, then set a breakpoint on
4308 the instruction after the IT block. */
4311 inst1
= read_memory_unsigned_integer (pc
, 2,
4312 byte_order_for_code
);
4313 pc
+= thumb_insn_size (inst1
);
4314 itstate
= thumb_advance_itstate (itstate
);
4316 while (itstate
!= 0 && ((itstate
>> 4) & 1) == cond_negated
);
4318 return MAKE_THUMB_ADDR (pc
);
4322 else if (itstate
& 0x0f)
4324 /* We are in a conditional block. Check the condition. */
4325 int cond
= itstate
>> 4;
4327 if (! condition_true (cond
, status
))
4328 /* Advance to the next instruction. All the 32-bit
4329 instructions share a common prefix. */
4330 return MAKE_THUMB_ADDR (pc
+ thumb_insn_size (inst1
));
4332 /* Otherwise, handle the instruction normally. */
4335 if ((inst1
& 0xff00) == 0xbd00) /* pop {rlist, pc} */
4339 /* Fetch the saved PC from the stack. It's stored above
4340 all of the other registers. */
4341 offset
= bitcount (bits (inst1
, 0, 7)) * INT_REGISTER_SIZE
;
4342 sp
= get_frame_register_unsigned (frame
, ARM_SP_REGNUM
);
4343 nextpc
= read_memory_unsigned_integer (sp
+ offset
, 4, byte_order
);
4345 else if ((inst1
& 0xf000) == 0xd000) /* conditional branch */
4347 unsigned long cond
= bits (inst1
, 8, 11);
4348 if (cond
== 0x0f) /* 0x0f = SWI */
4350 struct gdbarch_tdep
*tdep
;
4351 tdep
= gdbarch_tdep (gdbarch
);
4353 if (tdep
->syscall_next_pc
!= NULL
)
4354 nextpc
= tdep
->syscall_next_pc (frame
);
4357 else if (cond
!= 0x0f && condition_true (cond
, status
))
4358 nextpc
= pc_val
+ (sbits (inst1
, 0, 7) << 1);
4360 else if ((inst1
& 0xf800) == 0xe000) /* unconditional branch */
4362 nextpc
= pc_val
+ (sbits (inst1
, 0, 10) << 1);
4364 else if (thumb_insn_size (inst1
) == 4) /* 32-bit instruction */
4366 unsigned short inst2
;
4367 inst2
= read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
4369 /* Default to the next instruction. */
4371 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4373 if ((inst1
& 0xf800) == 0xf000 && (inst2
& 0x8000) == 0x8000)
4375 /* Branches and miscellaneous control instructions. */
4377 if ((inst2
& 0x1000) != 0 || (inst2
& 0xd001) == 0xc000)
4380 int j1
, j2
, imm1
, imm2
;
4382 imm1
= sbits (inst1
, 0, 10);
4383 imm2
= bits (inst2
, 0, 10);
4384 j1
= bit (inst2
, 13);
4385 j2
= bit (inst2
, 11);
4387 offset
= ((imm1
<< 12) + (imm2
<< 1));
4388 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
4390 nextpc
= pc_val
+ offset
;
4391 /* For BLX make sure to clear the low bits. */
4392 if (bit (inst2
, 12) == 0)
4393 nextpc
= nextpc
& 0xfffffffc;
4395 else if (inst1
== 0xf3de && (inst2
& 0xff00) == 0x3f00)
4397 /* SUBS PC, LR, #imm8. */
4398 nextpc
= get_frame_register_unsigned (frame
, ARM_LR_REGNUM
);
4399 nextpc
-= inst2
& 0x00ff;
4401 else if ((inst2
& 0xd000) == 0x8000 && (inst1
& 0x0380) != 0x0380)
4403 /* Conditional branch. */
4404 if (condition_true (bits (inst1
, 6, 9), status
))
4406 int sign
, j1
, j2
, imm1
, imm2
;
4408 sign
= sbits (inst1
, 10, 10);
4409 imm1
= bits (inst1
, 0, 5);
4410 imm2
= bits (inst2
, 0, 10);
4411 j1
= bit (inst2
, 13);
4412 j2
= bit (inst2
, 11);
4414 offset
= (sign
<< 20) + (j2
<< 19) + (j1
<< 18);
4415 offset
+= (imm1
<< 12) + (imm2
<< 1);
4417 nextpc
= pc_val
+ offset
;
4421 else if ((inst1
& 0xfe50) == 0xe810)
4423 /* Load multiple or RFE. */
4424 int rn
, offset
, load_pc
= 1;
4426 rn
= bits (inst1
, 0, 3);
4427 if (bit (inst1
, 7) && !bit (inst1
, 8))
4430 if (!bit (inst2
, 15))
4432 offset
= bitcount (inst2
) * 4 - 4;
4434 else if (!bit (inst1
, 7) && bit (inst1
, 8))
4437 if (!bit (inst2
, 15))
4441 else if (bit (inst1
, 7) && bit (inst1
, 8))
4446 else if (!bit (inst1
, 7) && !bit (inst1
, 8))
4456 CORE_ADDR addr
= get_frame_register_unsigned (frame
, rn
);
4457 nextpc
= get_frame_memory_unsigned (frame
, addr
+ offset
, 4);
4460 else if ((inst1
& 0xffef) == 0xea4f && (inst2
& 0xfff0) == 0x0f00)
4462 /* MOV PC or MOVS PC. */
4463 nextpc
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4464 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4466 else if ((inst1
& 0xff70) == 0xf850 && (inst2
& 0xf000) == 0xf000)
4470 int rn
, load_pc
= 1;
4472 rn
= bits (inst1
, 0, 3);
4473 base
= get_frame_register_unsigned (frame
, rn
);
4474 if (rn
== ARM_PC_REGNUM
)
4476 base
= (base
+ 4) & ~(CORE_ADDR
) 0x3;
4478 base
+= bits (inst2
, 0, 11);
4480 base
-= bits (inst2
, 0, 11);
4482 else if (bit (inst1
, 7))
4483 base
+= bits (inst2
, 0, 11);
4484 else if (bit (inst2
, 11))
4486 if (bit (inst2
, 10))
4489 base
+= bits (inst2
, 0, 7);
4491 base
-= bits (inst2
, 0, 7);
4494 else if ((inst2
& 0x0fc0) == 0x0000)
4496 int shift
= bits (inst2
, 4, 5), rm
= bits (inst2
, 0, 3);
4497 base
+= get_frame_register_unsigned (frame
, rm
) << shift
;
4504 nextpc
= get_frame_memory_unsigned (frame
, base
, 4);
4506 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf000)
4509 CORE_ADDR tbl_reg
, table
, offset
, length
;
4511 tbl_reg
= bits (inst1
, 0, 3);
4512 if (tbl_reg
== 0x0f)
4513 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
4515 table
= get_frame_register_unsigned (frame
, tbl_reg
);
4517 offset
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4518 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 1);
4519 nextpc
= pc_val
+ length
;
4521 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf010)
4524 CORE_ADDR tbl_reg
, table
, offset
, length
;
4526 tbl_reg
= bits (inst1
, 0, 3);
4527 if (tbl_reg
== 0x0f)
4528 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
4530 table
= get_frame_register_unsigned (frame
, tbl_reg
);
4532 offset
= 2 * get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4533 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 2);
4534 nextpc
= pc_val
+ length
;
4537 else if ((inst1
& 0xff00) == 0x4700) /* bx REG, blx REG */
4539 if (bits (inst1
, 3, 6) == 0x0f)
4542 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
4544 else if ((inst1
& 0xff87) == 0x4687) /* mov pc, REG */
4546 if (bits (inst1
, 3, 6) == 0x0f)
4549 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
4551 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4553 else if ((inst1
& 0xf500) == 0xb100)
4556 int imm
= (bit (inst1
, 9) << 6) + (bits (inst1
, 3, 7) << 1);
4557 ULONGEST reg
= get_frame_register_unsigned (frame
, bits (inst1
, 0, 2));
4559 if (bit (inst1
, 11) && reg
!= 0)
4560 nextpc
= pc_val
+ imm
;
4561 else if (!bit (inst1
, 11) && reg
== 0)
4562 nextpc
= pc_val
+ imm
;
4567 /* Get the raw next address. PC is the current program counter, in
4568 FRAME, which is assumed to be executing in ARM mode.
4570 The value returned has the execution state of the next instruction
4571 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4572 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4576 arm_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
)
4578 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4579 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4580 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4581 unsigned long pc_val
;
4582 unsigned long this_instr
;
4583 unsigned long status
;
4586 pc_val
= (unsigned long) pc
;
4587 this_instr
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
4589 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4590 nextpc
= (CORE_ADDR
) (pc_val
+ 4); /* Default case */
4592 if (bits (this_instr
, 28, 31) == INST_NV
)
4593 switch (bits (this_instr
, 24, 27))
4598 /* Branch with Link and change to Thumb. */
4599 nextpc
= BranchDest (pc
, this_instr
);
4600 nextpc
|= bit (this_instr
, 24) << 1;
4601 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4607 /* Coprocessor register transfer. */
4608 if (bits (this_instr
, 12, 15) == 15)
4609 error (_("Invalid update to pc in instruction"));
4612 else if (condition_true (bits (this_instr
, 28, 31), status
))
4614 switch (bits (this_instr
, 24, 27))
4617 case 0x1: /* data processing */
4621 unsigned long operand1
, operand2
, result
= 0;
4625 if (bits (this_instr
, 12, 15) != 15)
4628 if (bits (this_instr
, 22, 25) == 0
4629 && bits (this_instr
, 4, 7) == 9) /* multiply */
4630 error (_("Invalid update to pc in instruction"));
4632 /* BX <reg>, BLX <reg> */
4633 if (bits (this_instr
, 4, 27) == 0x12fff1
4634 || bits (this_instr
, 4, 27) == 0x12fff3)
4636 rn
= bits (this_instr
, 0, 3);
4637 nextpc
= ((rn
== ARM_PC_REGNUM
)
4639 : get_frame_register_unsigned (frame
, rn
));
4644 /* Multiply into PC. */
4645 c
= (status
& FLAG_C
) ? 1 : 0;
4646 rn
= bits (this_instr
, 16, 19);
4647 operand1
= ((rn
== ARM_PC_REGNUM
)
4649 : get_frame_register_unsigned (frame
, rn
));
4651 if (bit (this_instr
, 25))
4653 unsigned long immval
= bits (this_instr
, 0, 7);
4654 unsigned long rotate
= 2 * bits (this_instr
, 8, 11);
4655 operand2
= ((immval
>> rotate
) | (immval
<< (32 - rotate
)))
4658 else /* operand 2 is a shifted register. */
4659 operand2
= shifted_reg_val (frame
, this_instr
, c
,
4662 switch (bits (this_instr
, 21, 24))
4665 result
= operand1
& operand2
;
4669 result
= operand1
^ operand2
;
4673 result
= operand1
- operand2
;
4677 result
= operand2
- operand1
;
4681 result
= operand1
+ operand2
;
4685 result
= operand1
+ operand2
+ c
;
4689 result
= operand1
- operand2
+ c
;
4693 result
= operand2
- operand1
+ c
;
4699 case 0xb: /* tst, teq, cmp, cmn */
4700 result
= (unsigned long) nextpc
;
4704 result
= operand1
| operand2
;
4708 /* Always step into a function. */
4713 result
= operand1
& ~operand2
;
4721 /* In 26-bit APCS the bottom two bits of the result are
4722 ignored, and we always end up in ARM state. */
4724 nextpc
= arm_addr_bits_remove (gdbarch
, result
);
4732 case 0x5: /* data transfer */
4735 if (bit (this_instr
, 20))
4738 if (bits (this_instr
, 12, 15) == 15)
4744 if (bit (this_instr
, 22))
4745 error (_("Invalid update to pc in instruction"));
4747 /* byte write to PC */
4748 rn
= bits (this_instr
, 16, 19);
4749 base
= ((rn
== ARM_PC_REGNUM
)
4751 : get_frame_register_unsigned (frame
, rn
));
4753 if (bit (this_instr
, 24))
4756 int c
= (status
& FLAG_C
) ? 1 : 0;
4757 unsigned long offset
=
4758 (bit (this_instr
, 25)
4759 ? shifted_reg_val (frame
, this_instr
, c
, pc_val
, status
)
4760 : bits (this_instr
, 0, 11));
4762 if (bit (this_instr
, 23))
4768 (CORE_ADDR
) read_memory_unsigned_integer ((CORE_ADDR
) base
,
4775 case 0x9: /* block transfer */
4776 if (bit (this_instr
, 20))
4779 if (bit (this_instr
, 15))
4783 unsigned long rn_val
4784 = get_frame_register_unsigned (frame
,
4785 bits (this_instr
, 16, 19));
4787 if (bit (this_instr
, 23))
4790 unsigned long reglist
= bits (this_instr
, 0, 14);
4791 offset
= bitcount (reglist
) * 4;
4792 if (bit (this_instr
, 24)) /* pre */
4795 else if (bit (this_instr
, 24))
4799 (CORE_ADDR
) read_memory_unsigned_integer ((CORE_ADDR
)
4806 case 0xb: /* branch & link */
4807 case 0xa: /* branch */
4809 nextpc
= BranchDest (pc
, this_instr
);
4815 case 0xe: /* coproc ops */
4819 struct gdbarch_tdep
*tdep
;
4820 tdep
= gdbarch_tdep (gdbarch
);
4822 if (tdep
->syscall_next_pc
!= NULL
)
4823 nextpc
= tdep
->syscall_next_pc (frame
);
4829 fprintf_filtered (gdb_stderr
, _("Bad bit-field extraction\n"));
4837 /* Determine next PC after current instruction executes. Will call either
4838 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4839 loop is detected. */
4842 arm_get_next_pc (struct frame_info
*frame
, CORE_ADDR pc
)
4846 if (arm_frame_is_thumb (frame
))
4848 nextpc
= thumb_get_next_pc_raw (frame
, pc
);
4849 if (nextpc
== MAKE_THUMB_ADDR (pc
))
4850 error (_("Infinite loop detected"));
4854 nextpc
= arm_get_next_pc_raw (frame
, pc
);
4856 error (_("Infinite loop detected"));
4862 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4863 of the appropriate mode (as encoded in the PC value), even if this
4864 differs from what would be expected according to the symbol tables. */
4867 arm_insert_single_step_breakpoint (struct gdbarch
*gdbarch
,
4868 struct address_space
*aspace
,
4871 struct cleanup
*old_chain
4872 = make_cleanup_restore_integer (&arm_override_mode
);
4874 arm_override_mode
= IS_THUMB_ADDR (pc
);
4875 pc
= gdbarch_addr_bits_remove (gdbarch
, pc
);
4877 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
4879 do_cleanups (old_chain
);
4882 /* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
4883 instruction and ending with a STREX{,B,H,D} instruction. If such a sequence
4884 is found, attempt to step through it. A breakpoint is placed at the end of
4888 thumb_deal_with_atomic_sequence_raw (struct frame_info
*frame
)
4890 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4891 struct address_space
*aspace
= get_frame_address_space (frame
);
4892 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4893 CORE_ADDR pc
= get_frame_pc (frame
);
4894 CORE_ADDR breaks
[2] = {-1, -1};
4896 unsigned short insn1
, insn2
;
4899 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
4900 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
4901 ULONGEST status
, itstate
;
4903 /* We currently do not support atomic sequences within an IT block. */
4904 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4905 itstate
= ((status
>> 8) & 0xfc) | ((status
>> 25) & 0x3);
4909 /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction. */
4910 insn1
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4912 if (thumb_insn_size (insn1
) != 4)
4915 insn2
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4917 if (!((insn1
& 0xfff0) == 0xe850
4918 || ((insn1
& 0xfff0) == 0xe8d0 && (insn2
& 0x00c0) == 0x0040)))
4921 /* Assume that no atomic sequence is longer than "atomic_sequence_length"
4923 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
4925 insn1
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4928 if (thumb_insn_size (insn1
) != 4)
4930 /* Assume that there is at most one conditional branch in the
4931 atomic sequence. If a conditional branch is found, put a
4932 breakpoint in its destination address. */
4933 if ((insn1
& 0xf000) == 0xd000 && bits (insn1
, 8, 11) != 0x0f)
4935 if (last_breakpoint
> 0)
4936 return 0; /* More than one conditional branch found,
4937 fallback to the standard code. */
4939 breaks
[1] = loc
+ 2 + (sbits (insn1
, 0, 7) << 1);
4943 /* We do not support atomic sequences that use any *other*
4944 instructions but conditional branches to change the PC.
4945 Fall back to standard code to avoid losing control of
4947 else if (thumb_instruction_changes_pc (insn1
))
4952 insn2
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4955 /* Assume that there is at most one conditional branch in the
4956 atomic sequence. If a conditional branch is found, put a
4957 breakpoint in its destination address. */
4958 if ((insn1
& 0xf800) == 0xf000
4959 && (insn2
& 0xd000) == 0x8000
4960 && (insn1
& 0x0380) != 0x0380)
4962 int sign
, j1
, j2
, imm1
, imm2
;
4963 unsigned int offset
;
4965 sign
= sbits (insn1
, 10, 10);
4966 imm1
= bits (insn1
, 0, 5);
4967 imm2
= bits (insn2
, 0, 10);
4968 j1
= bit (insn2
, 13);
4969 j2
= bit (insn2
, 11);
4971 offset
= (sign
<< 20) + (j2
<< 19) + (j1
<< 18);
4972 offset
+= (imm1
<< 12) + (imm2
<< 1);
4974 if (last_breakpoint
> 0)
4975 return 0; /* More than one conditional branch found,
4976 fallback to the standard code. */
4978 breaks
[1] = loc
+ offset
;
4982 /* We do not support atomic sequences that use any *other*
4983 instructions but conditional branches to change the PC.
4984 Fall back to standard code to avoid losing control of
4986 else if (thumb2_instruction_changes_pc (insn1
, insn2
))
4989 /* If we find a strex{,b,h,d}, we're done. */
4990 if ((insn1
& 0xfff0) == 0xe840
4991 || ((insn1
& 0xfff0) == 0xe8c0 && (insn2
& 0x00c0) == 0x0040))
4996 /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
4997 if (insn_count
== atomic_sequence_length
)
5000 /* Insert a breakpoint right after the end of the atomic sequence. */
5003 /* Check for duplicated breakpoints. Check also for a breakpoint
5004 placed (branch instruction's destination) anywhere in sequence. */
5006 && (breaks
[1] == breaks
[0]
5007 || (breaks
[1] >= pc
&& breaks
[1] < loc
)))
5008 last_breakpoint
= 0;
5010 /* Effectively inserts the breakpoints. */
5011 for (index
= 0; index
<= last_breakpoint
; index
++)
5012 arm_insert_single_step_breakpoint (gdbarch
, aspace
,
5013 MAKE_THUMB_ADDR (breaks
[index
]));
5019 arm_deal_with_atomic_sequence_raw (struct frame_info
*frame
)
5021 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
5022 struct address_space
*aspace
= get_frame_address_space (frame
);
5023 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
5024 CORE_ADDR pc
= get_frame_pc (frame
);
5025 CORE_ADDR breaks
[2] = {-1, -1};
5030 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
5031 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
5033 /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
5034 Note that we do not currently support conditionally executed atomic
5036 insn
= read_memory_unsigned_integer (loc
, 4, byte_order_for_code
);
5038 if ((insn
& 0xff9000f0) != 0xe1900090)
5041 /* Assume that no atomic sequence is longer than "atomic_sequence_length"
5043 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
5045 insn
= read_memory_unsigned_integer (loc
, 4, byte_order_for_code
);
5048 /* Assume that there is at most one conditional branch in the atomic
5049 sequence. If a conditional branch is found, put a breakpoint in
5050 its destination address. */
5051 if (bits (insn
, 24, 27) == 0xa)
5053 if (last_breakpoint
> 0)
5054 return 0; /* More than one conditional branch found, fallback
5055 to the standard single-step code. */
5057 breaks
[1] = BranchDest (loc
- 4, insn
);
5061 /* We do not support atomic sequences that use any *other* instructions
5062 but conditional branches to change the PC. Fall back to standard
5063 code to avoid losing control of execution. */
5064 else if (arm_instruction_changes_pc (insn
))
5067 /* If we find a strex{,b,h,d}, we're done. */
5068 if ((insn
& 0xff9000f0) == 0xe1800090)
5072 /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
5073 if (insn_count
== atomic_sequence_length
)
5076 /* Insert a breakpoint right after the end of the atomic sequence. */
5079 /* Check for duplicated breakpoints. Check also for a breakpoint
5080 placed (branch instruction's destination) anywhere in sequence. */
5082 && (breaks
[1] == breaks
[0]
5083 || (breaks
[1] >= pc
&& breaks
[1] < loc
)))
5084 last_breakpoint
= 0;
5086 /* Effectively inserts the breakpoints. */
5087 for (index
= 0; index
<= last_breakpoint
; index
++)
5088 arm_insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
/* Dispatch atomic-sequence handling to the Thumb or ARM raw routine
   depending on the mode of the current frame.  Returns non-zero if
   breakpoints were placed.  */

static int
arm_deal_with_atomic_sequence (struct frame_info *frame)
{
  if (arm_frame_is_thumb (frame))
    return thumb_deal_with_atomic_sequence_raw (frame);
  else
    return arm_deal_with_atomic_sequence_raw (frame);
}
5102 /* single_step() is called just before we want to resume the inferior,
5103 if we want to single-step it but there is no hardware or kernel
5104 single-step support. We find the target of the coming instruction
5105 and breakpoint it. */
5108 arm_software_single_step (struct frame_info
*frame
)
5110 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
5111 struct address_space
*aspace
= get_frame_address_space (frame
);
5114 if (arm_deal_with_atomic_sequence (frame
))
5117 next_pc
= arm_get_next_pc (frame
, get_frame_pc (frame
));
5118 arm_insert_single_step_breakpoint (gdbarch
, aspace
, next_pc
);
5123 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5124 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5125 NULL if an error occurs. BUF is freed. */
5128 extend_buffer_earlier (gdb_byte
*buf
, CORE_ADDR endaddr
,
5129 int old_len
, int new_len
)
5131 gdb_byte
*new_buf
, *middle
;
5132 int bytes_to_read
= new_len
- old_len
;
5134 new_buf
= xmalloc (new_len
);
5135 memcpy (new_buf
+ bytes_to_read
, buf
, old_len
);
5137 if (target_read_memory (endaddr
- new_len
, new_buf
, bytes_to_read
) != 0)
5145 /* An IT block is at most the 2-byte IT instruction followed by
5146 four 4-byte instructions. The furthest back we must search to
5147 find an IT block that affects the current instruction is thus
5148 2 + 3 * 4 == 14 bytes. */
5149 #define MAX_IT_BLOCK_PREFIX 14
5151 /* Use a quick scan if there are more than this many bytes of
5153 #define IT_SCAN_THRESHOLD 32
5155 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5156 A breakpoint in an IT block may not be hit, depending on the
5159 arm_adjust_breakpoint_address (struct gdbarch
*gdbarch
, CORE_ADDR bpaddr
)
5163 CORE_ADDR boundary
, func_start
;
5164 int buf_len
, buf2_len
;
5165 enum bfd_endian order
= gdbarch_byte_order_for_code (gdbarch
);
5166 int i
, any
, last_it
, last_it_count
;
5168 /* If we are using BKPT breakpoints, none of this is necessary. */
5169 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
== NULL
)
5172 /* ARM mode does not have this problem. */
5173 if (!arm_pc_is_thumb (gdbarch
, bpaddr
))
5176 /* We are setting a breakpoint in Thumb code that could potentially
5177 contain an IT block. The first step is to find how much Thumb
5178 code there is; we do not need to read outside of known Thumb
5180 map_type
= arm_find_mapping_symbol (bpaddr
, &boundary
);
5182 /* Thumb-2 code must have mapping symbols to have a chance. */
5185 bpaddr
= gdbarch_addr_bits_remove (gdbarch
, bpaddr
);
5187 if (find_pc_partial_function (bpaddr
, NULL
, &func_start
, NULL
)
5188 && func_start
> boundary
)
5189 boundary
= func_start
;
5191 /* Search for a candidate IT instruction. We have to do some fancy
5192 footwork to distinguish a real IT instruction from the second
5193 half of a 32-bit instruction, but there is no need for that if
5194 there's no candidate. */
5195 buf_len
= min (bpaddr
- boundary
, MAX_IT_BLOCK_PREFIX
);
5197 /* No room for an IT instruction. */
5200 buf
= xmalloc (buf_len
);
5201 if (target_read_memory (bpaddr
- buf_len
, buf
, buf_len
) != 0)
5204 for (i
= 0; i
< buf_len
; i
+= 2)
5206 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5207 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
5219 /* OK, the code bytes before this instruction contain at least one
5220 halfword which resembles an IT instruction. We know that it's
5221 Thumb code, but there are still two possibilities. Either the
5222 halfword really is an IT instruction, or it is the second half of
5223 a 32-bit Thumb instruction. The only way we can tell is to
5224 scan forwards from a known instruction boundary. */
5225 if (bpaddr
- boundary
> IT_SCAN_THRESHOLD
)
5229 /* There's a lot of code before this instruction. Start with an
5230 optimistic search; it's easy to recognize halfwords that can
5231 not be the start of a 32-bit instruction, and use that to
5232 lock on to the instruction boundaries. */
5233 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, IT_SCAN_THRESHOLD
);
5236 buf_len
= IT_SCAN_THRESHOLD
;
5239 for (i
= 0; i
< buf_len
- sizeof (buf
) && ! definite
; i
+= 2)
5241 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5242 if (thumb_insn_size (inst1
) == 2)
5249 /* At this point, if DEFINITE, BUF[I] is the first place we
5250 are sure that we know the instruction boundaries, and it is far
5251 enough from BPADDR that we could not miss an IT instruction
5252 affecting BPADDR. If ! DEFINITE, give up - start from a
5256 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
,
5260 buf_len
= bpaddr
- boundary
;
5266 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, bpaddr
- boundary
);
5269 buf_len
= bpaddr
- boundary
;
5273 /* Scan forwards. Find the last IT instruction before BPADDR. */
5278 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5280 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
5285 else if (inst1
& 0x0002)
5287 else if (inst1
& 0x0004)
5292 i
+= thumb_insn_size (inst1
);
5298 /* There wasn't really an IT instruction after all. */
5301 if (last_it_count
< 1)
5302 /* It was too far away. */
5305 /* This really is a trouble spot. Move the breakpoint to the IT
5307 return bpaddr
- buf_len
+ last_it
;
5310 /* ARM displaced stepping support.
5312 Generally ARM displaced stepping works as follows:
5314 1. When an instruction is to be single-stepped, it is first decoded by
5315 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5316 Depending on the type of instruction, it is then copied to a scratch
5317 location, possibly in a modified form. The copy_* set of functions
5318 performs such modification, as necessary. A breakpoint is placed after
5319 the modified instruction in the scratch space to return control to GDB.
5320 Note in particular that instructions which modify the PC will no longer
5321 do so after modification.
5323 2. The instruction is single-stepped, by setting the PC to the scratch
5324 location address, and resuming. Control returns to GDB when the
5327 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5328 function used for the current instruction. This function's job is to
5329 put the CPU/memory state back to what it would have been if the
5330 instruction had been executed unmodified in its original location. */
5332 /* NOP instruction (mov r0, r0). */
5333 #define ARM_NOP 0xe1a00000
5334 #define THUMB_NOP 0x4600
5336 /* Helper for register reads for displaced stepping. In particular, this
5337 returns the PC as it would be seen by the instruction at its original
5341 displaced_read_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5345 CORE_ADDR from
= dsc
->insn_addr
;
5347 if (regno
== ARM_PC_REGNUM
)
5349 /* Compute pipeline offset:
5350 - When executing an ARM instruction, PC reads as the address of the
5351 current instruction plus 8.
5352 - When executing a Thumb instruction, PC reads as the address of the
5353 current instruction plus 4. */
5360 if (debug_displaced
)
5361 fprintf_unfiltered (gdb_stdlog
, "displaced: read pc value %.8lx\n",
5362 (unsigned long) from
);
5363 return (ULONGEST
) from
;
5367 regcache_cooked_read_unsigned (regs
, regno
, &ret
);
5368 if (debug_displaced
)
5369 fprintf_unfiltered (gdb_stdlog
, "displaced: read r%d value %.8lx\n",
5370 regno
, (unsigned long) ret
);
5376 displaced_in_arm_mode (struct regcache
*regs
)
5379 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
5381 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
5383 return (ps
& t_bit
) == 0;
5386 /* Write to the PC as from a branch instruction. */
5389 branch_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5393 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5394 architecture versions < 6. */
5395 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
5396 val
& ~(ULONGEST
) 0x3);
5398 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
5399 val
& ~(ULONGEST
) 0x1);
5402 /* Write to the PC as from a branch-exchange instruction. */
5405 bx_write_pc (struct regcache
*regs
, ULONGEST val
)
5408 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
5410 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
5414 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
| t_bit
);
5415 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffe);
5417 else if ((val
& 2) == 0)
5419 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
5420 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
);
5424 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5425 mode, align dest to 4 bytes). */
5426 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5427 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
5428 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffc);
5432 /* Write to the PC as if from a load instruction. */
5435 load_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5438 if (DISPLACED_STEPPING_ARCH_VERSION
>= 5)
5439 bx_write_pc (regs
, val
);
5441 branch_write_pc (regs
, dsc
, val
);
5444 /* Write to the PC as if from an ALU instruction. */
5447 alu_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5450 if (DISPLACED_STEPPING_ARCH_VERSION
>= 7 && !dsc
->is_thumb
)
5451 bx_write_pc (regs
, val
);
5453 branch_write_pc (regs
, dsc
, val
);
5456 /* Helper for writing to registers for displaced stepping. Writing to the PC
5457 has a varying effects depending on the instruction which does the write:
5458 this is controlled by the WRITE_PC argument. */
5461 displaced_write_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5462 int regno
, ULONGEST val
, enum pc_write_style write_pc
)
5464 if (regno
== ARM_PC_REGNUM
)
5466 if (debug_displaced
)
5467 fprintf_unfiltered (gdb_stdlog
, "displaced: writing pc %.8lx\n",
5468 (unsigned long) val
);
5471 case BRANCH_WRITE_PC
:
5472 branch_write_pc (regs
, dsc
, val
);
5476 bx_write_pc (regs
, val
);
5480 load_write_pc (regs
, dsc
, val
);
5484 alu_write_pc (regs
, dsc
, val
);
5487 case CANNOT_WRITE_PC
:
5488 warning (_("Instruction wrote to PC in an unexpected way when "
5489 "single-stepping"));
5493 internal_error (__FILE__
, __LINE__
,
5494 _("Invalid argument to displaced_write_reg"));
5497 dsc
->wrote_to_pc
= 1;
5501 if (debug_displaced
)
5502 fprintf_unfiltered (gdb_stdlog
, "displaced: writing r%d value %.8lx\n",
5503 regno
, (unsigned long) val
);
5504 regcache_cooked_write_unsigned (regs
, regno
, val
);
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function
   returns return 1 if any of these fields in INSN reference the PC
   (also 0b1111, r15), else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t lowbit = 1;

  while (bitmask != 0)
    {
      uint32_t mask;

      /* Skip forward to the next nibble selected by BITMASK.  */
      for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
	;

      if (!lowbit)
	return 0;

      mask = lowbit * 0xf;

      if ((insn & mask) == mask)
	return 1;

      bitmask &= ~mask;
    }

  return 0;
}
5540 /* The simplest copy function. Many instructions have the same effect no
5541 matter what address they are executed at: in those cases, use this. */
5544 arm_copy_unmodified (struct gdbarch
*gdbarch
, uint32_t insn
,
5545 const char *iname
, struct displaced_step_closure
*dsc
)
5547 if (debug_displaced
)
5548 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.8lx, "
5549 "opcode/class '%s' unmodified\n", (unsigned long) insn
,
5552 dsc
->modinsn
[0] = insn
;
5558 thumb_copy_unmodified_32bit (struct gdbarch
*gdbarch
, uint16_t insn1
,
5559 uint16_t insn2
, const char *iname
,
5560 struct displaced_step_closure
*dsc
)
5562 if (debug_displaced
)
5563 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x %.4x, "
5564 "opcode/class '%s' unmodified\n", insn1
, insn2
,
5567 dsc
->modinsn
[0] = insn1
;
5568 dsc
->modinsn
[1] = insn2
;
5574 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5577 thumb_copy_unmodified_16bit (struct gdbarch
*gdbarch
, unsigned int insn
,
5579 struct displaced_step_closure
*dsc
)
5581 if (debug_displaced
)
5582 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x, "
5583 "opcode/class '%s' unmodified\n", insn
,
5586 dsc
->modinsn
[0] = insn
;
5591 /* Preload instructions with immediate offset. */
5594 cleanup_preload (struct gdbarch
*gdbarch
,
5595 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5597 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5598 if (!dsc
->u
.preload
.immed
)
5599 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
5603 install_preload (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5604 struct displaced_step_closure
*dsc
, unsigned int rn
)
5607 /* Preload instructions:
5609 {pli/pld} [rn, #+/-imm]
5611 {pli/pld} [r0, #+/-imm]. */
5613 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5614 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5615 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5616 dsc
->u
.preload
.immed
= 1;
5618 dsc
->cleanup
= &cleanup_preload
;
5622 arm_copy_preload (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
5623 struct displaced_step_closure
*dsc
)
5625 unsigned int rn
= bits (insn
, 16, 19);
5627 if (!insn_references_pc (insn
, 0x000f0000ul
))
5628 return arm_copy_unmodified (gdbarch
, insn
, "preload", dsc
);
5630 if (debug_displaced
)
5631 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
5632 (unsigned long) insn
);
5634 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
5636 install_preload (gdbarch
, regs
, dsc
, rn
);
5642 thumb2_copy_preload (struct gdbarch
*gdbarch
, uint16_t insn1
, uint16_t insn2
,
5643 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5645 unsigned int rn
= bits (insn1
, 0, 3);
5646 unsigned int u_bit
= bit (insn1
, 7);
5647 int imm12
= bits (insn2
, 0, 11);
5650 if (rn
!= ARM_PC_REGNUM
)
5651 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "preload", dsc
);
5653 /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
5654 PLD (literal) Encoding T1. */
5655 if (debug_displaced
)
5656 fprintf_unfiltered (gdb_stdlog
,
5657 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
5658 (unsigned int) dsc
->insn_addr
, u_bit
? '+' : '-',
5664 /* Rewrite instruction {pli/pld} PC imm12 into:
5665 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
5669 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
5671 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5672 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5674 pc_val
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
5676 displaced_write_reg (regs
, dsc
, 0, pc_val
, CANNOT_WRITE_PC
);
5677 displaced_write_reg (regs
, dsc
, 1, imm12
, CANNOT_WRITE_PC
);
5678 dsc
->u
.preload
.immed
= 0;
5680 /* {pli/pld} [r0, r1] */
5681 dsc
->modinsn
[0] = insn1
& 0xfff0;
5682 dsc
->modinsn
[1] = 0xf001;
5685 dsc
->cleanup
= &cleanup_preload
;
5689 /* Preload instructions with register offset. */
5692 install_preload_reg(struct gdbarch
*gdbarch
, struct regcache
*regs
,
5693 struct displaced_step_closure
*dsc
, unsigned int rn
,
5696 ULONGEST rn_val
, rm_val
;
5698 /* Preload register-offset instructions:
5700 {pli/pld} [rn, rm {, shift}]
5702 {pli/pld} [r0, r1 {, shift}]. */
5704 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5705 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5706 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5707 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
5708 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5709 displaced_write_reg (regs
, dsc
, 1, rm_val
, CANNOT_WRITE_PC
);
5710 dsc
->u
.preload
.immed
= 0;
5712 dsc
->cleanup
= &cleanup_preload
;
5716 arm_copy_preload_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
5717 struct regcache
*regs
,
5718 struct displaced_step_closure
*dsc
)
5720 unsigned int rn
= bits (insn
, 16, 19);
5721 unsigned int rm
= bits (insn
, 0, 3);
5724 if (!insn_references_pc (insn
, 0x000f000ful
))
5725 return arm_copy_unmodified (gdbarch
, insn
, "preload reg", dsc
);
5727 if (debug_displaced
)
5728 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
5729 (unsigned long) insn
);
5731 dsc
->modinsn
[0] = (insn
& 0xfff0fff0) | 0x1;
5733 install_preload_reg (gdbarch
, regs
, dsc
, rn
, rm
);
5737 /* Copy/cleanup coprocessor load and store instructions. */
5740 cleanup_copro_load_store (struct gdbarch
*gdbarch
,
5741 struct regcache
*regs
,
5742 struct displaced_step_closure
*dsc
)
5744 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
, 0);
5746 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5748 if (dsc
->u
.ldst
.writeback
)
5749 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, LOAD_WRITE_PC
);
5753 install_copro_load_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5754 struct displaced_step_closure
*dsc
,
5755 int writeback
, unsigned int rn
)
5759 /* Coprocessor load/store instructions:
5761 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5763 {stc/stc2} [r0, #+/-imm].
5765 ldc/ldc2 are handled identically. */
5767 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5768 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5769 /* PC should be 4-byte aligned. */
5770 rn_val
= rn_val
& 0xfffffffc;
5771 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5773 dsc
->u
.ldst
.writeback
= writeback
;
5774 dsc
->u
.ldst
.rn
= rn
;
5776 dsc
->cleanup
= &cleanup_copro_load_store
;
5780 arm_copy_copro_load_store (struct gdbarch
*gdbarch
, uint32_t insn
,
5781 struct regcache
*regs
,
5782 struct displaced_step_closure
*dsc
)
5784 unsigned int rn
= bits (insn
, 16, 19);
5786 if (!insn_references_pc (insn
, 0x000f0000ul
))
5787 return arm_copy_unmodified (gdbarch
, insn
, "copro load/store", dsc
);
5789 if (debug_displaced
)
5790 fprintf_unfiltered (gdb_stdlog
, "displaced: copying coprocessor "
5791 "load/store insn %.8lx\n", (unsigned long) insn
);
5793 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
5795 install_copro_load_store (gdbarch
, regs
, dsc
, bit (insn
, 25), rn
);
5801 thumb2_copy_copro_load_store (struct gdbarch
*gdbarch
, uint16_t insn1
,
5802 uint16_t insn2
, struct regcache
*regs
,
5803 struct displaced_step_closure
*dsc
)
5805 unsigned int rn
= bits (insn1
, 0, 3);
5807 if (rn
!= ARM_PC_REGNUM
)
5808 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
5809 "copro load/store", dsc
);
5811 if (debug_displaced
)
5812 fprintf_unfiltered (gdb_stdlog
, "displaced: copying coprocessor "
5813 "load/store insn %.4x%.4x\n", insn1
, insn2
);
5815 dsc
->modinsn
[0] = insn1
& 0xfff0;
5816 dsc
->modinsn
[1] = insn2
;
5819 /* This function is called for copying instruction LDC/LDC2/VLDR, which
5820 doesn't support writeback, so pass 0. */
5821 install_copro_load_store (gdbarch
, regs
, dsc
, 0, rn
);
5826 /* Clean up branch instructions (actually perform the branch, by setting
5830 cleanup_branch (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5831 struct displaced_step_closure
*dsc
)
5833 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
5834 int branch_taken
= condition_true (dsc
->u
.branch
.cond
, status
);
5835 enum pc_write_style write_pc
= dsc
->u
.branch
.exchange
5836 ? BX_WRITE_PC
: BRANCH_WRITE_PC
;
5841 if (dsc
->u
.branch
.link
)
5843 /* The value of LR should be the next insn of current one. In order
5844 not to confuse logic hanlding later insn `bx lr', if current insn mode
5845 is Thumb, the bit 0 of LR value should be set to 1. */
5846 ULONGEST next_insn_addr
= dsc
->insn_addr
+ dsc
->insn_size
;
5849 next_insn_addr
|= 0x1;
5851 displaced_write_reg (regs
, dsc
, ARM_LR_REGNUM
, next_insn_addr
,
5855 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, dsc
->u
.branch
.dest
, write_pc
);
5858 /* Copy B/BL/BLX instructions with immediate destinations. */
5861 install_b_bl_blx (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5862 struct displaced_step_closure
*dsc
,
5863 unsigned int cond
, int exchange
, int link
, long offset
)
5865 /* Implement "BL<cond> <label>" as:
5867 Preparation: cond <- instruction condition
5868 Insn: mov r0, r0 (nop)
5869 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5871 B<cond> similar, but don't set r14 in cleanup. */
5873 dsc
->u
.branch
.cond
= cond
;
5874 dsc
->u
.branch
.link
= link
;
5875 dsc
->u
.branch
.exchange
= exchange
;
5877 dsc
->u
.branch
.dest
= dsc
->insn_addr
;
5878 if (link
&& exchange
)
5879 /* For BLX, offset is computed from the Align (PC, 4). */
5880 dsc
->u
.branch
.dest
= dsc
->u
.branch
.dest
& 0xfffffffc;
5883 dsc
->u
.branch
.dest
+= 4 + offset
;
5885 dsc
->u
.branch
.dest
+= 8 + offset
;
5887 dsc
->cleanup
= &cleanup_branch
;
5890 arm_copy_b_bl_blx (struct gdbarch
*gdbarch
, uint32_t insn
,
5891 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5893 unsigned int cond
= bits (insn
, 28, 31);
5894 int exchange
= (cond
== 0xf);
5895 int link
= exchange
|| bit (insn
, 24);
5898 if (debug_displaced
)
5899 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s immediate insn "
5900 "%.8lx\n", (exchange
) ? "blx" : (link
) ? "bl" : "b",
5901 (unsigned long) insn
);
5903 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5904 then arrange the switch into Thumb mode. */
5905 offset
= (bits (insn
, 0, 23) << 2) | (bit (insn
, 24) << 1) | 1;
5907 offset
= bits (insn
, 0, 23) << 2;
5909 if (bit (offset
, 25))
5910 offset
= offset
| ~0x3ffffff;
5912 dsc
->modinsn
[0] = ARM_NOP
;
5914 install_b_bl_blx (gdbarch
, regs
, dsc
, cond
, exchange
, link
, offset
);
5919 thumb2_copy_b_bl_blx (struct gdbarch
*gdbarch
, uint16_t insn1
,
5920 uint16_t insn2
, struct regcache
*regs
,
5921 struct displaced_step_closure
*dsc
)
5923 int link
= bit (insn2
, 14);
5924 int exchange
= link
&& !bit (insn2
, 12);
5927 int j1
= bit (insn2
, 13);
5928 int j2
= bit (insn2
, 11);
5929 int s
= sbits (insn1
, 10, 10);
5930 int i1
= !(j1
^ bit (insn1
, 10));
5931 int i2
= !(j2
^ bit (insn1
, 10));
5933 if (!link
&& !exchange
) /* B */
5935 offset
= (bits (insn2
, 0, 10) << 1);
5936 if (bit (insn2
, 12)) /* Encoding T4 */
5938 offset
|= (bits (insn1
, 0, 9) << 12)
5944 else /* Encoding T3 */
5946 offset
|= (bits (insn1
, 0, 5) << 12)
5950 cond
= bits (insn1
, 6, 9);
5955 offset
= (bits (insn1
, 0, 9) << 12);
5956 offset
|= ((i2
<< 22) | (i1
<< 23) | (s
<< 24));
5957 offset
|= exchange
?
5958 (bits (insn2
, 1, 10) << 2) : (bits (insn2
, 0, 10) << 1);
5961 if (debug_displaced
)
5962 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s insn "
5963 "%.4x %.4x with offset %.8lx\n",
5964 link
? (exchange
) ? "blx" : "bl" : "b",
5965 insn1
, insn2
, offset
);
5967 dsc
->modinsn
[0] = THUMB_NOP
;
5969 install_b_bl_blx (gdbarch
, regs
, dsc
, cond
, exchange
, link
, offset
);
5973 /* Copy B Thumb instructions. */
5975 thumb_copy_b (struct gdbarch
*gdbarch
, unsigned short insn
,
5976 struct displaced_step_closure
*dsc
)
5978 unsigned int cond
= 0;
5980 unsigned short bit_12_15
= bits (insn
, 12, 15);
5981 CORE_ADDR from
= dsc
->insn_addr
;
5983 if (bit_12_15
== 0xd)
5985 /* offset = SignExtend (imm8:0, 32) */
5986 offset
= sbits ((insn
<< 1), 0, 8);
5987 cond
= bits (insn
, 8, 11);
5989 else if (bit_12_15
== 0xe) /* Encoding T2 */
5991 offset
= sbits ((insn
<< 1), 0, 11);
5995 if (debug_displaced
)
5996 fprintf_unfiltered (gdb_stdlog
,
5997 "displaced: copying b immediate insn %.4x "
5998 "with offset %d\n", insn
, offset
);
6000 dsc
->u
.branch
.cond
= cond
;
6001 dsc
->u
.branch
.link
= 0;
6002 dsc
->u
.branch
.exchange
= 0;
6003 dsc
->u
.branch
.dest
= from
+ 4 + offset
;
6005 dsc
->modinsn
[0] = THUMB_NOP
;
6007 dsc
->cleanup
= &cleanup_branch
;
6012 /* Copy BX/BLX with register-specified destinations. */
6015 install_bx_blx_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6016 struct displaced_step_closure
*dsc
, int link
,
6017 unsigned int cond
, unsigned int rm
)
6019 /* Implement {BX,BLX}<cond> <reg>" as:
6021 Preparation: cond <- instruction condition
6022 Insn: mov r0, r0 (nop)
6023 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6025 Don't set r14 in cleanup for BX. */
6027 dsc
->u
.branch
.dest
= displaced_read_reg (regs
, dsc
, rm
);
6029 dsc
->u
.branch
.cond
= cond
;
6030 dsc
->u
.branch
.link
= link
;
6032 dsc
->u
.branch
.exchange
= 1;
6034 dsc
->cleanup
= &cleanup_branch
;
6038 arm_copy_bx_blx_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
6039 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6041 unsigned int cond
= bits (insn
, 28, 31);
6044 int link
= bit (insn
, 5);
6045 unsigned int rm
= bits (insn
, 0, 3);
6047 if (debug_displaced
)
6048 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.8lx",
6049 (unsigned long) insn
);
6051 dsc
->modinsn
[0] = ARM_NOP
;
6053 install_bx_blx_reg (gdbarch
, regs
, dsc
, link
, cond
, rm
);
6058 thumb_copy_bx_blx_reg (struct gdbarch
*gdbarch
, uint16_t insn
,
6059 struct regcache
*regs
,
6060 struct displaced_step_closure
*dsc
)
6062 int link
= bit (insn
, 7);
6063 unsigned int rm
= bits (insn
, 3, 6);
6065 if (debug_displaced
)
6066 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x",
6067 (unsigned short) insn
);
6069 dsc
->modinsn
[0] = THUMB_NOP
;
6071 install_bx_blx_reg (gdbarch
, regs
, dsc
, link
, INST_AL
, rm
);
6077 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6080 cleanup_alu_imm (struct gdbarch
*gdbarch
,
6081 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6083 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
, 0);
6084 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6085 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6086 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6090 arm_copy_alu_imm (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
6091 struct displaced_step_closure
*dsc
)
6093 unsigned int rn
= bits (insn
, 16, 19);
6094 unsigned int rd
= bits (insn
, 12, 15);
6095 unsigned int op
= bits (insn
, 21, 24);
6096 int is_mov
= (op
== 0xd);
6097 ULONGEST rd_val
, rn_val
;
6099 if (!insn_references_pc (insn
, 0x000ff000ul
))
6100 return arm_copy_unmodified (gdbarch
, insn
, "ALU immediate", dsc
);
6102 if (debug_displaced
)
6103 fprintf_unfiltered (gdb_stdlog
, "displaced: copying immediate %s insn "
6104 "%.8lx\n", is_mov
? "move" : "ALU",
6105 (unsigned long) insn
);
6107 /* Instruction is of form:
6109 <op><cond> rd, [rn,] #imm
6113 Preparation: tmp1, tmp2 <- r0, r1;
6115 Insn: <op><cond> r0, r1, #imm
6116 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6119 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6120 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6121 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6122 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6123 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6124 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6128 dsc
->modinsn
[0] = insn
& 0xfff00fff;
6130 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x10000;
6132 dsc
->cleanup
= &cleanup_alu_imm
;
6138 thumb2_copy_alu_imm (struct gdbarch
*gdbarch
, uint16_t insn1
,
6139 uint16_t insn2
, struct regcache
*regs
,
6140 struct displaced_step_closure
*dsc
)
6142 unsigned int op
= bits (insn1
, 5, 8);
6143 unsigned int rn
, rm
, rd
;
6144 ULONGEST rd_val
, rn_val
;
6146 rn
= bits (insn1
, 0, 3); /* Rn */
6147 rm
= bits (insn2
, 0, 3); /* Rm */
6148 rd
= bits (insn2
, 8, 11); /* Rd */
6150 /* This routine is only called for instruction MOV. */
6151 gdb_assert (op
== 0x2 && rn
== 0xf);
6153 if (rm
!= ARM_PC_REGNUM
&& rd
!= ARM_PC_REGNUM
)
6154 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "ALU imm", dsc
);
6156 if (debug_displaced
)
6157 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.4x%.4x\n",
6158 "ALU", insn1
, insn2
);
6160 /* Instruction is of form:
6162 <op><cond> rd, [rn,] #imm
6166 Preparation: tmp1, tmp2 <- r0, r1;
6168 Insn: <op><cond> r0, r1, #imm
6169 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6172 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6173 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6174 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6175 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6176 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6177 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6180 dsc
->modinsn
[0] = insn1
;
6181 dsc
->modinsn
[1] = ((insn2
& 0xf0f0) | 0x1);
6184 dsc
->cleanup
= &cleanup_alu_imm
;
6189 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6192 cleanup_alu_reg (struct gdbarch
*gdbarch
,
6193 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6198 rd_val
= displaced_read_reg (regs
, dsc
, 0);
6200 for (i
= 0; i
< 3; i
++)
6201 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
6203 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6207 install_alu_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6208 struct displaced_step_closure
*dsc
,
6209 unsigned int rd
, unsigned int rn
, unsigned int rm
)
6211 ULONGEST rd_val
, rn_val
, rm_val
;
6213 /* Instruction is of form:
6215 <op><cond> rd, [rn,] rm [, <shift>]
6219 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
6220 r0, r1, r2 <- rd, rn, rm
6221 Insn: <op><cond> r0, r1, r2 [, <shift>]
6222 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
6225 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6226 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6227 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6228 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6229 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6230 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6231 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6232 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6233 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
6236 dsc
->cleanup
= &cleanup_alu_reg
;
6240 arm_copy_alu_reg (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
6241 struct displaced_step_closure
*dsc
)
6243 unsigned int op
= bits (insn
, 21, 24);
6244 int is_mov
= (op
== 0xd);
6246 if (!insn_references_pc (insn
, 0x000ff00ful
))
6247 return arm_copy_unmodified (gdbarch
, insn
, "ALU reg", dsc
);
6249 if (debug_displaced
)
6250 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.8lx\n",
6251 is_mov
? "move" : "ALU", (unsigned long) insn
);
6254 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x2;
6256 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x10002;
6258 install_alu_reg (gdbarch
, regs
, dsc
, bits (insn
, 12, 15), bits (insn
, 16, 19),
6264 thumb_copy_alu_reg (struct gdbarch
*gdbarch
, uint16_t insn
,
6265 struct regcache
*regs
,
6266 struct displaced_step_closure
*dsc
)
6268 unsigned rn
, rm
, rd
;
6270 rd
= bits (insn
, 3, 6);
6271 rn
= (bit (insn
, 7) << 3) | bits (insn
, 0, 2);
6274 if (rd
!= ARM_PC_REGNUM
&& rn
!= ARM_PC_REGNUM
)
6275 return thumb_copy_unmodified_16bit (gdbarch
, insn
, "ALU reg", dsc
);
6277 if (debug_displaced
)
6278 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.4x\n",
6279 "ALU", (unsigned short) insn
);
6281 dsc
->modinsn
[0] = ((insn
& 0xff00) | 0x08);
6283 install_alu_reg (gdbarch
, regs
, dsc
, rd
, rn
, rm
);
6288 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6291 cleanup_alu_shifted_reg (struct gdbarch
*gdbarch
,
6292 struct regcache
*regs
,
6293 struct displaced_step_closure
*dsc
)
6295 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
, 0);
6298 for (i
= 0; i
< 4; i
++)
6299 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
6301 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6305 install_alu_shifted_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6306 struct displaced_step_closure
*dsc
,
6307 unsigned int rd
, unsigned int rn
, unsigned int rm
,
6311 ULONGEST rd_val
, rn_val
, rm_val
, rs_val
;
6313 /* Instruction is of form:
6315 <op><cond> rd, [rn,] rm, <shift> rs
6319 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6320 r0, r1, r2, r3 <- rd, rn, rm, rs
6321 Insn: <op><cond> r0, r1, r2, <shift> r3
6323 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6327 for (i
= 0; i
< 4; i
++)
6328 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
6330 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6331 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6332 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6333 rs_val
= displaced_read_reg (regs
, dsc
, rs
);
6334 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6335 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6336 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
6337 displaced_write_reg (regs
, dsc
, 3, rs_val
, CANNOT_WRITE_PC
);
6339 dsc
->cleanup
= &cleanup_alu_shifted_reg
;
6343 arm_copy_alu_shifted_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
6344 struct regcache
*regs
,
6345 struct displaced_step_closure
*dsc
)
6347 unsigned int op
= bits (insn
, 21, 24);
6348 int is_mov
= (op
== 0xd);
6349 unsigned int rd
, rn
, rm
, rs
;
6351 if (!insn_references_pc (insn
, 0x000fff0ful
))
6352 return arm_copy_unmodified (gdbarch
, insn
, "ALU shifted reg", dsc
);
6354 if (debug_displaced
)
6355 fprintf_unfiltered (gdb_stdlog
, "displaced: copying shifted reg %s insn "
6356 "%.8lx\n", is_mov
? "move" : "ALU",
6357 (unsigned long) insn
);
6359 rn
= bits (insn
, 16, 19);
6360 rm
= bits (insn
, 0, 3);
6361 rs
= bits (insn
, 8, 11);
6362 rd
= bits (insn
, 12, 15);
6365 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x302;
6367 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x10302;
6369 install_alu_shifted_reg (gdbarch
, regs
, dsc
, rd
, rn
, rm
, rs
);
6374 /* Clean up load instructions. */
6377 cleanup_load (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6378 struct displaced_step_closure
*dsc
)
6380 ULONGEST rt_val
, rt_val2
= 0, rn_val
;
6382 rt_val
= displaced_read_reg (regs
, dsc
, 0);
6383 if (dsc
->u
.ldst
.xfersize
== 8)
6384 rt_val2
= displaced_read_reg (regs
, dsc
, 1);
6385 rn_val
= displaced_read_reg (regs
, dsc
, 2);
6387 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6388 if (dsc
->u
.ldst
.xfersize
> 4)
6389 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6390 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
6391 if (!dsc
->u
.ldst
.immed
)
6392 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
6394 /* Handle register writeback. */
6395 if (dsc
->u
.ldst
.writeback
)
6396 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
6397 /* Put result in right place. */
6398 displaced_write_reg (regs
, dsc
, dsc
->rd
, rt_val
, LOAD_WRITE_PC
);
6399 if (dsc
->u
.ldst
.xfersize
== 8)
6400 displaced_write_reg (regs
, dsc
, dsc
->rd
+ 1, rt_val2
, LOAD_WRITE_PC
);
6403 /* Clean up store instructions. */
6406 cleanup_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6407 struct displaced_step_closure
*dsc
)
6409 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
, 2);
6411 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6412 if (dsc
->u
.ldst
.xfersize
> 4)
6413 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6414 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
6415 if (!dsc
->u
.ldst
.immed
)
6416 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
6417 if (!dsc
->u
.ldst
.restore_r4
)
6418 displaced_write_reg (regs
, dsc
, 4, dsc
->tmp
[4], CANNOT_WRITE_PC
);
6421 if (dsc
->u
.ldst
.writeback
)
6422 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
6425 /* Copy "extra" load/store instructions. These are halfword/doubleword
6426 transfers, which have a different encoding to byte/word transfers. */
6429 arm_copy_extra_ld_st (struct gdbarch
*gdbarch
, uint32_t insn
, int unpriveleged
,
6430 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6432 unsigned int op1
= bits (insn
, 20, 24);
6433 unsigned int op2
= bits (insn
, 5, 6);
6434 unsigned int rt
= bits (insn
, 12, 15);
6435 unsigned int rn
= bits (insn
, 16, 19);
6436 unsigned int rm
= bits (insn
, 0, 3);
6437 char load
[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6438 char bytesize
[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6439 int immed
= (op1
& 0x4) != 0;
6441 ULONGEST rt_val
, rt_val2
= 0, rn_val
, rm_val
= 0;
6443 if (!insn_references_pc (insn
, 0x000ff00ful
))
6444 return arm_copy_unmodified (gdbarch
, insn
, "extra load/store", dsc
);
6446 if (debug_displaced
)
6447 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %sextra load/store "
6448 "insn %.8lx\n", unpriveleged
? "unpriveleged " : "",
6449 (unsigned long) insn
);
6451 opcode
= ((op2
<< 2) | (op1
& 0x1) | ((op1
& 0x4) >> 1)) - 4;
6454 internal_error (__FILE__
, __LINE__
,
6455 _("copy_extra_ld_st: instruction decode error"));
6457 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6458 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6459 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6461 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6463 rt_val
= displaced_read_reg (regs
, dsc
, rt
);
6464 if (bytesize
[opcode
] == 8)
6465 rt_val2
= displaced_read_reg (regs
, dsc
, rt
+ 1);
6466 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6468 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6470 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
6471 if (bytesize
[opcode
] == 8)
6472 displaced_write_reg (regs
, dsc
, 1, rt_val2
, CANNOT_WRITE_PC
);
6473 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
6475 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
6478 dsc
->u
.ldst
.xfersize
= bytesize
[opcode
];
6479 dsc
->u
.ldst
.rn
= rn
;
6480 dsc
->u
.ldst
.immed
= immed
;
6481 dsc
->u
.ldst
.writeback
= bit (insn
, 24) == 0 || bit (insn
, 21) != 0;
6482 dsc
->u
.ldst
.restore_r4
= 0;
6485 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6487 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6488 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
6490 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6492 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6493 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
6495 dsc
->cleanup
= load
[opcode
] ? &cleanup_load
: &cleanup_store
;
6500 /* Copy byte/half word/word loads and stores. */
6503 install_load_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6504 struct displaced_step_closure
*dsc
, int load
,
6505 int immed
, int writeback
, int size
, int usermode
,
6506 int rt
, int rm
, int rn
)
6508 ULONGEST rt_val
, rn_val
, rm_val
= 0;
6510 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6511 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6513 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6515 dsc
->tmp
[4] = displaced_read_reg (regs
, dsc
, 4);
6517 rt_val
= displaced_read_reg (regs
, dsc
, rt
);
6518 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6520 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6522 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
6523 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
6525 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
6527 dsc
->u
.ldst
.xfersize
= size
;
6528 dsc
->u
.ldst
.rn
= rn
;
6529 dsc
->u
.ldst
.immed
= immed
;
6530 dsc
->u
.ldst
.writeback
= writeback
;
6532 /* To write PC we can do:
6534 Before this sequence of instructions:
6535 r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
6536 r2 is the Rn value got from dispalced_read_reg.
6538 Insn1: push {pc} Write address of STR instruction + offset on stack
6539 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
6540 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
6541 = addr(Insn1) + offset - addr(Insn3) - 8
6543 Insn4: add r4, r4, #8 r4 = offset - 8
6544 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
6546 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
6548 Otherwise we don't know what value to write for PC, since the offset is
6549 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
6550 of this can be found in Section "Saving from r15" in
6551 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
6553 dsc
->cleanup
= load
? &cleanup_load
: &cleanup_store
;
6558 thumb2_copy_load_literal (struct gdbarch
*gdbarch
, uint16_t insn1
,
6559 uint16_t insn2
, struct regcache
*regs
,
6560 struct displaced_step_closure
*dsc
, int size
)
6562 unsigned int u_bit
= bit (insn1
, 7);
6563 unsigned int rt
= bits (insn2
, 12, 15);
6564 int imm12
= bits (insn2
, 0, 11);
6567 if (debug_displaced
)
6568 fprintf_unfiltered (gdb_stdlog
,
6569 "displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
6570 (unsigned int) dsc
->insn_addr
, rt
, u_bit
? '+' : '-',
6576 /* Rewrite instruction LDR Rt imm12 into:
6578 Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12
6582 Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2]. */
6585 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6586 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6587 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6589 pc_val
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
6591 pc_val
= pc_val
& 0xfffffffc;
6593 displaced_write_reg (regs
, dsc
, 2, pc_val
, CANNOT_WRITE_PC
);
6594 displaced_write_reg (regs
, dsc
, 3, imm12
, CANNOT_WRITE_PC
);
6598 dsc
->u
.ldst
.xfersize
= size
;
6599 dsc
->u
.ldst
.immed
= 0;
6600 dsc
->u
.ldst
.writeback
= 0;
6601 dsc
->u
.ldst
.restore_r4
= 0;
6603 /* LDR R0, R2, R3 */
6604 dsc
->modinsn
[0] = 0xf852;
6605 dsc
->modinsn
[1] = 0x3;
6608 dsc
->cleanup
= &cleanup_load
;
6614 thumb2_copy_load_reg_imm (struct gdbarch
*gdbarch
, uint16_t insn1
,
6615 uint16_t insn2
, struct regcache
*regs
,
6616 struct displaced_step_closure
*dsc
,
6617 int writeback
, int immed
)
6619 unsigned int rt
= bits (insn2
, 12, 15);
6620 unsigned int rn
= bits (insn1
, 0, 3);
6621 unsigned int rm
= bits (insn2
, 0, 3); /* Only valid if !immed. */
6622 /* In LDR (register), there is also a register Rm, which is not allowed to
6623 be PC, so we don't have to check it. */
6625 if (rt
!= ARM_PC_REGNUM
&& rn
!= ARM_PC_REGNUM
)
6626 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "load",
6629 if (debug_displaced
)
6630 fprintf_unfiltered (gdb_stdlog
,
6631 "displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
6632 rt
, rn
, insn1
, insn2
);
6634 install_load_store (gdbarch
, regs
, dsc
, 1, immed
, writeback
, 4,
6637 dsc
->u
.ldst
.restore_r4
= 0;
6640 /* ldr[b]<cond> rt, [rn, #imm], etc.
6642 ldr[b]<cond> r0, [r2, #imm]. */
6644 dsc
->modinsn
[0] = (insn1
& 0xfff0) | 0x2;
6645 dsc
->modinsn
[1] = insn2
& 0x0fff;
6648 /* ldr[b]<cond> rt, [rn, rm], etc.
6650 ldr[b]<cond> r0, [r2, r3]. */
6652 dsc
->modinsn
[0] = (insn1
& 0xfff0) | 0x2;
6653 dsc
->modinsn
[1] = (insn2
& 0x0ff0) | 0x3;
6663 arm_copy_ldr_str_ldrb_strb (struct gdbarch
*gdbarch
, uint32_t insn
,
6664 struct regcache
*regs
,
6665 struct displaced_step_closure
*dsc
,
6666 int load
, int size
, int usermode
)
6668 int immed
= !bit (insn
, 25);
6669 int writeback
= (bit (insn
, 24) == 0 || bit (insn
, 21) != 0);
6670 unsigned int rt
= bits (insn
, 12, 15);
6671 unsigned int rn
= bits (insn
, 16, 19);
6672 unsigned int rm
= bits (insn
, 0, 3); /* Only valid if !immed. */
6674 if (!insn_references_pc (insn
, 0x000ff00ful
))
6675 return arm_copy_unmodified (gdbarch
, insn
, "load/store", dsc
);
6677 if (debug_displaced
)
6678 fprintf_unfiltered (gdb_stdlog
,
6679 "displaced: copying %s%s r%d [r%d] insn %.8lx\n",
6680 load
? (size
== 1 ? "ldrb" : "ldr")
6681 : (size
== 1 ? "strb" : "str"), usermode
? "t" : "",
6683 (unsigned long) insn
);
6685 install_load_store (gdbarch
, regs
, dsc
, load
, immed
, writeback
, size
,
6686 usermode
, rt
, rm
, rn
);
6688 if (load
|| rt
!= ARM_PC_REGNUM
)
6690 dsc
->u
.ldst
.restore_r4
= 0;
6693 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
6695 {ldr,str}[b]<cond> r0, [r2, #imm]. */
6696 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
6698 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
6700 {ldr,str}[b]<cond> r0, [r2, r3]. */
6701 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
6705 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
6706 dsc
->u
.ldst
.restore_r4
= 1;
6707 dsc
->modinsn
[0] = 0xe92d8000; /* push {pc} */
6708 dsc
->modinsn
[1] = 0xe8bd0010; /* pop {r4} */
6709 dsc
->modinsn
[2] = 0xe044400f; /* sub r4, r4, pc. */
6710 dsc
->modinsn
[3] = 0xe2844008; /* add r4, r4, #8. */
6711 dsc
->modinsn
[4] = 0xe0800004; /* add r0, r0, r4. */
6715 dsc
->modinsn
[5] = (insn
& 0xfff00fff) | 0x20000;
6717 dsc
->modinsn
[5] = (insn
& 0xfff00ff0) | 0x20003;
6722 dsc
->cleanup
= load
? &cleanup_load
: &cleanup_store
;
6727 /* Cleanup LDM instructions with fully-populated register list. This is an
6728 unfortunate corner case: it's impossible to implement correctly by modifying
6729 the instruction. The issue is as follows: we have an instruction,
6733 which we must rewrite to avoid loading PC. A possible solution would be to
6734 do the load in two halves, something like (with suitable cleanup
6738 ldm[id][ab] r8!, {r0-r7}
6740 ldm[id][ab] r8, {r7-r14}
6743 but at present there's no suitable place for <temp>, since the scratch space
6744 is overwritten before the cleanup routine is called. For now, we simply
6745 emulate the instruction. */
6748 cleanup_block_load_all (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6749 struct displaced_step_closure
*dsc
)
6751 int inc
= dsc
->u
.block
.increment
;
6752 int bump_before
= dsc
->u
.block
.before
? (inc
? 4 : -4) : 0;
6753 int bump_after
= dsc
->u
.block
.before
? 0 : (inc
? 4 : -4);
6754 uint32_t regmask
= dsc
->u
.block
.regmask
;
6755 int regno
= inc
? 0 : 15;
6756 CORE_ADDR xfer_addr
= dsc
->u
.block
.xfer_addr
;
6757 int exception_return
= dsc
->u
.block
.load
&& dsc
->u
.block
.user
6758 && (regmask
& 0x8000) != 0;
6759 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6760 int do_transfer
= condition_true (dsc
->u
.block
.cond
, status
);
6761 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6766 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6767 sensible we can do here. Complain loudly. */
6768 if (exception_return
)
6769 error (_("Cannot single-step exception return"));
6771 /* We don't handle any stores here for now. */
6772 gdb_assert (dsc
->u
.block
.load
!= 0);
6774 if (debug_displaced
)
6775 fprintf_unfiltered (gdb_stdlog
, "displaced: emulating block transfer: "
6776 "%s %s %s\n", dsc
->u
.block
.load
? "ldm" : "stm",
6777 dsc
->u
.block
.increment
? "inc" : "dec",
6778 dsc
->u
.block
.before
? "before" : "after");
6785 while (regno
<= ARM_PC_REGNUM
&& (regmask
& (1 << regno
)) == 0)
6788 while (regno
>= 0 && (regmask
& (1 << regno
)) == 0)
6791 xfer_addr
+= bump_before
;
6793 memword
= read_memory_unsigned_integer (xfer_addr
, 4, byte_order
);
6794 displaced_write_reg (regs
, dsc
, regno
, memword
, LOAD_WRITE_PC
);
6796 xfer_addr
+= bump_after
;
6798 regmask
&= ~(1 << regno
);
6801 if (dsc
->u
.block
.writeback
)
6802 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, xfer_addr
,
6806 /* Clean up an STM which included the PC in the register list. */
6809 cleanup_block_store_pc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6810 struct displaced_step_closure
*dsc
)
6812 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6813 int store_executed
= condition_true (dsc
->u
.block
.cond
, status
);
6814 CORE_ADDR pc_stored_at
, transferred_regs
= bitcount (dsc
->u
.block
.regmask
);
6815 CORE_ADDR stm_insn_addr
;
6818 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6820 /* If condition code fails, there's nothing else to do. */
6821 if (!store_executed
)
6824 if (dsc
->u
.block
.increment
)
6826 pc_stored_at
= dsc
->u
.block
.xfer_addr
+ 4 * transferred_regs
;
6828 if (dsc
->u
.block
.before
)
6833 pc_stored_at
= dsc
->u
.block
.xfer_addr
;
6835 if (dsc
->u
.block
.before
)
6839 pc_val
= read_memory_unsigned_integer (pc_stored_at
, 4, byte_order
);
6840 stm_insn_addr
= dsc
->scratch_base
;
6841 offset
= pc_val
- stm_insn_addr
;
6843 if (debug_displaced
)
6844 fprintf_unfiltered (gdb_stdlog
, "displaced: detected PC offset %.8lx for "
6845 "STM instruction\n", offset
);
6847 /* Rewrite the stored PC to the proper value for the non-displaced original
6849 write_memory_unsigned_integer (pc_stored_at
, 4, byte_order
,
6850 dsc
->insn_addr
+ offset
);
6853 /* Clean up an LDM which includes the PC in the register list. We clumped all
6854 the registers in the transferred list into a contiguous range r0...rX (to
6855 avoid loading PC directly and losing control of the debugged program), so we
6856 must undo that here. */
6859 cleanup_block_load_pc (struct gdbarch
*gdbarch
,
6860 struct regcache
*regs
,
6861 struct displaced_step_closure
*dsc
)
6863 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6864 int load_executed
= condition_true (dsc
->u
.block
.cond
, status
), i
;
6865 unsigned int mask
= dsc
->u
.block
.regmask
, write_reg
= ARM_PC_REGNUM
;
6866 unsigned int regs_loaded
= bitcount (mask
);
6867 unsigned int num_to_shuffle
= regs_loaded
, clobbered
;
6869 /* The method employed here will fail if the register list is fully populated
6870 (we need to avoid loading PC directly). */
6871 gdb_assert (num_to_shuffle
< 16);
6876 clobbered
= (1 << num_to_shuffle
) - 1;
6878 while (num_to_shuffle
> 0)
6880 if ((mask
& (1 << write_reg
)) != 0)
6882 unsigned int read_reg
= num_to_shuffle
- 1;
6884 if (read_reg
!= write_reg
)
6886 ULONGEST rval
= displaced_read_reg (regs
, dsc
, read_reg
);
6887 displaced_write_reg (regs
, dsc
, write_reg
, rval
, LOAD_WRITE_PC
);
6888 if (debug_displaced
)
6889 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: move "
6890 "loaded register r%d to r%d\n"), read_reg
,
6893 else if (debug_displaced
)
6894 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: register "
6895 "r%d already in the right place\n"),
6898 clobbered
&= ~(1 << write_reg
);
6906 /* Restore any registers we scribbled over. */
6907 for (write_reg
= 0; clobbered
!= 0; write_reg
++)
6909 if ((clobbered
& (1 << write_reg
)) != 0)
6911 displaced_write_reg (regs
, dsc
, write_reg
, dsc
->tmp
[write_reg
],
6913 if (debug_displaced
)
6914 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: restored "
6915 "clobbered register r%d\n"), write_reg
);
6916 clobbered
&= ~(1 << write_reg
);
6920 /* Perform register writeback manually. */
6921 if (dsc
->u
.block
.writeback
)
6923 ULONGEST new_rn_val
= dsc
->u
.block
.xfer_addr
;
6925 if (dsc
->u
.block
.increment
)
6926 new_rn_val
+= regs_loaded
* 4;
6928 new_rn_val
-= regs_loaded
* 4;
6930 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, new_rn_val
,
6935 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6936 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6939 arm_copy_block_xfer (struct gdbarch
*gdbarch
, uint32_t insn
,
6940 struct regcache
*regs
,
6941 struct displaced_step_closure
*dsc
)
6943 int load
= bit (insn
, 20);
6944 int user
= bit (insn
, 22);
6945 int increment
= bit (insn
, 23);
6946 int before
= bit (insn
, 24);
6947 int writeback
= bit (insn
, 21);
6948 int rn
= bits (insn
, 16, 19);
6950 /* Block transfers which don't mention PC can be run directly
6952 if (rn
!= ARM_PC_REGNUM
&& (insn
& 0x8000) == 0)
6953 return arm_copy_unmodified (gdbarch
, insn
, "ldm/stm", dsc
);
6955 if (rn
== ARM_PC_REGNUM
)
6957 warning (_("displaced: Unpredictable LDM or STM with "
6958 "base register r15"));
6959 return arm_copy_unmodified (gdbarch
, insn
, "unpredictable ldm/stm", dsc
);
6962 if (debug_displaced
)
6963 fprintf_unfiltered (gdb_stdlog
, "displaced: copying block transfer insn "
6964 "%.8lx\n", (unsigned long) insn
);
6966 dsc
->u
.block
.xfer_addr
= displaced_read_reg (regs
, dsc
, rn
);
6967 dsc
->u
.block
.rn
= rn
;
6969 dsc
->u
.block
.load
= load
;
6970 dsc
->u
.block
.user
= user
;
6971 dsc
->u
.block
.increment
= increment
;
6972 dsc
->u
.block
.before
= before
;
6973 dsc
->u
.block
.writeback
= writeback
;
6974 dsc
->u
.block
.cond
= bits (insn
, 28, 31);
6976 dsc
->u
.block
.regmask
= insn
& 0xffff;
6980 if ((insn
& 0xffff) == 0xffff)
6982 /* LDM with a fully-populated register list. This case is
6983 particularly tricky. Implement for now by fully emulating the
6984 instruction (which might not behave perfectly in all cases, but
6985 these instructions should be rare enough for that not to matter
6987 dsc
->modinsn
[0] = ARM_NOP
;
6989 dsc
->cleanup
= &cleanup_block_load_all
;
6993 /* LDM of a list of registers which includes PC. Implement by
6994 rewriting the list of registers to be transferred into a
6995 contiguous chunk r0...rX before doing the transfer, then shuffling
6996 registers into the correct places in the cleanup routine. */
6997 unsigned int regmask
= insn
& 0xffff;
6998 unsigned int num_in_list
= bitcount (regmask
), new_regmask
, bit
= 1;
6999 unsigned int to
= 0, from
= 0, i
, new_rn
;
7001 for (i
= 0; i
< num_in_list
; i
++)
7002 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
7004 /* Writeback makes things complicated. We need to avoid clobbering
7005 the base register with one of the registers in our modified
7006 register list, but just using a different register can't work in
7009 ldm r14!, {r0-r13,pc}
7011 which would need to be rewritten as:
7015 but that can't work, because there's no free register for N.
7017 Solve this by turning off the writeback bit, and emulating
7018 writeback manually in the cleanup routine. */
7023 new_regmask
= (1 << num_in_list
) - 1;
7025 if (debug_displaced
)
7026 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM r%d%s, "
7027 "{..., pc}: original reg list %.4x, modified "
7028 "list %.4x\n"), rn
, writeback
? "!" : "",
7029 (int) insn
& 0xffff, new_regmask
);
7031 dsc
->modinsn
[0] = (insn
& ~0xffff) | (new_regmask
& 0xffff);
7033 dsc
->cleanup
= &cleanup_block_load_pc
;
7038 /* STM of a list of registers which includes PC. Run the instruction
7039 as-is, but out of line: this will store the wrong value for the PC,
7040 so we must manually fix up the memory in the cleanup routine.
7041 Doing things this way has the advantage that we can auto-detect
7042 the offset of the PC write (which is architecture-dependent) in
7043 the cleanup routine. */
7044 dsc
->modinsn
[0] = insn
;
7046 dsc
->cleanup
= &cleanup_block_store_pc
;
7053 thumb2_copy_block_xfer (struct gdbarch
*gdbarch
, uint16_t insn1
, uint16_t insn2
,
7054 struct regcache
*regs
,
7055 struct displaced_step_closure
*dsc
)
7057 int rn
= bits (insn1
, 0, 3);
7058 int load
= bit (insn1
, 4);
7059 int writeback
= bit (insn1
, 5);
7061 /* Block transfers which don't mention PC can be run directly
7063 if (rn
!= ARM_PC_REGNUM
&& (insn2
& 0x8000) == 0)
7064 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "ldm/stm", dsc
);
7066 if (rn
== ARM_PC_REGNUM
)
7068 warning (_("displaced: Unpredictable LDM or STM with "
7069 "base register r15"));
7070 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7071 "unpredictable ldm/stm", dsc
);
7074 if (debug_displaced
)
7075 fprintf_unfiltered (gdb_stdlog
, "displaced: copying block transfer insn "
7076 "%.4x%.4x\n", insn1
, insn2
);
7078 /* Clear bit 13, since it should be always zero. */
7079 dsc
->u
.block
.regmask
= (insn2
& 0xdfff);
7080 dsc
->u
.block
.rn
= rn
;
7082 dsc
->u
.block
.load
= load
;
7083 dsc
->u
.block
.user
= 0;
7084 dsc
->u
.block
.increment
= bit (insn1
, 7);
7085 dsc
->u
.block
.before
= bit (insn1
, 8);
7086 dsc
->u
.block
.writeback
= writeback
;
7087 dsc
->u
.block
.cond
= INST_AL
;
7088 dsc
->u
.block
.xfer_addr
= displaced_read_reg (regs
, dsc
, rn
);
7092 if (dsc
->u
.block
.regmask
== 0xffff)
7094 /* This branch is impossible to happen. */
7099 unsigned int regmask
= dsc
->u
.block
.regmask
;
7100 unsigned int num_in_list
= bitcount (regmask
), new_regmask
, bit
= 1;
7101 unsigned int to
= 0, from
= 0, i
, new_rn
;
7103 for (i
= 0; i
< num_in_list
; i
++)
7104 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
7109 new_regmask
= (1 << num_in_list
) - 1;
7111 if (debug_displaced
)
7112 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM r%d%s, "
7113 "{..., pc}: original reg list %.4x, modified "
7114 "list %.4x\n"), rn
, writeback
? "!" : "",
7115 (int) dsc
->u
.block
.regmask
, new_regmask
);
7117 dsc
->modinsn
[0] = insn1
;
7118 dsc
->modinsn
[1] = (new_regmask
& 0xffff);
7121 dsc
->cleanup
= &cleanup_block_load_pc
;
7126 dsc
->modinsn
[0] = insn1
;
7127 dsc
->modinsn
[1] = insn2
;
7129 dsc
->cleanup
= &cleanup_block_store_pc
;
7134 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7135 for Linux, where some SVC instructions must be treated specially. */
7138 cleanup_svc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7139 struct displaced_step_closure
*dsc
)
7141 CORE_ADDR resume_addr
= dsc
->insn_addr
+ dsc
->insn_size
;
7143 if (debug_displaced
)
7144 fprintf_unfiltered (gdb_stdlog
, "displaced: cleanup for svc, resume at "
7145 "%.8lx\n", (unsigned long) resume_addr
);
7147 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, resume_addr
, BRANCH_WRITE_PC
);
7151 /* Common copy routine for svc instruciton. */
7154 install_svc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7155 struct displaced_step_closure
*dsc
)
7157 /* Preparation: none.
7158 Insn: unmodified svc.
7159 Cleanup: pc <- insn_addr + insn_size. */
7161 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
7163 dsc
->wrote_to_pc
= 1;
7165 /* Allow OS-specific code to override SVC handling. */
7166 if (dsc
->u
.svc
.copy_svc_os
)
7167 return dsc
->u
.svc
.copy_svc_os (gdbarch
, regs
, dsc
);
7170 dsc
->cleanup
= &cleanup_svc
;
7176 arm_copy_svc (struct gdbarch
*gdbarch
, uint32_t insn
,
7177 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
7180 if (debug_displaced
)
7181 fprintf_unfiltered (gdb_stdlog
, "displaced: copying svc insn %.8lx\n",
7182 (unsigned long) insn
);
7184 dsc
->modinsn
[0] = insn
;
7186 return install_svc (gdbarch
, regs
, dsc
);
7190 thumb_copy_svc (struct gdbarch
*gdbarch
, uint16_t insn
,
7191 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
7194 if (debug_displaced
)
7195 fprintf_unfiltered (gdb_stdlog
, "displaced: copying svc insn %.4x\n",
7198 dsc
->modinsn
[0] = insn
;
7200 return install_svc (gdbarch
, regs
, dsc
);
7203 /* Copy undefined instructions. */
7206 arm_copy_undef (struct gdbarch
*gdbarch
, uint32_t insn
,
7207 struct displaced_step_closure
*dsc
)
7209 if (debug_displaced
)
7210 fprintf_unfiltered (gdb_stdlog
,
7211 "displaced: copying undefined insn %.8lx\n",
7212 (unsigned long) insn
);
7214 dsc
->modinsn
[0] = insn
;
7220 thumb_32bit_copy_undef (struct gdbarch
*gdbarch
, uint16_t insn1
, uint16_t insn2
,
7221 struct displaced_step_closure
*dsc
)
7224 if (debug_displaced
)
7225 fprintf_unfiltered (gdb_stdlog
, "displaced: copying undefined insn "
7226 "%.4x %.4x\n", (unsigned short) insn1
,
7227 (unsigned short) insn2
);
7229 dsc
->modinsn
[0] = insn1
;
7230 dsc
->modinsn
[1] = insn2
;
7236 /* Copy unpredictable instructions. */
7239 arm_copy_unpred (struct gdbarch
*gdbarch
, uint32_t insn
,
7240 struct displaced_step_closure
*dsc
)
7242 if (debug_displaced
)
7243 fprintf_unfiltered (gdb_stdlog
, "displaced: copying unpredictable insn "
7244 "%.8lx\n", (unsigned long) insn
);
7246 dsc
->modinsn
[0] = insn
;
7251 /* The decode_* functions are instruction decoding helpers. They mostly follow
7252 the presentation in the ARM ARM. */
7255 arm_decode_misc_memhint_neon (struct gdbarch
*gdbarch
, uint32_t insn
,
7256 struct regcache
*regs
,
7257 struct displaced_step_closure
*dsc
)
7259 unsigned int op1
= bits (insn
, 20, 26), op2
= bits (insn
, 4, 7);
7260 unsigned int rn
= bits (insn
, 16, 19);
7262 if (op1
== 0x10 && (op2
& 0x2) == 0x0 && (rn
& 0xe) == 0x0)
7263 return arm_copy_unmodified (gdbarch
, insn
, "cps", dsc
);
7264 else if (op1
== 0x10 && op2
== 0x0 && (rn
& 0xe) == 0x1)
7265 return arm_copy_unmodified (gdbarch
, insn
, "setend", dsc
);
7266 else if ((op1
& 0x60) == 0x20)
7267 return arm_copy_unmodified (gdbarch
, insn
, "neon dataproc", dsc
);
7268 else if ((op1
& 0x71) == 0x40)
7269 return arm_copy_unmodified (gdbarch
, insn
, "neon elt/struct load/store",
7271 else if ((op1
& 0x77) == 0x41)
7272 return arm_copy_unmodified (gdbarch
, insn
, "unallocated mem hint", dsc
);
7273 else if ((op1
& 0x77) == 0x45)
7274 return arm_copy_preload (gdbarch
, insn
, regs
, dsc
); /* pli. */
7275 else if ((op1
& 0x77) == 0x51)
7278 return arm_copy_preload (gdbarch
, insn
, regs
, dsc
); /* pld/pldw. */
7280 return arm_copy_unpred (gdbarch
, insn
, dsc
);
7282 else if ((op1
& 0x77) == 0x55)
7283 return arm_copy_preload (gdbarch
, insn
, regs
, dsc
); /* pld/pldw. */
7284 else if (op1
== 0x57)
7287 case 0x1: return arm_copy_unmodified (gdbarch
, insn
, "clrex", dsc
);
7288 case 0x4: return arm_copy_unmodified (gdbarch
, insn
, "dsb", dsc
);
7289 case 0x5: return arm_copy_unmodified (gdbarch
, insn
, "dmb", dsc
);
7290 case 0x6: return arm_copy_unmodified (gdbarch
, insn
, "isb", dsc
);
7291 default: return arm_copy_unpred (gdbarch
, insn
, dsc
);
7293 else if ((op1
& 0x63) == 0x43)
7294 return arm_copy_unpred (gdbarch
, insn
, dsc
);
7295 else if ((op2
& 0x1) == 0x0)
7296 switch (op1
& ~0x80)
7299 return arm_copy_unmodified (gdbarch
, insn
, "unallocated mem hint", dsc
);
7301 return arm_copy_preload_reg (gdbarch
, insn
, regs
, dsc
); /* pli reg. */
7302 case 0x71: case 0x75:
7304 return arm_copy_preload_reg (gdbarch
, insn
, regs
, dsc
);
7305 case 0x63: case 0x67: case 0x73: case 0x77:
7306 return arm_copy_unpred (gdbarch
, insn
, dsc
);
7308 return arm_copy_undef (gdbarch
, insn
, dsc
);
7311 return arm_copy_undef (gdbarch
, insn
, dsc
); /* Probably unreachable. */
7315 arm_decode_unconditional (struct gdbarch
*gdbarch
, uint32_t insn
,
7316 struct regcache
*regs
,
7317 struct displaced_step_closure
*dsc
)
7319 if (bit (insn
, 27) == 0)
7320 return arm_decode_misc_memhint_neon (gdbarch
, insn
, regs
, dsc
);
7321 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
7322 else switch (((insn
& 0x7000000) >> 23) | ((insn
& 0x100000) >> 20))
7325 return arm_copy_unmodified (gdbarch
, insn
, "srs", dsc
);
7328 return arm_copy_unmodified (gdbarch
, insn
, "rfe", dsc
);
7330 case 0x4: case 0x5: case 0x6: case 0x7:
7331 return arm_copy_b_bl_blx (gdbarch
, insn
, regs
, dsc
);
7334 switch ((insn
& 0xe00000) >> 21)
7336 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
7338 return arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
7341 return arm_copy_unmodified (gdbarch
, insn
, "mcrr/mcrr2", dsc
);
7344 return arm_copy_undef (gdbarch
, insn
, dsc
);
7349 int rn_f
= (bits (insn
, 16, 19) == 0xf);
7350 switch ((insn
& 0xe00000) >> 21)
7353 /* ldc/ldc2 imm (undefined for rn == pc). */
7354 return rn_f
? arm_copy_undef (gdbarch
, insn
, dsc
)
7355 : arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
7358 return arm_copy_unmodified (gdbarch
, insn
, "mrrc/mrrc2", dsc
);
7360 case 0x4: case 0x5: case 0x6: case 0x7:
7361 /* ldc/ldc2 lit (undefined for rn != pc). */
7362 return rn_f
? arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
)
7363 : arm_copy_undef (gdbarch
, insn
, dsc
);
7366 return arm_copy_undef (gdbarch
, insn
, dsc
);
7371 return arm_copy_unmodified (gdbarch
, insn
, "stc/stc2", dsc
);
7374 if (bits (insn
, 16, 19) == 0xf)
7376 return arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
7378 return arm_copy_undef (gdbarch
, insn
, dsc
);
7382 return arm_copy_unmodified (gdbarch
, insn
, "mcr/mcr2", dsc
);
7384 return arm_copy_unmodified (gdbarch
, insn
, "cdp/cdp2", dsc
);
7388 return arm_copy_unmodified (gdbarch
, insn
, "mrc/mrc2", dsc
);
7390 return arm_copy_unmodified (gdbarch
, insn
, "cdp/cdp2", dsc
);
7393 return arm_copy_undef (gdbarch
, insn
, dsc
);
7397 /* Decode miscellaneous instructions in dp/misc encoding space. */
7400 arm_decode_miscellaneous (struct gdbarch
*gdbarch
, uint32_t insn
,
7401 struct regcache
*regs
,
7402 struct displaced_step_closure
*dsc
)
7404 unsigned int op2
= bits (insn
, 4, 6);
7405 unsigned int op
= bits (insn
, 21, 22);
7406 unsigned int op1
= bits (insn
, 16, 19);
7411 return arm_copy_unmodified (gdbarch
, insn
, "mrs/msr", dsc
);
7414 if (op
== 0x1) /* bx. */
7415 return arm_copy_bx_blx_reg (gdbarch
, insn
, regs
, dsc
);
7417 return arm_copy_unmodified (gdbarch
, insn
, "clz", dsc
);
7419 return arm_copy_undef (gdbarch
, insn
, dsc
);
7423 /* Not really supported. */
7424 return arm_copy_unmodified (gdbarch
, insn
, "bxj", dsc
);
7426 return arm_copy_undef (gdbarch
, insn
, dsc
);
7430 return arm_copy_bx_blx_reg (gdbarch
, insn
,
7431 regs
, dsc
); /* blx register. */
7433 return arm_copy_undef (gdbarch
, insn
, dsc
);
7436 return arm_copy_unmodified (gdbarch
, insn
, "saturating add/sub", dsc
);
7440 return arm_copy_unmodified (gdbarch
, insn
, "bkpt", dsc
);
7442 /* Not really supported. */
7443 return arm_copy_unmodified (gdbarch
, insn
, "smc", dsc
);
7446 return arm_copy_undef (gdbarch
, insn
, dsc
);
7451 arm_decode_dp_misc (struct gdbarch
*gdbarch
, uint32_t insn
,
7452 struct regcache
*regs
,
7453 struct displaced_step_closure
*dsc
)
7456 switch (bits (insn
, 20, 24))
7459 return arm_copy_unmodified (gdbarch
, insn
, "movw", dsc
);
7462 return arm_copy_unmodified (gdbarch
, insn
, "movt", dsc
);
7464 case 0x12: case 0x16:
7465 return arm_copy_unmodified (gdbarch
, insn
, "msr imm", dsc
);
7468 return arm_copy_alu_imm (gdbarch
, insn
, regs
, dsc
);
7472 uint32_t op1
= bits (insn
, 20, 24), op2
= bits (insn
, 4, 7);
7474 if ((op1
& 0x19) != 0x10 && (op2
& 0x1) == 0x0)
7475 return arm_copy_alu_reg (gdbarch
, insn
, regs
, dsc
);
7476 else if ((op1
& 0x19) != 0x10 && (op2
& 0x9) == 0x1)
7477 return arm_copy_alu_shifted_reg (gdbarch
, insn
, regs
, dsc
);
7478 else if ((op1
& 0x19) == 0x10 && (op2
& 0x8) == 0x0)
7479 return arm_decode_miscellaneous (gdbarch
, insn
, regs
, dsc
);
7480 else if ((op1
& 0x19) == 0x10 && (op2
& 0x9) == 0x8)
7481 return arm_copy_unmodified (gdbarch
, insn
, "halfword mul/mla", dsc
);
7482 else if ((op1
& 0x10) == 0x00 && op2
== 0x9)
7483 return arm_copy_unmodified (gdbarch
, insn
, "mul/mla", dsc
);
7484 else if ((op1
& 0x10) == 0x10 && op2
== 0x9)
7485 return arm_copy_unmodified (gdbarch
, insn
, "synch", dsc
);
7486 else if (op2
== 0xb || (op2
& 0xd) == 0xd)
7487 /* 2nd arg means "unpriveleged". */
7488 return arm_copy_extra_ld_st (gdbarch
, insn
, (op1
& 0x12) == 0x02, regs
,
7492 /* Should be unreachable. */
7497 arm_decode_ld_st_word_ubyte (struct gdbarch
*gdbarch
, uint32_t insn
,
7498 struct regcache
*regs
,
7499 struct displaced_step_closure
*dsc
)
7501 int a
= bit (insn
, 25), b
= bit (insn
, 4);
7502 uint32_t op1
= bits (insn
, 20, 24);
7503 int rn_f
= bits (insn
, 16, 19) == 0xf;
7505 if ((!a
&& (op1
& 0x05) == 0x00 && (op1
& 0x17) != 0x02)
7506 || (a
&& (op1
& 0x05) == 0x00 && (op1
& 0x17) != 0x02 && !b
))
7507 return arm_copy_ldr_str_ldrb_strb (gdbarch
, insn
, regs
, dsc
, 0, 4, 0);
7508 else if ((!a
&& (op1
& 0x17) == 0x02)
7509 || (a
&& (op1
& 0x17) == 0x02 && !b
))
7510 return arm_copy_ldr_str_ldrb_strb (gdbarch
, insn
, regs
, dsc
, 0, 4, 1);
7511 else if ((!a
&& (op1
& 0x05) == 0x01 && (op1
& 0x17) != 0x03)
7512 || (a
&& (op1
& 0x05) == 0x01 && (op1
& 0x17) != 0x03 && !b
))
7513 return arm_copy_ldr_str_ldrb_strb (gdbarch
, insn
, regs
, dsc
, 1, 4, 0);
7514 else if ((!a
&& (op1
& 0x17) == 0x03)
7515 || (a
&& (op1
& 0x17) == 0x03 && !b
))
7516 return arm_copy_ldr_str_ldrb_strb (gdbarch
, insn
, regs
, dsc
, 1, 4, 1);
7517 else if ((!a
&& (op1
& 0x05) == 0x04 && (op1
& 0x17) != 0x06)
7518 || (a
&& (op1
& 0x05) == 0x04 && (op1
& 0x17) != 0x06 && !b
))
7519 return arm_copy_ldr_str_ldrb_strb (gdbarch
, insn
, regs
, dsc
, 0, 1, 0);
7520 else if ((!a
&& (op1
& 0x17) == 0x06)
7521 || (a
&& (op1
& 0x17) == 0x06 && !b
))
7522 return arm_copy_ldr_str_ldrb_strb (gdbarch
, insn
, regs
, dsc
, 0, 1, 1);
7523 else if ((!a
&& (op1
& 0x05) == 0x05 && (op1
& 0x17) != 0x07)
7524 || (a
&& (op1
& 0x05) == 0x05 && (op1
& 0x17) != 0x07 && !b
))
7525 return arm_copy_ldr_str_ldrb_strb (gdbarch
, insn
, regs
, dsc
, 1, 1, 0);
7526 else if ((!a
&& (op1
& 0x17) == 0x07)
7527 || (a
&& (op1
& 0x17) == 0x07 && !b
))
7528 return arm_copy_ldr_str_ldrb_strb (gdbarch
, insn
, regs
, dsc
, 1, 1, 1);
7530 /* Should be unreachable. */
7535 arm_decode_media (struct gdbarch
*gdbarch
, uint32_t insn
,
7536 struct displaced_step_closure
*dsc
)
7538 switch (bits (insn
, 20, 24))
7540 case 0x00: case 0x01: case 0x02: case 0x03:
7541 return arm_copy_unmodified (gdbarch
, insn
, "parallel add/sub signed", dsc
);
7543 case 0x04: case 0x05: case 0x06: case 0x07:
7544 return arm_copy_unmodified (gdbarch
, insn
, "parallel add/sub unsigned", dsc
);
7546 case 0x08: case 0x09: case 0x0a: case 0x0b:
7547 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
7548 return arm_copy_unmodified (gdbarch
, insn
,
7549 "decode/pack/unpack/saturate/reverse", dsc
);
7552 if (bits (insn
, 5, 7) == 0) /* op2. */
7554 if (bits (insn
, 12, 15) == 0xf)
7555 return arm_copy_unmodified (gdbarch
, insn
, "usad8", dsc
);
7557 return arm_copy_unmodified (gdbarch
, insn
, "usada8", dsc
);
7560 return arm_copy_undef (gdbarch
, insn
, dsc
);
7562 case 0x1a: case 0x1b:
7563 if (bits (insn
, 5, 6) == 0x2) /* op2[1:0]. */
7564 return arm_copy_unmodified (gdbarch
, insn
, "sbfx", dsc
);
7566 return arm_copy_undef (gdbarch
, insn
, dsc
);
7568 case 0x1c: case 0x1d:
7569 if (bits (insn
, 5, 6) == 0x0) /* op2[1:0]. */
7571 if (bits (insn
, 0, 3) == 0xf)
7572 return arm_copy_unmodified (gdbarch
, insn
, "bfc", dsc
);
7574 return arm_copy_unmodified (gdbarch
, insn
, "bfi", dsc
);
7577 return arm_copy_undef (gdbarch
, insn
, dsc
);
7579 case 0x1e: case 0x1f:
7580 if (bits (insn
, 5, 6) == 0x2) /* op2[1:0]. */
7581 return arm_copy_unmodified (gdbarch
, insn
, "ubfx", dsc
);
7583 return arm_copy_undef (gdbarch
, insn
, dsc
);
7586 /* Should be unreachable. */
7591 arm_decode_b_bl_ldmstm (struct gdbarch
*gdbarch
, int32_t insn
,
7592 struct regcache
*regs
,
7593 struct displaced_step_closure
*dsc
)
7596 return arm_copy_b_bl_blx (gdbarch
, insn
, regs
, dsc
);
7598 return arm_copy_block_xfer (gdbarch
, insn
, regs
, dsc
);
7602 arm_decode_ext_reg_ld_st (struct gdbarch
*gdbarch
, uint32_t insn
,
7603 struct regcache
*regs
,
7604 struct displaced_step_closure
*dsc
)
7606 unsigned int opcode
= bits (insn
, 20, 24);
7610 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
7611 return arm_copy_unmodified (gdbarch
, insn
, "vfp/neon mrrc/mcrr", dsc
);
7613 case 0x08: case 0x0a: case 0x0c: case 0x0e:
7614 case 0x12: case 0x16:
7615 return arm_copy_unmodified (gdbarch
, insn
, "vfp/neon vstm/vpush", dsc
);
7617 case 0x09: case 0x0b: case 0x0d: case 0x0f:
7618 case 0x13: case 0x17:
7619 return arm_copy_unmodified (gdbarch
, insn
, "vfp/neon vldm/vpop", dsc
);
7621 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
7622 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
7623 /* Note: no writeback for these instructions. Bit 25 will always be
7624 zero though (via caller), so the following works OK. */
7625 return arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
7628 /* Should be unreachable. */
7632 /* Decode shifted register instructions. */
7635 thumb2_decode_dp_shift_reg (struct gdbarch
*gdbarch
, uint16_t insn1
,
7636 uint16_t insn2
, struct regcache
*regs
,
7637 struct displaced_step_closure
*dsc
)
7639 /* PC is only allowed to be used in instruction MOV. */
7641 unsigned int op
= bits (insn1
, 5, 8);
7642 unsigned int rn
= bits (insn1
, 0, 3);
7644 if (op
== 0x2 && rn
== 0xf) /* MOV */
7645 return thumb2_copy_alu_imm (gdbarch
, insn1
, insn2
, regs
, dsc
);
7647 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7648 "dp (shift reg)", dsc
);
7652 /* Decode extension register load/store. Exactly the same as
7653 arm_decode_ext_reg_ld_st. */
7656 thumb2_decode_ext_reg_ld_st (struct gdbarch
*gdbarch
, uint16_t insn1
,
7657 uint16_t insn2
, struct regcache
*regs
,
7658 struct displaced_step_closure
*dsc
)
7660 unsigned int opcode
= bits (insn1
, 4, 8);
7664 case 0x04: case 0x05:
7665 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7666 "vfp/neon vmov", dsc
);
7668 case 0x08: case 0x0c: /* 01x00 */
7669 case 0x0a: case 0x0e: /* 01x10 */
7670 case 0x12: case 0x16: /* 10x10 */
7671 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7672 "vfp/neon vstm/vpush", dsc
);
7674 case 0x09: case 0x0d: /* 01x01 */
7675 case 0x0b: case 0x0f: /* 01x11 */
7676 case 0x13: case 0x17: /* 10x11 */
7677 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7678 "vfp/neon vldm/vpop", dsc
);
7680 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
7681 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7683 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
7684 return thumb2_copy_copro_load_store (gdbarch
, insn1
, insn2
, regs
, dsc
);
7687 /* Should be unreachable. */
7692 arm_decode_svc_copro (struct gdbarch
*gdbarch
, uint32_t insn
, CORE_ADDR to
,
7693 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
7695 unsigned int op1
= bits (insn
, 20, 25);
7696 int op
= bit (insn
, 4);
7697 unsigned int coproc
= bits (insn
, 8, 11);
7698 unsigned int rn
= bits (insn
, 16, 19);
7700 if ((op1
& 0x20) == 0x00 && (op1
& 0x3a) != 0x00 && (coproc
& 0xe) == 0xa)
7701 return arm_decode_ext_reg_ld_st (gdbarch
, insn
, regs
, dsc
);
7702 else if ((op1
& 0x21) == 0x00 && (op1
& 0x3a) != 0x00
7703 && (coproc
& 0xe) != 0xa)
7705 return arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
7706 else if ((op1
& 0x21) == 0x01 && (op1
& 0x3a) != 0x00
7707 && (coproc
& 0xe) != 0xa)
7708 /* ldc/ldc2 imm/lit. */
7709 return arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
7710 else if ((op1
& 0x3e) == 0x00)
7711 return arm_copy_undef (gdbarch
, insn
, dsc
);
7712 else if ((op1
& 0x3e) == 0x04 && (coproc
& 0xe) == 0xa)
7713 return arm_copy_unmodified (gdbarch
, insn
, "neon 64bit xfer", dsc
);
7714 else if (op1
== 0x04 && (coproc
& 0xe) != 0xa)
7715 return arm_copy_unmodified (gdbarch
, insn
, "mcrr/mcrr2", dsc
);
7716 else if (op1
== 0x05 && (coproc
& 0xe) != 0xa)
7717 return arm_copy_unmodified (gdbarch
, insn
, "mrrc/mrrc2", dsc
);
7718 else if ((op1
& 0x30) == 0x20 && !op
)
7720 if ((coproc
& 0xe) == 0xa)
7721 return arm_copy_unmodified (gdbarch
, insn
, "vfp dataproc", dsc
);
7723 return arm_copy_unmodified (gdbarch
, insn
, "cdp/cdp2", dsc
);
7725 else if ((op1
& 0x30) == 0x20 && op
)
7726 return arm_copy_unmodified (gdbarch
, insn
, "neon 8/16/32 bit xfer", dsc
);
7727 else if ((op1
& 0x31) == 0x20 && op
&& (coproc
& 0xe) != 0xa)
7728 return arm_copy_unmodified (gdbarch
, insn
, "mcr/mcr2", dsc
);
7729 else if ((op1
& 0x31) == 0x21 && op
&& (coproc
& 0xe) != 0xa)
7730 return arm_copy_unmodified (gdbarch
, insn
, "mrc/mrc2", dsc
);
7731 else if ((op1
& 0x30) == 0x30)
7732 return arm_copy_svc (gdbarch
, insn
, regs
, dsc
);
7734 return arm_copy_undef (gdbarch
, insn
, dsc
); /* Possibly unreachable. */
7738 thumb2_decode_svc_copro (struct gdbarch
*gdbarch
, uint16_t insn1
,
7739 uint16_t insn2
, struct regcache
*regs
,
7740 struct displaced_step_closure
*dsc
)
7742 unsigned int coproc
= bits (insn2
, 8, 11);
7743 unsigned int op1
= bits (insn1
, 4, 9);
7744 unsigned int bit_5_8
= bits (insn1
, 5, 8);
7745 unsigned int bit_9
= bit (insn1
, 9);
7746 unsigned int bit_4
= bit (insn1
, 4);
7747 unsigned int rn
= bits (insn1
, 0, 3);
7752 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7753 "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
7755 else if (bit_5_8
== 0) /* UNDEFINED. */
7756 return thumb_32bit_copy_undef (gdbarch
, insn1
, insn2
, dsc
);
7759 /*coproc is 101x. SIMD/VFP, ext registers load/store. */
7760 if ((coproc
& 0xe) == 0xa)
7761 return thumb2_decode_ext_reg_ld_st (gdbarch
, insn1
, insn2
, regs
,
7763 else /* coproc is not 101x. */
7765 if (bit_4
== 0) /* STC/STC2. */
7766 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7768 else /* LDC/LDC2 {literal, immeidate}. */
7769 return thumb2_copy_copro_load_store (gdbarch
, insn1
, insn2
,
7775 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "coproc", dsc
);
7781 install_pc_relative (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7782 struct displaced_step_closure
*dsc
, int rd
)
7788 Preparation: Rd <- PC
7794 int val
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
7795 displaced_write_reg (regs
, dsc
, rd
, val
, CANNOT_WRITE_PC
);
7799 thumb_copy_pc_relative_16bit (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7800 struct displaced_step_closure
*dsc
,
7801 int rd
, unsigned int imm
)
7804 /* Encoding T2: ADDS Rd, #imm */
7805 dsc
->modinsn
[0] = (0x3000 | (rd
<< 8) | imm
);
7807 install_pc_relative (gdbarch
, regs
, dsc
, rd
);
7813 thumb_decode_pc_relative_16bit (struct gdbarch
*gdbarch
, uint16_t insn
,
7814 struct regcache
*regs
,
7815 struct displaced_step_closure
*dsc
)
7817 unsigned int rd
= bits (insn
, 8, 10);
7818 unsigned int imm8
= bits (insn
, 0, 7);
7820 if (debug_displaced
)
7821 fprintf_unfiltered (gdb_stdlog
,
7822 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7825 return thumb_copy_pc_relative_16bit (gdbarch
, regs
, dsc
, rd
, imm8
);
7829 thumb_copy_pc_relative_32bit (struct gdbarch
*gdbarch
, uint16_t insn1
,
7830 uint16_t insn2
, struct regcache
*regs
,
7831 struct displaced_step_closure
*dsc
)
7833 unsigned int rd
= bits (insn2
, 8, 11);
7834 /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
7835 extract raw immediate encoding rather than computing immediate. When
7836 generating ADD or SUB instruction, we can simply perform OR operation to
7837 set immediate into ADD. */
7838 unsigned int imm_3_8
= insn2
& 0x70ff;
7839 unsigned int imm_i
= insn1
& 0x0400; /* Clear all bits except bit 10. */
7841 if (debug_displaced
)
7842 fprintf_unfiltered (gdb_stdlog
,
7843 "displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
7844 rd
, imm_i
, imm_3_8
, insn1
, insn2
);
7846 if (bit (insn1
, 7)) /* Encoding T2 */
7848 /* Encoding T3: SUB Rd, Rd, #imm */
7849 dsc
->modinsn
[0] = (0xf1a0 | rd
| imm_i
);
7850 dsc
->modinsn
[1] = ((rd
<< 8) | imm_3_8
);
7852 else /* Encoding T3 */
7854 /* Encoding T3: ADD Rd, Rd, #imm */
7855 dsc
->modinsn
[0] = (0xf100 | rd
| imm_i
);
7856 dsc
->modinsn
[1] = ((rd
<< 8) | imm_3_8
);
7860 install_pc_relative (gdbarch
, regs
, dsc
, rd
);
7866 thumb_copy_16bit_ldr_literal (struct gdbarch
*gdbarch
, unsigned short insn1
,
7867 struct regcache
*regs
,
7868 struct displaced_step_closure
*dsc
)
7870 unsigned int rt
= bits (insn1
, 8, 10);
7872 int imm8
= (bits (insn1
, 0, 7) << 2);
7873 CORE_ADDR from
= dsc
->insn_addr
;
7879 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
7881 Insn: LDR R0, [R2, R3];
7882 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
7884 if (debug_displaced
)
7885 fprintf_unfiltered (gdb_stdlog
,
7886 "displaced: copying thumb ldr r%d [pc #%d]\n"
7889 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
7890 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
7891 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
7892 pc
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
7893 /* The assembler calculates the required value of the offset from the
7894 Align(PC,4) value of this instruction to the label. */
7895 pc
= pc
& 0xfffffffc;
7897 displaced_write_reg (regs
, dsc
, 2, pc
, CANNOT_WRITE_PC
);
7898 displaced_write_reg (regs
, dsc
, 3, imm8
, CANNOT_WRITE_PC
);
7901 dsc
->u
.ldst
.xfersize
= 4;
7903 dsc
->u
.ldst
.immed
= 0;
7904 dsc
->u
.ldst
.writeback
= 0;
7905 dsc
->u
.ldst
.restore_r4
= 0;
7907 dsc
->modinsn
[0] = 0x58d0; /* ldr r0, [r2, r3]*/
7909 dsc
->cleanup
= &cleanup_load
;
/* Copy Thumb cbnz/cbz instruction.  */
7917 thumb_copy_cbnz_cbz (struct gdbarch
*gdbarch
, uint16_t insn1
,
7918 struct regcache
*regs
,
7919 struct displaced_step_closure
*dsc
)
7921 int non_zero
= bit (insn1
, 11);
7922 unsigned int imm5
= (bit (insn1
, 9) << 6) | (bits (insn1
, 3, 7) << 1);
7923 CORE_ADDR from
= dsc
->insn_addr
;
7924 int rn
= bits (insn1
, 0, 2);
7925 int rn_val
= displaced_read_reg (regs
, dsc
, rn
);
7927 dsc
->u
.branch
.cond
= (rn_val
&& non_zero
) || (!rn_val
&& !non_zero
);
7928 /* CBNZ and CBZ do not affect the condition flags. If condition is true,
7929 set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
7930 condition is false, let it be, cleanup_branch will do nothing. */
7931 if (dsc
->u
.branch
.cond
)
7933 dsc
->u
.branch
.cond
= INST_AL
;
7934 dsc
->u
.branch
.dest
= from
+ 4 + imm5
;
7937 dsc
->u
.branch
.dest
= from
+ 2;
7939 dsc
->u
.branch
.link
= 0;
7940 dsc
->u
.branch
.exchange
= 0;
7942 if (debug_displaced
)
7943 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s [r%d = 0x%x]"
7944 " insn %.4x to %.8lx\n", non_zero
? "cbnz" : "cbz",
7945 rn
, rn_val
, insn1
, dsc
->u
.branch
.dest
);
7947 dsc
->modinsn
[0] = THUMB_NOP
;
7949 dsc
->cleanup
= &cleanup_branch
;
7953 /* Copy Table Branch Byte/Halfword */
7955 thumb2_copy_table_branch (struct gdbarch
*gdbarch
, uint16_t insn1
,
7956 uint16_t insn2
, struct regcache
*regs
,
7957 struct displaced_step_closure
*dsc
)
7959 ULONGEST rn_val
, rm_val
;
7960 int is_tbh
= bit (insn2
, 4);
7961 CORE_ADDR halfwords
= 0;
7962 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
7964 rn_val
= displaced_read_reg (regs
, dsc
, bits (insn1
, 0, 3));
7965 rm_val
= displaced_read_reg (regs
, dsc
, bits (insn2
, 0, 3));
7971 target_read_memory (rn_val
+ 2 * rm_val
, buf
, 2);
7972 halfwords
= extract_unsigned_integer (buf
, 2, byte_order
);
7978 target_read_memory (rn_val
+ rm_val
, buf
, 1);
7979 halfwords
= extract_unsigned_integer (buf
, 1, byte_order
);
7982 if (debug_displaced
)
7983 fprintf_unfiltered (gdb_stdlog
, "displaced: %s base 0x%x offset 0x%x"
7984 " offset 0x%x\n", is_tbh
? "tbh" : "tbb",
7985 (unsigned int) rn_val
, (unsigned int) rm_val
,
7986 (unsigned int) halfwords
);
7988 dsc
->u
.branch
.cond
= INST_AL
;
7989 dsc
->u
.branch
.link
= 0;
7990 dsc
->u
.branch
.exchange
= 0;
7991 dsc
->u
.branch
.dest
= dsc
->insn_addr
+ 4 + 2 * halfwords
;
7993 dsc
->cleanup
= &cleanup_branch
;
7999 cleanup_pop_pc_16bit_all (struct gdbarch
*gdbarch
, struct regcache
*regs
,
8000 struct displaced_step_closure
*dsc
)
8003 int val
= displaced_read_reg (regs
, dsc
, 7);
8004 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, val
, BX_WRITE_PC
);
8007 val
= displaced_read_reg (regs
, dsc
, 8);
8008 displaced_write_reg (regs
, dsc
, 7, val
, CANNOT_WRITE_PC
);
8011 displaced_write_reg (regs
, dsc
, 8, dsc
->tmp
[0], CANNOT_WRITE_PC
);
8016 thumb_copy_pop_pc_16bit (struct gdbarch
*gdbarch
, unsigned short insn1
,
8017 struct regcache
*regs
,
8018 struct displaced_step_closure
*dsc
)
8020 dsc
->u
.block
.regmask
= insn1
& 0x00ff;
8022 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
8025 (1) register list is full, that is, r0-r7 are used.
8026 Prepare: tmp[0] <- r8
8028 POP {r0, r1, ...., r6, r7}; remove PC from reglist
8029 MOV r8, r7; Move value of r7 to r8;
8030 POP {r7}; Store PC value into r7.
8032 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
8034 (2) register list is not full, supposing there are N registers in
8035 register list (except PC, 0 <= N <= 7).
8036 Prepare: for each i, 0 - N, tmp[i] <- ri.
8038 POP {r0, r1, ...., rN};
8040 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
8041 from tmp[] properly.
8043 if (debug_displaced
)
8044 fprintf_unfiltered (gdb_stdlog
,
8045 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
8046 dsc
->u
.block
.regmask
, insn1
);
8048 if (dsc
->u
.block
.regmask
== 0xff)
8050 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 8);
8052 dsc
->modinsn
[0] = (insn1
& 0xfeff); /* POP {r0,r1,...,r6, r7} */
8053 dsc
->modinsn
[1] = 0x46b8; /* MOV r8, r7 */
8054 dsc
->modinsn
[2] = 0xbc80; /* POP {r7} */
8057 dsc
->cleanup
= &cleanup_pop_pc_16bit_all
;
8061 unsigned int num_in_list
= bitcount (dsc
->u
.block
.regmask
);
8062 unsigned int new_regmask
, bit
= 1;
8063 unsigned int to
= 0, from
= 0, i
, new_rn
;
8065 for (i
= 0; i
< num_in_list
+ 1; i
++)
8066 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
8068 new_regmask
= (1 << (num_in_list
+ 1)) - 1;
8070 if (debug_displaced
)
8071 fprintf_unfiltered (gdb_stdlog
, _("displaced: POP "
8072 "{..., pc}: original reg list %.4x,"
8073 " modified list %.4x\n"),
8074 (int) dsc
->u
.block
.regmask
, new_regmask
);
8076 dsc
->u
.block
.regmask
|= 0x8000;
8077 dsc
->u
.block
.writeback
= 0;
8078 dsc
->u
.block
.cond
= INST_AL
;
8080 dsc
->modinsn
[0] = (insn1
& ~0x1ff) | (new_regmask
& 0xff);
8082 dsc
->cleanup
= &cleanup_block_load_pc
;
8089 thumb_process_displaced_16bit_insn (struct gdbarch
*gdbarch
, uint16_t insn1
,
8090 struct regcache
*regs
,
8091 struct displaced_step_closure
*dsc
)
8093 unsigned short op_bit_12_15
= bits (insn1
, 12, 15);
8094 unsigned short op_bit_10_11
= bits (insn1
, 10, 11);
8097 /* 16-bit thumb instructions. */
8098 switch (op_bit_12_15
)
8100 /* Shift (imme), add, subtract, move and compare. */
8101 case 0: case 1: case 2: case 3:
8102 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
,
8103 "shift/add/sub/mov/cmp",
8107 switch (op_bit_10_11
)
8109 case 0: /* Data-processing */
8110 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
,
8114 case 1: /* Special data instructions and branch and exchange. */
8116 unsigned short op
= bits (insn1
, 7, 9);
8117 if (op
== 6 || op
== 7) /* BX or BLX */
8118 err
= thumb_copy_bx_blx_reg (gdbarch
, insn1
, regs
, dsc
);
8119 else if (bits (insn1
, 6, 7) != 0) /* ADD/MOV/CMP high registers. */
8120 err
= thumb_copy_alu_reg (gdbarch
, insn1
, regs
, dsc
);
8122 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "special data",
8126 default: /* LDR (literal) */
8127 err
= thumb_copy_16bit_ldr_literal (gdbarch
, insn1
, regs
, dsc
);
8130 case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
8131 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "ldr/str", dsc
);
8134 if (op_bit_10_11
< 2) /* Generate PC-relative address */
8135 err
= thumb_decode_pc_relative_16bit (gdbarch
, insn1
, regs
, dsc
);
8136 else /* Generate SP-relative address */
8137 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "sp-relative", dsc
);
8139 case 11: /* Misc 16-bit instructions */
8141 switch (bits (insn1
, 8, 11))
8143 case 1: case 3: case 9: case 11: /* CBNZ, CBZ */
8144 err
= thumb_copy_cbnz_cbz (gdbarch
, insn1
, regs
, dsc
);
8146 case 12: case 13: /* POP */
8147 if (bit (insn1
, 8)) /* PC is in register list. */
8148 err
= thumb_copy_pop_pc_16bit (gdbarch
, insn1
, regs
, dsc
);
8150 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "pop", dsc
);
8152 case 15: /* If-Then, and hints */
8153 if (bits (insn1
, 0, 3))
8154 /* If-Then makes up to four following instructions conditional.
8155 IT instruction itself is not conditional, so handle it as a
8156 common unmodified instruction. */
8157 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "If-Then",
8160 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "hints", dsc
);
8163 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "misc", dsc
);
8168 if (op_bit_10_11
< 2) /* Store multiple registers */
8169 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "stm", dsc
);
8170 else /* Load multiple registers */
8171 err
= thumb_copy_unmodified_16bit (gdbarch
, insn1
, "ldm", dsc
);
8173 case 13: /* Conditional branch and supervisor call */
8174 if (bits (insn1
, 9, 11) != 7) /* conditional branch */
8175 err
= thumb_copy_b (gdbarch
, insn1
, dsc
);
8177 err
= thumb_copy_svc (gdbarch
, insn1
, regs
, dsc
);
8179 case 14: /* Unconditional branch */
8180 err
= thumb_copy_b (gdbarch
, insn1
, dsc
);
8187 internal_error (__FILE__
, __LINE__
,
8188 _("thumb_process_displaced_16bit_insn: Instruction decode error"));
/* Decode a 32-bit Thumb-2 load / memory-hint instruction (INSN1:INSN2)
   for displaced stepping, dispatching to the appropriate copy routine.
   Returns the copy routine's status (0 on success, nonzero on decode
   error).  REGS is the inferior register cache and DSC the closure
   being filled in for this displaced step.  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
				 uint16_t insn1, uint16_t insn2,
				 struct regcache *regs,
				 struct displaced_step_closure *dsc)
{
  /* Destination register of the load (0xf means PC, or marks a hint).  */
  int rt = bits (insn2, 12, 15);
  /* Base register (0xf means PC-relative, i.e. a "literal" form).  */
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);
  int err = 0;

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    /* Preload with a non-PC base needs no modification.  */
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}
      break;

    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf)
	    /* LDRH/LDRSH (literal): PC-relative, needs fixup.  */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}
      break;

    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf)
	  /* LDR (literal): PC-relative word load.  */
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
	break;
      }

    default: /* Reserved encoding: treat as undefined instruction.  */
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      break;
    }

  return 0;
}
/* Decode and copy a 32-bit Thumb-2 instruction (INSN1:INSN2) for
   displaced stepping.  Dispatches on the major opcode fields of the
   first halfword, routing PC-sensitive instructions to copy routines
   that rewrite them and leaving the rest unmodified.  Calls
   internal_error if decoding fails.  */

static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  /* op1 selects among the three top-level Thumb-2 encoding groups.  */
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, exclusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed to use in load/store {dual, exclusive}
		     instructions.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex",
						     dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs,
						  dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;

	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }

    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12) /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      int op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);

	      /* ADR and ADD/SUB (PC-relative) need the PC rewritten.  */
	      if ((op == 0 || op == 0xa) && rn == 0xf)
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immediate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;

    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;

	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/ldiv", dsc);
	      break;
	    }
	  break;

	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;

    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
}
8401 thumb_process_displaced_insn (struct gdbarch
*gdbarch
, CORE_ADDR from
,
8402 CORE_ADDR to
, struct regcache
*regs
,
8403 struct displaced_step_closure
*dsc
)
8405 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8407 = read_memory_unsigned_integer (from
, 2, byte_order_for_code
);
8409 if (debug_displaced
)
8410 fprintf_unfiltered (gdb_stdlog
, "displaced: process thumb insn %.4x "
8411 "at %.8lx\n", insn1
, (unsigned long) from
);
8414 dsc
->insn_size
= thumb_insn_size (insn1
);
8415 if (thumb_insn_size (insn1
) == 4)
8418 = read_memory_unsigned_integer (from
+ 2, 2, byte_order_for_code
);
8419 thumb_process_displaced_32bit_insn (gdbarch
, insn1
, insn2
, regs
, dsc
);
8422 thumb_process_displaced_16bit_insn (gdbarch
, insn1
, regs
, dsc
);
/* Decode the instruction at FROM and fill in DSC (the displaced-step
   closure) with the modified instruction(s) to execute at scratch
   address TO, plus any cleanup callback.  Delegates to the Thumb path
   when the inferior is in Thumb mode.  Calls internal_error on decode
   failure.  */

void
arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  int err = 0;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn;

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  if (!displaced_in_arm_mode (regs))
    return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);

  dsc->is_thumb = 0;
  dsc->insn_size = 4;
  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
			"at %.8lx\n", (unsigned long) insn,
			(unsigned long) from);

  if ((insn & 0xf0000000) == 0xf0000000)
    /* Condition field of all-ones selects the unconditional space.  */
    err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = arm_decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("arm_process_displaced_insn: Instruction decode error"));
}
8483 /* Actually set up the scratch space for a displaced instruction. */
/* Write the modified instruction(s) recorded in DSC into the scratch
   space at TO, followed by a breakpoint of the appropriate mode, so the
   inferior traps back to GDB after the displaced instruction executes.
   FROM is the original PC (used only for debug output).  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Thumb scratch instructions are halfwords; ARM ones are words.
     NOTE(review): 32-bit Thumb-2 instructions are assumed to be stored
     as two halfword entries in modinsn — confirm against the decoders.  */
  int size = dsc->is_thumb ? 2 : 4;
  const unsigned char *bkp_insn;

  offset = 0;

  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
	  if (size == 4)
	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
				dsc->modinsn[i]);
	  else if (size == 2)
	    fprintf_unfiltered (gdb_stdlog, "%.4x",
				(unsigned short) dsc->modinsn[i]);

	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
			      (unsigned long) to + offset);
	}
      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			paddress (gdbarch, from), paddress (gdbarch, to));
}
8539 /* Entry point for copying an instruction into scratch space for displaced
8542 struct displaced_step_closure
*
8543 arm_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
8544 CORE_ADDR from
, CORE_ADDR to
,
8545 struct regcache
*regs
)
8547 struct displaced_step_closure
*dsc
8548 = xmalloc (sizeof (struct displaced_step_closure
));
8549 arm_process_displaced_insn (gdbarch
, from
, to
, regs
, dsc
);
8550 arm_displaced_init_closure (gdbarch
, from
, to
, dsc
);
8555 /* Entry point for cleaning things up after a displaced instruction has been
8559 arm_displaced_step_fixup (struct gdbarch
*gdbarch
,
8560 struct displaced_step_closure
*dsc
,
8561 CORE_ADDR from
, CORE_ADDR to
,
8562 struct regcache
*regs
)
8565 dsc
->cleanup (gdbarch
, regs
, dsc
);
8567 if (!dsc
->wrote_to_pc
)
8568 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
8569 dsc
->insn_addr
+ dsc
->insn_size
);
8573 #include "bfd-in2.h"
8574 #include "libcoff.h"
/* Disassembler callback: print the instruction at MEMADDR using the
   opcodes library, arranging for Thumb decoding when MEMADDR is in
   Thumb code.  Returns the instruction length as reported by the
   opcodes printer.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Lazily-built fake COFF Thumb symbol; static so it is
	 constructed only once and reused across calls.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit before handing the address to opcodes.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
8618 /* The following define instruction sequences that will cause ARM
8619 cpu's to take an undefined instruction trap. These are used to
8620 signal a breakpoint to GDB.
8622 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8623 modes. A different instruction is required for each mode. The ARM
8624 cpu's can also be big or little endian. Thus four different
8625 instructions are needed to support all cases.
8627 Note: ARMv4 defines several new instructions that will take the
8628 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8629 not in fact add the new instructions. The new undefined
8630 instructions in ARMv4 are all instructions that had no defined
8631 behaviour in earlier chips. There is no guarantee that they will
8632 raise an exception, but may be treated as NOP's. In practice, it
8633 may only be safe to rely on instructions matching:
8635 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8636 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8637 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8639 Even this may only be true if the condition predicate is true. The
8640 following use a condition predicate of ALWAYS so it is always TRUE.
8642 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8643 and NetBSD all use a software interrupt rather than an undefined
8644 instruction to force a trap. This can be handled by the
8645 abi-specific code during establishment of the gdbarch vector. */
/* Default breakpoint byte sequences: an undefined instruction for ARM
   mode and the BKPT-style pattern for Thumb mode, in both
   endiannesses.  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8657 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8658 the program counter value to determine whether a 16-bit or 32-bit
8659 breakpoint should be used. It returns a pointer to a string of
8660 bytes that encode a breakpoint instruction, stores the length of
8661 the string to *lenptr, and adjusts the program counter (if
8662 necessary) to point to the actual memory location where the
8663 breakpoint should be inserted. */
/* Determine the type and size of breakpoint to insert at PCPTR.  Uses
   the program counter value to determine whether a 16-bit or 32-bit
   breakpoint should be used.  Returns a pointer to the breakpoint
   bytes, stores their length in *LENPTR, and strips the Thumb bit from
   *PCPTR so it names the actual memory location.  */

static const unsigned char *
arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  if (arm_pc_is_thumb (gdbarch, *pcptr))
    {
      *pcptr = UNMAKE_THUMB_ADDR (*pcptr);

      /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
	 check whether we are replacing a 32-bit instruction.  */
      if (tdep->thumb2_breakpoint != NULL)
	{
	  gdb_byte buf[2];

	  if (target_read_memory (*pcptr, buf, 2) == 0)
	    {
	      unsigned short inst1;

	      inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
	      if (thumb_insn_size (inst1) == 4)
		{
		  *lenptr = tdep->thumb2_breakpoint_size;
		  return tdep->thumb2_breakpoint;
		}
	    }
	}

      /* 16-bit instruction, or the memory read failed: use the 16-bit
	 Thumb breakpoint.  */
      *lenptr = tdep->thumb_breakpoint_size;
      return tdep->thumb_breakpoint;
    }
  else
    {
      *lenptr = tdep->arm_breakpoint_size;
      return tdep->arm_breakpoint;
    }
}
8703 arm_remote_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
8706 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
8708 arm_breakpoint_from_pc (gdbarch
, pcptr
, kindptr
);
8710 if (arm_pc_is_thumb (gdbarch
, *pcptr
) && *kindptr
== 4)
8711 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8712 that this is not confused with a 32-bit ARM breakpoint. */
8716 /* Extract from an array REGBUF containing the (raw) register state a
8717 function return value of type TYPE, and copy that, in virtual
8718 format, into VALBUF. */
/* Extract from an array REGBUF containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  The layout depends on the floating-point model
   and on whether TYPE is scalar or aggregate.  */

static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float: the value is in the core registers r0 (and r1
	     for doubles).  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, tmpbuf);
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
8810 /* Will a function return an aggregate type in memory or in a
8811 register? Return 0 if an aggregate type can be returned in a
8812 register, 1 if it must be returned in memory. */
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  CHECK_TYPEDEF (type);

  /* In the ARM ABI, "integer" like aggregate types are returned in
     registers.  For an aggregate type to be integer like, its size
     must be less than or equal to INT_REGISTER_SIZE and the
     offset of each addressable subfield must be zero.  Note that bit
     fields are not addressable, and all addressable subfields of
     unions always start at offset zero.

     This function is based on the behaviour of GCC 2.95.1.
     See: gcc/arm.c: arm_return_in_memory() for details.

     Note: All versions of GCC before GCC 2.95.2 do not set up the
     parameters correctly for a function returning the following
     structure: struct { float f;}; This should be returned in memory,
     not a register.  Richard Earnshaw sent me a patch, but I do not
     know of any way to detect if a function like the above has been
     compiled with the correct calling convention.  */

  /* All aggregate types that won't fit in a register must be returned
     in memory.  */
  if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
    return 1;

  /* The AAPCS says all aggregates not larger than a word are returned
     in a register.  */
  if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
    return 0;

  /* The only aggregate types that can be returned in a register are
     structs and unions.  Arrays must be returned in memory.  */
  code = TYPE_CODE (type);
  if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
    return 1;

  /* Assume all other aggregate types can be returned in a register.
     Run a check for structures, unions and arrays.  */
  nRc = 0;

  if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
    {
      int i;

      /* Need to check if this struct/union is "integer" like.  For
	 this to be true, its size must be less than or equal to
	 INT_REGISTER_SIZE and the offset of each addressable
	 subfield must be zero.  Note that bit fields are not
	 addressable, and unions always start at offset zero.  If any
	 of the subfields is a floating point type, the struct/union
	 cannot be an integer type.  */

      /* For each field in the object, check:
	 1) Is it FP? --> yes, nRc = 1;
	 2) Is it addressable (bitpos != 0) and
	 not packed (bitsize == 0)?  */
      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  enum type_code field_type_code;

	  field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
								       i)));

	  /* Is it a floating point type field?  */
	  if (field_type_code == TYPE_CODE_FLT)
	    {
	      nRc = 1;
	      break;
	    }

	  /* If bitpos != 0, then we have to care about it.  */
	  if (TYPE_FIELD_BITPOS (type, i) != 0)
	    {
	      /* Bitfields are not addressable.  If the field bitsize is
		 zero, then the field is not packed.  Hence it cannot be
		 a bitfield or any other packed type.  */
	      if (TYPE_FIELD_BITSIZE (type, i) == 0)
		{
		  nRc = 1;
		  break;
		}
	    }
	}
    }

  return nRc;
}
8912 /* Write into appropriate registers a function return value of type
8913 TYPE, given in virtual format. */
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  Mirrors arm_extract_return_value.  */

static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      char buf[MAX_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  /* FPA returns in F0, in its extended internal format.  */
	  convert_to_extended (floatformat_from_type (type), buf, valbuf,
			       gdbarch_byte_order (gdbarch));
	  regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
				   valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= INT_REGISTER_SIZE;
	      valbuf += INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
9007 /* Handle function return values. */
/* Handle function return values: decide the convention for VALTYPE
   (VFP co-processor registers, core registers, or memory) and, when
   READBUF/WRITEBUF are given, move the value accordingly.  */

static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
		  struct type *valtype, struct regcache *regcache,
		  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  /* VFP "co-processor register candidates" (float/double/NEON
     homogeneous aggregates) are returned in s/d/q registers under the
     hard-float VFP ABI.  */
  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;

      for (i = 0; i < vfp_base_count; i++)
	{
	  if (reg_char == 'q')
	    {
	      /* Quad registers have no single raw register; use the
		 NEON helpers.  */
	      if (writebuf)
		arm_neon_quad_write (gdbarch, regcache, i,
				     writebuf + i * unit_length);

	      if (readbuf)
		arm_neon_quad_read (gdbarch, regcache, i,
				    readbuf + i * unit_length);
	    }
	  else
	    {
	      char name_buf[4];
	      int regnum;

	      /* Map "s0"/"d0"/... to a register number by name.  */
	      sprintf (name_buf, "%c%d", reg_char, i);
	      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						    strlen (name_buf));
	      if (writebuf)
		regcache_cooked_write (regcache, regnum,
				       writebuf + i * unit_length);
	      if (readbuf)
		regcache_cooked_read (regcache, regnum,
				      readbuf + i * unit_length);
	    }
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
	  || arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }

  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
9075 arm_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
9077 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
9078 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
9079 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
9081 char buf
[INT_REGISTER_SIZE
];
9083 jb_addr
= get_frame_register_unsigned (frame
, ARM_A1_REGNUM
);
9085 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
9089 *pc
= extract_unsigned_integer (buf
, INT_REGISTER_SIZE
, byte_order
);
9093 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
9094 return the target PC. Otherwise return 0. */
9097 arm_skip_stub (struct frame_info
*frame
, CORE_ADDR pc
)
9101 CORE_ADDR start_addr
;
9103 /* Find the starting address and name of the function containing the PC. */
9104 if (find_pc_partial_function (pc
, &name
, &start_addr
, NULL
) == 0)
9107 /* If PC is in a Thumb call or return stub, return the address of the
9108 target PC, which is in a register. The thunk functions are called
9109 _call_via_xx, where x is the register name. The possible names
9110 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
9111 functions, named __ARM_call_via_r[0-7]. */
9112 if (strncmp (name
, "_call_via_", 10) == 0
9113 || strncmp (name
, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
9115 /* Use the name suffix to determine which register contains the
9117 static char *table
[15] =
9118 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
9119 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
9122 int offset
= strlen (name
) - 2;
9124 for (regno
= 0; regno
<= 14; regno
++)
9125 if (strcmp (&name
[offset
], table
[regno
]) == 0)
9126 return get_frame_register_unsigned (frame
, regno
);
9129 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
9130 non-interworking calls to foo. We could decode the stubs
9131 to find the target but it's easier to use the symbol table. */
9132 namelen
= strlen (name
);
9133 if (name
[0] == '_' && name
[1] == '_'
9134 && ((namelen
> 2 + strlen ("_from_thumb")
9135 && strncmp (name
+ namelen
- strlen ("_from_thumb"), "_from_thumb",
9136 strlen ("_from_thumb")) == 0)
9137 || (namelen
> 2 + strlen ("_from_arm")
9138 && strncmp (name
+ namelen
- strlen ("_from_arm"), "_from_arm",
9139 strlen ("_from_arm")) == 0)))
9142 int target_len
= namelen
- 2;
9143 struct minimal_symbol
*minsym
;
9144 struct objfile
*objfile
;
9145 struct obj_section
*sec
;
9147 if (name
[namelen
- 1] == 'b')
9148 target_len
-= strlen ("_from_thumb");
9150 target_len
-= strlen ("_from_arm");
9152 target_name
= alloca (target_len
+ 1);
9153 memcpy (target_name
, name
+ 2, target_len
);
9154 target_name
[target_len
] = '\0';
9156 sec
= find_pc_section (pc
);
9157 objfile
= (sec
== NULL
) ? NULL
: sec
->objfile
;
9158 minsym
= lookup_minimal_symbol (target_name
, NULL
, objfile
);
9160 return SYMBOL_VALUE_ADDRESS (minsym
);
9165 return 0; /* not a stub */
9169 set_arm_command (char *args
, int from_tty
)
9171 printf_unfiltered (_("\
9172 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9173 help_list (setarmcmdlist
, "set arm ", all_commands
, gdb_stdout
);
9177 show_arm_command (char *args
, int from_tty
)
9179 cmd_show_list (showarmcmdlist
, from_tty
, "");
9183 arm_update_current_architecture (void)
9185 struct gdbarch_info info
;
9187 /* If the current architecture is not ARM, we have nothing to do. */
9188 if (gdbarch_bfd_arch_info (target_gdbarch
)->arch
!= bfd_arch_arm
)
9191 /* Update the architecture. */
9192 gdbarch_info_init (&info
);
9194 if (!gdbarch_update_p (info
))
9195 internal_error (__FILE__
, __LINE__
, _("could not update architecture"));
9199 set_fp_model_sfunc (char *args
, int from_tty
,
9200 struct cmd_list_element
*c
)
9202 enum arm_float_model fp_model
;
9204 for (fp_model
= ARM_FLOAT_AUTO
; fp_model
!= ARM_FLOAT_LAST
; fp_model
++)
9205 if (strcmp (current_fp_model
, fp_model_strings
[fp_model
]) == 0)
9207 arm_fp_model
= fp_model
;
9211 if (fp_model
== ARM_FLOAT_LAST
)
9212 internal_error (__FILE__
, __LINE__
, _("Invalid fp model accepted: %s."),
9215 arm_update_current_architecture ();
9219 show_fp_model (struct ui_file
*file
, int from_tty
,
9220 struct cmd_list_element
*c
, const char *value
)
9222 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9224 if (arm_fp_model
== ARM_FLOAT_AUTO
9225 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
9226 fprintf_filtered (file
, _("\
9227 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
9228 fp_model_strings
[tdep
->fp_model
]);
9230 fprintf_filtered (file
, _("\
9231 The current ARM floating point model is \"%s\".\n"),
9232 fp_model_strings
[arm_fp_model
]);
9236 arm_set_abi (char *args
, int from_tty
,
9237 struct cmd_list_element
*c
)
9239 enum arm_abi_kind arm_abi
;
9241 for (arm_abi
= ARM_ABI_AUTO
; arm_abi
!= ARM_ABI_LAST
; arm_abi
++)
9242 if (strcmp (arm_abi_string
, arm_abi_strings
[arm_abi
]) == 0)
9244 arm_abi_global
= arm_abi
;
9248 if (arm_abi
== ARM_ABI_LAST
)
9249 internal_error (__FILE__
, __LINE__
, _("Invalid ABI accepted: %s."),
9252 arm_update_current_architecture ();
9256 arm_show_abi (struct ui_file
*file
, int from_tty
,
9257 struct cmd_list_element
*c
, const char *value
)
9259 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9261 if (arm_abi_global
== ARM_ABI_AUTO
9262 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
9263 fprintf_filtered (file
, _("\
9264 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
9265 arm_abi_strings
[tdep
->arm_abi
]);
9267 fprintf_filtered (file
, _("The current ARM ABI is \"%s\".\n"),
9272 arm_show_fallback_mode (struct ui_file
*file
, int from_tty
,
9273 struct cmd_list_element
*c
, const char *value
)
9275 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9277 fprintf_filtered (file
,
9278 _("The current execution mode assumed "
9279 "(when symbols are unavailable) is \"%s\".\n"),
9280 arm_fallback_mode_string
);
9284 arm_show_force_mode (struct ui_file
*file
, int from_tty
,
9285 struct cmd_list_element
*c
, const char *value
)
9287 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9289 fprintf_filtered (file
,
9290 _("The current execution mode assumed "
9291 "(even when symbols are available) is \"%s\".\n"),
9292 arm_force_mode_string
);
9295 /* If the user changes the register disassembly style used for info
9296 register and other commands, we have to also switch the style used
9297 in opcodes for disassembly output. This function is run in the "set
9298 arm disassembly" command, and does that. */
/* "set arm disassembler" handler: propagate the chosen register-name
   style to the opcodes disassembler.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  set_disassembly_style ();
}
9307 /* Return the ARM register name corresponding to register I. */
9309 arm_register_name (struct gdbarch
*gdbarch
, int i
)
9311 const int num_regs
= gdbarch_num_regs (gdbarch
);
9313 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
9314 && i
>= num_regs
&& i
< num_regs
+ 32)
9316 static const char *const vfp_pseudo_names
[] = {
9317 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9318 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9319 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9320 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9323 return vfp_pseudo_names
[i
- num_regs
];
9326 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
9327 && i
>= num_regs
+ 32 && i
< num_regs
+ 32 + 16)
9329 static const char *const neon_pseudo_names
[] = {
9330 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9331 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9334 return neon_pseudo_names
[i
- num_regs
- 32];
9337 if (i
>= ARRAY_SIZE (arm_register_names
))
9338 /* These registers are only supported on targets which supply
9339 an XML description. */
9342 return arm_register_names
[i
];
9346 set_disassembly_style (void)
9350 /* Find the style that the user wants. */
9351 for (current
= 0; current
< num_disassembly_options
; current
++)
9352 if (disassembly_style
== valid_disassembly_styles
[current
])
9354 gdb_assert (current
< num_disassembly_options
);
9356 /* Synchronize the disassembler. */
9357 set_arm_regname_option (current
);
9360 /* Test whether the coff symbol specific value corresponds to a Thumb
9364 coff_sym_is_thumb (int val
)
9366 return (val
== C_THUMBEXT
9367 || val
== C_THUMBSTAT
9368 || val
== C_THUMBEXTFUNC
9369 || val
== C_THUMBSTATFUNC
9370 || val
== C_THUMBLABEL
);
9373 /* arm_coff_make_msymbol_special()
9374 arm_elf_make_msymbol_special()
9376 These functions test whether the COFF or ELF symbol corresponds to
9377 an address in thumb code, and set a "special" bit in a minimal
9378 symbol to indicate that it does. */
9381 arm_elf_make_msymbol_special(asymbol
*sym
, struct minimal_symbol
*msym
)
9383 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type
*)sym
)->internal_elf_sym
)
9384 == ST_BRANCH_TO_THUMB
)
9385 MSYMBOL_SET_SPECIAL (msym
);
/* If the COFF storage class VAL denotes a Thumb symbol, mark the
   corresponding minimal symbol MSYM as "special" (Thumb).  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
/* Objfile-data cleanup callback: free the per-section mapping-symbol
   vectors attached to OBJFILE.  ARG is the struct arm_per_objfile;
   the structure itself lives on the objfile obstack, so only the
   VECs it owns are released here.  */

static void
arm_objfile_data_free (struct objfile *objfile, void *arg)
{
  struct arm_per_objfile *data = arg;
  unsigned int i;

  /* One vector of mapping symbols was allocated per BFD section.  */
  for (i = 0; i < objfile->obfd->section_count; i++)
    VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
}
/* Record an ARM/Thumb mapping symbol ($a, $t or $d) found while
   reading symbols for OBJFILE.  The symbols are kept in per-section
   vectors, sorted by value, for later lookup by arm_pc_is_thumb.
   Symbols with any other "$" prefix are ignored.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  /* Only $a (ARM code), $t (Thumb code) and $d (data) are of
     interest; ignore other "$" symbols.  */
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Lazily create the per-objfile data the first time a mapping
     symbol is seen for this objfile.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
			     struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
					   objfile->obfd->section_count,
					   VEC(arm_mapping_symbol_s) *);
    }

  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
	{
	  /* Out-of-order symbol: binary-search for the insertion
	     point to keep the vector sorted.  */
	  unsigned int idx;

	  idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
				 arm_compare_mapping_symbols);
	  VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
	  return;
	}
    }

  /* Common case: append at the end.  */
  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
/* Implement the gdbarch write_pc hook: store PC into REGCACHE and
   keep the CPSR T (Thumb) bit consistent with the instruction set at
   the new PC, so that resuming executes in the right state.  */

static void
arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);

  /* If necessary, set the T bit.  */
  if (arm_apcs_32)
    {
      ULONGEST val, t_bit;

      regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
      t_bit = arm_psr_thumb_bit (gdbarch);
      if (arm_pc_is_thumb (gdbarch, pc))
	regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
					val | t_bit);
      else
	regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
					val & ~t_bit);
    }
}
/* Read the contents of a NEON quad register, by reading from two
   double registers.  This is used to implement the quad pseudo
   registers, and for argument passing in case the quad registers are
   missing; vectors are passed in quad registers when using the VFP
   ABI, even if a NEON unit is not present.  REGNUM is the index of
   the quad register, in [0, 15].  */

static enum register_status
arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
		    int regnum, gdb_byte *buf)
{
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;
  enum register_status status;

  /* Map qN onto its first half, d(2N), by name; the raw register
     numbering of the d registers is target-description dependent.  */
  sprintf (name_buf, "d%d", regnum << 1);
  double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
					       strlen (name_buf));

  /* d0 is always the least significant half of q0.  */
  if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
    offset = 8;
  else
    offset = 0;

  status = regcache_raw_read (regcache, double_regnum, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  /* The second double register fills the other half of the quad.  */
  offset = 8 - offset;
  status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  return REG_VALID;
}
/* Implement the gdbarch pseudo_register_read hook.  Pseudo registers
   0-31 are the VFP single-precision registers s0-s31, synthesized
   from halves of the double registers; pseudos 32-47 (when NEON
   pseudos are available) are the quad registers q0-q15.  */

static enum register_status
arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
		 int regnum, gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  /* Rebase to a pseudo-register index.  */
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
  else
    {
      enum register_status status;

      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* Find the raw register backing d(N/2) by name.  */
      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      status = regcache_raw_read (regcache, double_regnum, reg_buf);
      if (status == REG_VALID)
	memcpy (buf, reg_buf + offset, 4);
      return status;
    }
}
9554 /* Store the contents of BUF to a NEON quad register, by writing to
9555 two double registers. This is used to implement the quad pseudo
9556 registers, and for argument passing in case the quad registers are
9557 missing; vectors are passed in quad registers when using the VFP
9558 ABI, even if a NEON unit is not present. REGNUM is the index
9559 of the quad register, in [0, 15]. */
9562 arm_neon_quad_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
9563 int regnum
, const gdb_byte
*buf
)
9566 gdb_byte reg_buf
[8];
9567 int offset
, double_regnum
;
9569 sprintf (name_buf
, "d%d", regnum
<< 1);
9570 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
9573 /* d0 is always the least significant half of q0. */
9574 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
9579 regcache_raw_write (regcache
, double_regnum
, buf
+ offset
);
9580 offset
= 8 - offset
;
9581 regcache_raw_write (regcache
, double_regnum
+ 1, buf
+ offset
);
/* Implement the gdbarch pseudo_register_write hook; the mirror of
   arm_pseudo_read.  Writes to an s register are done as a
   read-modify-write of the containing d register so the other half
   is preserved.  */

static void
arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		  int regnum, const gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  /* Rebase to a pseudo-register index.  */
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
  else
    {
      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* Find the raw register backing d(N/2) by name.  */
      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      /* Read-modify-write: only 4 of the 8 bytes change.  */
      regcache_raw_read (regcache, double_regnum, reg_buf);
      memcpy (reg_buf + offset, buf, 4);
      regcache_raw_write (regcache, double_regnum, reg_buf);
    }
}
/* Frame-value callback used for the register aliases registered with
   user_reg_add; BATON points at the register number being aliased.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_ptr = baton;

  return value_of_register (*regnum_ptr, frame);
}
/* OS ABI sniffer for ARM ELF binaries.  Returns the detected OS ABI,
   or GDB_OSABI_UNKNOWN to let the generic ELF sniffer decide.  */

static enum gdb_osabi
arm_elf_osabi_sniffer (bfd *abfd)
{
  unsigned int elfosabi;
  enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;

  elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];

  if (elfosabi == ELFOSABI_ARM)
    /* GNU tools use this value.  Check note sections in this case,
       as well.  */
    bfd_map_over_sections (abfd,
			   generic_elf_osabi_sniff_abi_tag_sections,
			   &osabi);

  /* Anything else will be handled by the generic ELF sniffer.  */
  return osabi;
}
9647 arm_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
9648 struct reggroup
*group
)
9650 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9651 this, FPS register belongs to save_regroup, restore_reggroup, and
9652 all_reggroup, of course. */
9653 if (regnum
== ARM_FPS_REGNUM
)
9654 return (group
== float_reggroup
9655 || group
== save_reggroup
9656 || group
== restore_reggroup
9657 || group
== all_reggroup
);
9659 return default_register_reggroup_p (gdbarch
, regnum
, group
);
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  enum arm_abi_kind arm_abi = arm_abi_global;
  enum arm_float_model fp_model = arm_fp_model;
  struct tdesc_arch_data *tdesc_data = NULL;
  int i, is_m = 0;
  int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
  int have_neon = 0;
  int have_fpa_registers = 1;
  const struct target_desc *tdesc = info.target_desc;

  /* If we have an object to base this architecture on, try to determine
     its ABI.  */

  if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
    {
      int ei_osabi, e_flags;

      switch (bfd_get_flavour (info.abfd))
	{
	case bfd_target_aout_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_coff_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_elf_flavour:
	  ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
	  e_flags = elf_elfheader (info.abfd)->e_flags;

	  if (ei_osabi == ELFOSABI_ARM)
	    {
	      /* GNU tools used to use this value, but do not for EABI
		 objects.  There's nowhere to tag an EABI version
		 anyway, so assume APCS.  */
	      arm_abi = ARM_ABI_APCS;
	    }
	  else if (ei_osabi == ELFOSABI_NONE)
	    {
	      int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
	      int attr_arch, attr_profile;

	      switch (eabi_ver)
		{
		case EF_ARM_EABI_UNKNOWN:
		  /* Assume GNU tools.  */
		  arm_abi = ARM_ABI_APCS;
		  break;

		case EF_ARM_EABI_VER4:
		case EF_ARM_EABI_VER5:
		  arm_abi = ARM_ABI_AAPCS;
		  /* EABI binaries default to VFP float ordering.
		     They may also contain build attributes that can
		     be used to identify if the VFP argument-passing
		     ABI is in use.  */
		  if (fp_model == ARM_FLOAT_AUTO)
		    {
#ifdef HAVE_ELF
		      switch (bfd_elf_get_obj_attr_int (info.abfd,
							OBJ_ATTR_PROC,
							Tag_ABI_VFP_args))
			{
			case 0:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, base
			     variant".  */
			  fp_model = ARM_FLOAT_SOFT_VFP;
			  break;
			case 1:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, VFP
			     variant".  */
			  fp_model = ARM_FLOAT_VFP;
			  break;
			case 2:
			  /* "The user intended FP parameter/result
			     passing to conform to tool chain-specific
			     conventions" - we don't know any such
			     conventions, so leave it as "auto".  */
			  break;
			default:
			  /* Attribute value not mentioned in the
			     October 2008 ABI, so leave it as
			     "auto".  */
			  break;
			}
#else
		      fp_model = ARM_FLOAT_SOFT_VFP;
#endif
		    }
		  break;

		default:
		  /* Leave it as "auto".  */
		  warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
		  break;
		}

#ifdef HAVE_ELF
	      /* Detect M-profile programs.  This only works if the
		 executable file includes build attributes; GCC does
		 copy them to the executable, but e.g. RealView does
		 not.  */
	      attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
						    Tag_CPU_arch);
	      attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
						       OBJ_ATTR_PROC,
						       Tag_CPU_arch_profile);
	      /* GCC specifies the profile for v6-M; RealView only
		 specifies the profile for architectures starting with
		 V7 (as opposed to architectures with a tag
		 numerically greater than TAG_CPU_ARCH_V7).  */
	      if (!tdesc_has_registers (tdesc)
		  && (attr_arch == TAG_CPU_ARCH_V6_M
		      || attr_arch == TAG_CPU_ARCH_V6S_M
		      || attr_profile == 'M'))
		tdesc = tdesc_arm_with_m;
#endif
	    }

	  if (fp_model == ARM_FLOAT_AUTO)
	    {
	      int e_flags = elf_elfheader (info.abfd)->e_flags;

	      switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
		{
		case 0:
		  /* Leave it as "auto".  Strictly speaking this case
		     means FPA, but almost nobody uses that now, and
		     many toolchains fail to set the appropriate bits
		     for the floating-point model they use.  */
		  break;
		case EF_ARM_SOFT_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_FPA;
		  break;
		case EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_VFP;
		  break;
		case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_VFP;
		  break;
		}
	    }

	  /* BE8 binaries have little-endian code even though data is
	     big-endian.  */
	  if (e_flags & EF_ARM_BE8)
	    info.byte_order_for_code = BFD_ENDIAN_LITTLE;

	  break;

	default:
	  /* Leave it as "auto".  */
	  break;
	}
    }

  /* Check any target description for validity.  */
  if (tdesc_has_registers (tdesc))
    {
      /* For most registers we require GDB's default names; but also allow
	 the numeric names for sp / lr / pc, as a convenience.  */
      static const char *const arm_sp_names[] = { "r13", "sp", NULL };
      static const char *const arm_lr_names[] = { "r14", "lr", NULL };
      static const char *const arm_pc_names[] = { "r15", "pc", NULL };

      const struct tdesc_feature *feature;
      int valid_p;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.core");
      if (feature == NULL)
	{
	  /* An M-profile description supplies the core registers under
	     a different feature name.  */
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.m-profile");
	  if (feature == NULL)
	    return NULL;
	  else
	    is_m = 1;
	}

      tdesc_data = tdesc_data_alloc ();

      valid_p = 1;
      for (i = 0; i < ARM_SP_REGNUM; i++)
	valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
					    arm_register_names[i]);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_SP_REGNUM,
						  arm_sp_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_LR_REGNUM,
						  arm_lr_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_PC_REGNUM,
						  arm_pc_names);
      if (is_m)
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "xpsr");
      else
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "cpsr");

      if (!valid_p)
	{
	  tdesc_data_cleanup (tdesc_data);
	  return NULL;
	}

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.fpa");
      if (feature != NULL)
	{
	  valid_p = 1;
	  for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
	    valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
						arm_register_names[i]);
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}
      else
	have_fpa_registers = 0;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.xscale.iwmmxt");
      if (feature != NULL)
	{
	  static const char *const iwmmxt_names[] = {
	    "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
	    "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
	    "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
	    "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
	  };

	  valid_p = 1;
	  for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  /* Check for the control registers, but do not fail if they
	     are missing.  */
	  for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
	    tdesc_numbered_register (feature, tdesc_data, i,
				     iwmmxt_names[i - ARM_WR0_REGNUM]);

	  for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}

      /* If we have a VFP unit, check whether the single precision registers
	 are present.  If not, then we will synthesize them as pseudo
	 registers.  */
      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.vfp");
      if (feature != NULL)
	{
	  static const char *const vfp_double_names[] = {
	    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
	    "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
	    "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
	    "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
	  };

	  /* Require the double precision registers.  There must be either
	     16 or 32.  */
	  valid_p = 1;
	  for (i = 0; i < 32; i++)
	    {
	      valid_p &= tdesc_numbered_register (feature, tdesc_data,
						  ARM_D0_REGNUM + i,
						  vfp_double_names[i]);
	      if (!valid_p)
		break;
	    }
	  /* A 16-register VFP unit is also acceptable.  */
	  if (!valid_p && i == 16)
	    valid_p = 1;

	  /* Also require FPSCR.  */
	  valid_p &= tdesc_numbered_register (feature, tdesc_data,
					      ARM_FPSCR_REGNUM, "fpscr");
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }

	  /* If the stub does not provide s registers, synthesize them.  */
	  if (tdesc_unnumbered_register (feature, "s0") == 0)
	    have_vfp_pseudos = 1;

	  have_vfp_registers = 1;

	  /* If we have VFP, also check for NEON.  The architecture allows
	     NEON without VFP (integer vector operations only), but GDB
	     does not support that.  */
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.neon");
	  if (feature != NULL)
	    {
	      /* NEON requires 32 double-precision registers.  */
	      if (i != 32)
		{
		  tdesc_data_cleanup (tdesc_data);
		  return NULL;
		}

	      /* If there are quad registers defined by the stub, use
		 their type; otherwise (normally) provide them with
		 the default type.  */
	      if (tdesc_unnumbered_register (feature, "q0") == 0)
		have_neon_pseudos = 1;

	      have_neon = 1;
	    }
	}
    }

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      if (arm_abi != ARM_ABI_AUTO
	  && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
	continue;

      if (fp_model != ARM_FLOAT_AUTO
	  && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
	continue;

      /* There are various other properties in tdep that we do not
	 need to check here: those derived from a target description,
	 since gdbarches with a different target description are
	 automatically disqualified.  */

      /* Do check is_m, though, since it might come from the binary.  */
      if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
	continue;

      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Record additional information about the architecture we are defining.
     These are gdbarch discriminators, like the OSABI.  */
  tdep->arm_abi = arm_abi;
  tdep->fp_model = fp_model;
  tdep->is_m = is_m;
  tdep->have_fpa_registers = have_fpa_registers;
  tdep->have_vfp_registers = have_vfp_registers;
  tdep->have_vfp_pseudos = have_vfp_pseudos;
  tdep->have_neon_pseudos = have_neon_pseudos;
  tdep->have_neon = have_neon;

  /* Breakpoints: pick the encoding matching the code byte order.  */
  switch (info.byte_order_for_code)
    {
    case BFD_ENDIAN_BIG:
      tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);

      break;

    case BFD_ENDIAN_LITTLE:
      tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);

      break;

    default:
      internal_error (__FILE__, __LINE__,
		      _("arm_gdbarch_init: bad byte order for float format"));
    }

  /* On ARM targets char defaults to unsigned.  */
  set_gdbarch_char_signed (gdbarch, 0);

  /* Note: for displaced stepping, this includes the breakpoint, and one word
     of additional scratch space.  This setting isn't used for anything beside
     displaced stepping at present.  */
  set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;	/* Longjump support not enabled by default.  */

  /* The default, for both APCS and AAPCS, is to return small
     structures in registers.  */
  tdep->struct_return = reg_struct_return;

  set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, arm_frame_align);

  set_gdbarch_write_pc (gdbarch, arm_write_pc);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);

  frame_base_set_default (gdbarch, &arm_normal_base);

  /* Address manipulation.  */
  set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
  set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);

  /* Detect whether PC is in function epilogue.  */
  set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);

  /* Skip trampolines.  */
  set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
  set_gdbarch_remote_breakpoint_from_pc (gdbarch,
					 arm_remote_breakpoint_from_pc);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
  set_gdbarch_register_type (gdbarch, arm_register_type);
  set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);

  /* This "info float" is FPA-specific.  Use the generic version if we
     do not have FPA.  */
  if (gdbarch_tdep (gdbarch)->have_fpa_registers)
    set_gdbarch_print_float_info (gdbarch, arm_print_float_info);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
  set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);

  set_gdbarch_register_name (gdbarch, arm_register_name);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, arm_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);

  /* Minsymbol frobbing.  */
  set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
  set_gdbarch_coff_make_msymbol_special (gdbarch,
					 arm_coff_make_msymbol_special);
  set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);

  /* Thumb-2 IT block support.  */
  set_gdbarch_adjust_breakpoint_address (gdbarch,
					 arm_adjust_breakpoint_address);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
  frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  /* If the ABI is not otherwise marked, assume the old GNU APCS.  EABI
     binaries are always marked.  */
  if (tdep->arm_abi == ARM_ABI_AUTO)
    tdep->arm_abi = ARM_ABI_APCS;

  /* Watchpoints are not steppable.  */
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);

  /* We used to default to FPA for generic ARM, but almost nobody
     uses that now, and we now provide a way for the user to force
     the model.  So default to the most useful variant.  */
  if (tdep->fp_model == ARM_FLOAT_AUTO)
    tdep->fp_model = ARM_FLOAT_SOFT_FPA;

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);

  /* Floating point sizes and format.  */
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
    {
      /* FPA uses the mixed-endian "double stored as two swapped
	 words" format.  */
      set_gdbarch_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
      set_gdbarch_long_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
    }
  else
    {
      set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
      set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
    }

  if (have_vfp_pseudos)
    {
      /* NOTE: These are the only pseudo registers used by
	 the ARM target at the moment.  If more are added, a
	 little more care in numbering will be needed.  */

      int num_pseudos = 32;
      if (have_neon_pseudos)
	num_pseudos += 16;
      set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
      set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
      set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
    }

  if (tdesc_data)
    {
      set_tdesc_pseudo_register_name (gdbarch, arm_register_name);

      tdesc_use_registers (gdbarch, tdesc, tdesc_data);

      /* Override tdesc_register_type to adjust the types of VFP
	 registers for NEON.  */
      set_gdbarch_register_type (gdbarch, arm_register_type);
    }

  /* Add standard register aliases.  We add aliases even for those
     nanes which are used by the current architecture - it's simpler,
     and does no harm, since nothing ever lists user registers.  */
  for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
    user_reg_add (gdbarch, arm_register_aliases[i].name,
		  value_of_arm_user_reg, &arm_register_aliases[i].regnum);

  return gdbarch;
}
/* Implement the gdbarch dump_tdep hook: print the ARM-specific
   tdep fields to FILE for "maint print architecture".  */

static void
arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep == NULL)
    return;

  fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
		      (unsigned long) tdep->lowest_pc);
}
10256 extern initialize_file_ftype _initialize_arm_tdep
; /* -Wmissing-prototypes */
10259 _initialize_arm_tdep (void)
10261 struct ui_file
*stb
;
10263 struct cmd_list_element
*new_set
, *new_show
;
10264 const char *setname
;
10265 const char *setdesc
;
10266 const char *const *regnames
;
10268 static char *helptext
;
10269 char regdesc
[1024], *rdptr
= regdesc
;
10270 size_t rest
= sizeof (regdesc
);
10272 gdbarch_register (bfd_arch_arm
, arm_gdbarch_init
, arm_dump_tdep
);
10274 arm_objfile_data_key
10275 = register_objfile_data_with_cleanup (NULL
, arm_objfile_data_free
);
10277 /* Add ourselves to objfile event chain. */
10278 observer_attach_new_objfile (arm_exidx_new_objfile
);
10280 = register_objfile_data_with_cleanup (NULL
, arm_exidx_data_free
);
10282 /* Register an ELF OS ABI sniffer for ARM binaries. */
10283 gdbarch_register_osabi_sniffer (bfd_arch_arm
,
10284 bfd_target_elf_flavour
,
10285 arm_elf_osabi_sniffer
);
10287 /* Initialize the standard target descriptions. */
10288 initialize_tdesc_arm_with_m ();
10289 initialize_tdesc_arm_with_iwmmxt ();
10290 initialize_tdesc_arm_with_vfpv2 ();
10291 initialize_tdesc_arm_with_vfpv3 ();
10292 initialize_tdesc_arm_with_neon ();
10294 /* Get the number of possible sets of register names defined in opcodes. */
10295 num_disassembly_options
= get_arm_regname_num_options ();
10297 /* Add root prefix command for all "set arm"/"show arm" commands. */
10298 add_prefix_cmd ("arm", no_class
, set_arm_command
,
10299 _("Various ARM-specific commands."),
10300 &setarmcmdlist
, "set arm ", 0, &setlist
);
10302 add_prefix_cmd ("arm", no_class
, show_arm_command
,
10303 _("Various ARM-specific commands."),
10304 &showarmcmdlist
, "show arm ", 0, &showlist
);
10306 /* Sync the opcode insn printer with our register viewer. */
10307 parse_arm_disassembler_option ("reg-names-std");
10309 /* Initialize the array that will be passed to
10310 add_setshow_enum_cmd(). */
10311 valid_disassembly_styles
10312 = xmalloc ((num_disassembly_options
+ 1) * sizeof (char *));
10313 for (i
= 0; i
< num_disassembly_options
; i
++)
10315 numregs
= get_arm_regnames (i
, &setname
, &setdesc
, ®names
);
10316 valid_disassembly_styles
[i
] = setname
;
10317 length
= snprintf (rdptr
, rest
, "%s - %s\n", setname
, setdesc
);
10320 /* When we find the default names, tell the disassembler to use
10322 if (!strcmp (setname
, "std"))
10324 disassembly_style
= setname
;
10325 set_arm_regname_option (i
);
10328 /* Mark the end of valid options. */
10329 valid_disassembly_styles
[num_disassembly_options
] = NULL
;
10331 /* Create the help text. */
10332 stb
= mem_fileopen ();
10333 fprintf_unfiltered (stb
, "%s%s%s",
10334 _("The valid values are:\n"),
10336 _("The default is \"std\"."));
10337 helptext
= ui_file_xstrdup (stb
, NULL
);
10338 ui_file_delete (stb
);
10340 add_setshow_enum_cmd("disassembler", no_class
,
10341 valid_disassembly_styles
, &disassembly_style
,
10342 _("Set the disassembly style."),
10343 _("Show the disassembly style."),
10345 set_disassembly_style_sfunc
,
10346 NULL
, /* FIXME: i18n: The disassembly style is
10348 &setarmcmdlist
, &showarmcmdlist
);
10350 add_setshow_boolean_cmd ("apcs32", no_class
, &arm_apcs_32
,
10351 _("Set usage of ARM 32-bit mode."),
10352 _("Show usage of ARM 32-bit mode."),
10353 _("When off, a 26-bit PC will be used."),
10355 NULL
, /* FIXME: i18n: Usage of ARM 32-bit
10357 &setarmcmdlist
, &showarmcmdlist
);
10359 /* Add a command to allow the user to force the FPU model. */
10360 add_setshow_enum_cmd ("fpu", no_class
, fp_model_strings
, ¤t_fp_model
,
10361 _("Set the floating point type."),
10362 _("Show the floating point type."),
10363 _("auto - Determine the FP typefrom the OS-ABI.\n\
10364 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10365 fpa - FPA co-processor (GCC compiled).\n\
10366 softvfp - Software FP with pure-endian doubles.\n\
10367 vfp - VFP co-processor."),
10368 set_fp_model_sfunc
, show_fp_model
,
10369 &setarmcmdlist
, &showarmcmdlist
);
10371 /* Add a command to allow the user to force the ABI. */
10372 add_setshow_enum_cmd ("abi", class_support
, arm_abi_strings
, &arm_abi_string
,
10374 _("Show the ABI."),
10375 NULL
, arm_set_abi
, arm_show_abi
,
10376 &setarmcmdlist
, &showarmcmdlist
);
10378 /* Add two commands to allow the user to force the assumed
10380 add_setshow_enum_cmd ("fallback-mode", class_support
,
10381 arm_mode_strings
, &arm_fallback_mode_string
,
10382 _("Set the mode assumed when symbols are unavailable."),
10383 _("Show the mode assumed when symbols are unavailable."),
10384 NULL
, NULL
, arm_show_fallback_mode
,
10385 &setarmcmdlist
, &showarmcmdlist
);
10386 add_setshow_enum_cmd ("force-mode", class_support
,
10387 arm_mode_strings
, &arm_force_mode_string
,
10388 _("Set the mode assumed even when symbols are available."),
10389 _("Show the mode assumed even when symbols are available."),
10390 NULL
, NULL
, arm_show_force_mode
,
10391 &setarmcmdlist
, &showarmcmdlist
);
10393 /* Debugging flag. */
10394 add_setshow_boolean_cmd ("arm", class_maintenance
, &arm_debug
,
10395 _("Set ARM debugging."),
10396 _("Show ARM debugging."),
10397 _("When on, arm-specific debugging is enabled."),
10399 NULL
, /* FIXME: i18n: "ARM debugging is %s. */
10400 &setdebuglist
, &showdebuglist
);