/* Common target dependent code for GDB on ARM systems.

   Copyright (C) 1988-1989, 1991-1993, 1995-1996, 1998-2012 Free
   Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 #include <ctype.h> /* XXX for isupper (). */
28 #include "gdb_string.h"
29 #include "dis-asm.h" /* For register styles. */
31 #include "reggroups.h"
34 #include "arch-utils.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
40 #include "dwarf2-frame.h"
42 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
49 #include "gdb/sim-arm.h"
52 #include "coff/internal.h"
55 #include "gdb_assert.h"
58 #include "features/arm-with-m.c"
59 #include "features/arm-with-m-fpa-layout.c"
60 #include "features/arm-with-iwmmxt.c"
61 #include "features/arm-with-vfpv2.c"
62 #include "features/arm-with-vfpv3.c"
63 #include "features/arm-with-neon.c"
/* Macros for setting and testing a bit in a minimal symbol that marks
   it as a Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

#define MSYMBOL_SET_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym) = 1

#define MSYMBOL_IS_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym)
80 /* Per-objfile data used for mapping symbols. */
81 static const struct objfile_data
*arm_objfile_data_key
;
83 struct arm_mapping_symbol
88 typedef struct arm_mapping_symbol arm_mapping_symbol_s
;
89 DEF_VEC_O(arm_mapping_symbol_s
);
91 struct arm_per_objfile
93 VEC(arm_mapping_symbol_s
) **section_maps
;
96 /* The list of available "set arm ..." and "show arm ..." commands. */
97 static struct cmd_list_element
*setarmcmdlist
= NULL
;
98 static struct cmd_list_element
*showarmcmdlist
= NULL
;
100 /* The type of floating-point to use. Keep this in sync with enum
101 arm_float_model, and the help string in _initialize_arm_tdep. */
102 static const char *const fp_model_strings
[] =
112 /* A variable that can be configured by the user. */
113 static enum arm_float_model arm_fp_model
= ARM_FLOAT_AUTO
;
114 static const char *current_fp_model
= "auto";
116 /* The ABI to use. Keep this in sync with arm_abi_kind. */
117 static const char *const arm_abi_strings
[] =
125 /* A variable that can be configured by the user. */
126 static enum arm_abi_kind arm_abi_global
= ARM_ABI_AUTO
;
127 static const char *arm_abi_string
= "auto";
129 /* The execution mode to assume. */
130 static const char *const arm_mode_strings
[] =
/* User-settable fallback and forced execution modes ("auto", "arm",
   or "thumb").  */
static const char *arm_fallback_mode_string = "auto";
static const char *arm_force_mode_string = "auto";

/* Internal override of the execution mode.  -1 means no override,
   0 means override to ARM mode, 1 means override to Thumb mode.
   The effect is the same as if arm_force_mode has been set by the
   user (except the internal override has precedence over a user's
   arm_force_mode override).  */
static int arm_override_mode = -1;
148 /* Number of different reg name sets (options). */
149 static int num_disassembly_options
;
151 /* The standard register names, and all the valid aliases for them. Note
152 that `fp', `sp' and `pc' are not added in this alias list, because they
153 have been added as builtin user registers in
154 std-regs.c:_initialize_frame_reg. */
159 } arm_register_aliases
[] = {
160 /* Basic register numbers. */
177 /* Synonyms (argument and variable registers). */
190 /* Other platform-specific names for r9. */
196 /* Names used by GCC (not listed in the ARM EABI). */
198 /* A special name from the older ATPCS. */
/* The standard register names for the 26 core/FPA registers, indexed
   by GDB register number.  */
static const char *const arm_register_names[] =
{"r0", "r1", "r2", "r3",	/*  0  1  2  3 */
 "r4", "r5", "r6", "r7",	/*  4  5  6  7 */
 "r8", "r9", "r10", "r11",	/*  8  9 10 11 */
 "r12", "sp", "lr", "pc",	/* 12 13 14 15 */
 "f0", "f1", "f2", "f3",	/* 16 17 18 19 */
 "f4", "f5", "f6", "f7",	/* 20 21 22 23 */
 "fps", "cpsr" };		/* 24 25       */
211 /* Valid register name styles. */
212 static const char **valid_disassembly_styles
;
214 /* Disassembly style to use. Default to "std" register names. */
215 static const char *disassembly_style
;
217 /* This is used to keep the bfd arch_info in sync with the disassembly
219 static void set_disassembly_style_sfunc(char *, int,
220 struct cmd_list_element
*);
221 static void set_disassembly_style (void);
223 static void convert_from_extended (const struct floatformat
*, const void *,
225 static void convert_to_extended (const struct floatformat
*, void *,
228 static enum register_status
arm_neon_quad_read (struct gdbarch
*gdbarch
,
229 struct regcache
*regcache
,
230 int regnum
, gdb_byte
*buf
);
231 static void arm_neon_quad_write (struct gdbarch
*gdbarch
,
232 struct regcache
*regcache
,
233 int regnum
, const gdb_byte
*buf
);
235 static int thumb_insn_size (unsigned short inst1
);
237 struct arm_prologue_cache
239 /* The stack pointer at the time this frame was created; i.e. the
240 caller's stack pointer when this function was called. It is used
241 to identify this frame. */
244 /* The frame base for this frame is just prev_sp - frame size.
245 FRAMESIZE is the distance from the frame pointer to the
246 initial stack pointer. */
250 /* The register used to hold the frame pointer for this frame. */
253 /* Saved register offsets. */
254 struct trad_frame_saved_reg
*saved_regs
;
257 static CORE_ADDR
arm_analyze_prologue (struct gdbarch
*gdbarch
,
258 CORE_ADDR prologue_start
,
259 CORE_ADDR prologue_end
,
260 struct arm_prologue_cache
*cache
);
/* Architecture version for displaced stepping.  This effects the behaviour of
   certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5

/* Addresses for calling Thumb functions have the bit 0 set.
   Here are some macros to test, set, or clear bit 0 of addresses.  */
#define IS_THUMB_ADDR(addr)	((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
/* Set to true if the 32-bit mode is in use.  */

int arm_apcs_32 = 1;
277 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
280 arm_psr_thumb_bit (struct gdbarch
*gdbarch
)
282 if (gdbarch_tdep (gdbarch
)->is_m
)
288 /* Determine if FRAME is executing in Thumb mode. */
291 arm_frame_is_thumb (struct frame_info
*frame
)
294 ULONGEST t_bit
= arm_psr_thumb_bit (get_frame_arch (frame
));
296 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
297 directly (from a signal frame or dummy frame) or by interpreting
298 the saved LR (from a prologue or DWARF frame). So consult it and
299 trust the unwinders. */
300 cpsr
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
302 return (cpsr
& t_bit
) != 0;
305 /* Callback for VEC_lower_bound. */
308 arm_compare_mapping_symbols (const struct arm_mapping_symbol
*lhs
,
309 const struct arm_mapping_symbol
*rhs
)
311 return lhs
->value
< rhs
->value
;
314 /* Search for the mapping symbol covering MEMADDR. If one is found,
315 return its type. Otherwise, return 0. If START is non-NULL,
316 set *START to the location of the mapping symbol. */
319 arm_find_mapping_symbol (CORE_ADDR memaddr
, CORE_ADDR
*start
)
321 struct obj_section
*sec
;
323 /* If there are mapping symbols, consult them. */
324 sec
= find_pc_section (memaddr
);
327 struct arm_per_objfile
*data
;
328 VEC(arm_mapping_symbol_s
) *map
;
329 struct arm_mapping_symbol map_key
= { memaddr
- obj_section_addr (sec
),
333 data
= objfile_data (sec
->objfile
, arm_objfile_data_key
);
336 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
337 if (!VEC_empty (arm_mapping_symbol_s
, map
))
339 struct arm_mapping_symbol
*map_sym
;
341 idx
= VEC_lower_bound (arm_mapping_symbol_s
, map
, &map_key
,
342 arm_compare_mapping_symbols
);
344 /* VEC_lower_bound finds the earliest ordered insertion
345 point. If the following symbol starts at this exact
346 address, we use that; otherwise, the preceding
347 mapping symbol covers this address. */
348 if (idx
< VEC_length (arm_mapping_symbol_s
, map
))
350 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
);
351 if (map_sym
->value
== map_key
.value
)
354 *start
= map_sym
->value
+ obj_section_addr (sec
);
355 return map_sym
->type
;
361 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
- 1);
363 *start
= map_sym
->value
+ obj_section_addr (sec
);
364 return map_sym
->type
;
373 /* Determine if the program counter specified in MEMADDR is in a Thumb
374 function. This function should be called for addresses unrelated to
375 any executing frame; otherwise, prefer arm_frame_is_thumb. */
378 arm_pc_is_thumb (struct gdbarch
*gdbarch
, CORE_ADDR memaddr
)
380 struct obj_section
*sec
;
381 struct minimal_symbol
*sym
;
383 struct displaced_step_closure
* dsc
384 = get_displaced_step_closure_by_addr(memaddr
);
386 /* If checking the mode of displaced instruction in copy area, the mode
387 should be determined by instruction on the original address. */
391 fprintf_unfiltered (gdb_stdlog
,
392 "displaced: check mode of %.8lx instead of %.8lx\n",
393 (unsigned long) dsc
->insn_addr
,
394 (unsigned long) memaddr
);
395 memaddr
= dsc
->insn_addr
;
398 /* If bit 0 of the address is set, assume this is a Thumb address. */
399 if (IS_THUMB_ADDR (memaddr
))
402 /* Respect internal mode override if active. */
403 if (arm_override_mode
!= -1)
404 return arm_override_mode
;
406 /* If the user wants to override the symbol table, let him. */
407 if (strcmp (arm_force_mode_string
, "arm") == 0)
409 if (strcmp (arm_force_mode_string
, "thumb") == 0)
412 /* ARM v6-M and v7-M are always in Thumb mode. */
413 if (gdbarch_tdep (gdbarch
)->is_m
)
416 /* If there are mapping symbols, consult them. */
417 type
= arm_find_mapping_symbol (memaddr
, NULL
);
421 /* Thumb functions have a "special" bit set in minimal symbols. */
422 sym
= lookup_minimal_symbol_by_pc (memaddr
);
424 return (MSYMBOL_IS_SPECIAL (sym
));
426 /* If the user wants to override the fallback mode, let them. */
427 if (strcmp (arm_fallback_mode_string
, "arm") == 0)
429 if (strcmp (arm_fallback_mode_string
, "thumb") == 0)
432 /* If we couldn't find any symbol, but we're talking to a running
433 target, then trust the current value of $cpsr. This lets
434 "display/i $pc" always show the correct mode (though if there is
435 a symbol table we will not reach here, so it still may not be
436 displayed in the mode it will be executed). */
437 if (target_has_registers
)
438 return arm_frame_is_thumb (get_current_frame ());
440 /* Otherwise we're out of luck; we assume ARM. */
444 /* Remove useless bits from addresses in a running program. */
446 arm_addr_bits_remove (struct gdbarch
*gdbarch
, CORE_ADDR val
)
449 return UNMAKE_THUMB_ADDR (val
);
451 return (val
& 0x03fffffc);
454 /* When reading symbols, we need to zap the low bit of the address,
455 which may be set to 1 for Thumb functions. */
457 arm_smash_text_address (struct gdbarch
*gdbarch
, CORE_ADDR val
)
462 /* Return 1 if PC is the start of a compiler helper function which
463 can be safely ignored during prologue skipping. IS_THUMB is true
464 if the function is known to be a Thumb function due to the way it
467 skip_prologue_function (struct gdbarch
*gdbarch
, CORE_ADDR pc
, int is_thumb
)
469 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
470 struct minimal_symbol
*msym
;
472 msym
= lookup_minimal_symbol_by_pc (pc
);
474 && SYMBOL_VALUE_ADDRESS (msym
) == pc
475 && SYMBOL_LINKAGE_NAME (msym
) != NULL
)
477 const char *name
= SYMBOL_LINKAGE_NAME (msym
);
479 /* The GNU linker's Thumb call stub to foo is named
481 if (strstr (name
, "_from_thumb") != NULL
)
484 /* On soft-float targets, __truncdfsf2 is called to convert promoted
485 arguments to their argument types in non-prototyped
487 if (strncmp (name
, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
489 if (strncmp (name
, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
492 /* Internal functions related to thread-local storage. */
493 if (strncmp (name
, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
495 if (strncmp (name
, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
500 /* If we run against a stripped glibc, we may be unable to identify
501 special functions by name. Check for one important case,
502 __aeabi_read_tp, by comparing the *code* against the default
503 implementation (this is hand-written ARM assembler in glibc). */
506 && read_memory_unsigned_integer (pc
, 4, byte_order_for_code
)
507 == 0xe3e00a0f /* mov r0, #0xffff0fff */
508 && read_memory_unsigned_integer (pc
+ 4, 4, byte_order_for_code
)
509 == 0xe240f01f) /* sub pc, r0, #31 */
/* Support routines for instruction parsing.  */

/* Mask of the low (X + 1) bits.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST..FN (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Bits ST..FN of OBJ, sign-extended from bit FN.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Destination of an ARM-mode branch: PC + 8 plus the scaled,
   sign-extended 24-bit offset.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)               \
   | (bits ((insn1), 10, 10) << 11)           \
   | (bits ((insn2), 12, 14) << 8)            \
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12) \
   | bits ((insn), 0, 11))
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.
   IMM is the 12-bit modified immediate field (i:imm3:imm8); the result
   is the expanded 32-bit value.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int count = imm >> 7;

  if (count < 8)
    switch (count / 2)
      {
      case 0:
	/* 00000000 00000000 00000000 abcdefgh.  */
	return (imm & 0xff);
      case 1:
	/* 00000000 abcdefgh 00000000 abcdefgh.  */
	return (imm & 0xff) | ((imm & 0xff) << 16);
      case 2:
	/* abcdefgh 00000000 abcdefgh 00000000.  */
	return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
      case 3:
	/* abcdefgh abcdefgh abcdefgh abcdefgh.  */
	return (imm & 0xff) | ((imm & 0xff) << 8)
		| ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
      }

  /* 1bcdefgh, rotated right by COUNT.  */
  return (0x80 | (imm & 0x7f)) << (32 - count);
}
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  if ((inst & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    return 1;

  if ((inst & 0xf000) == 0xd000)	/* conditional branch */
    return 1;

  if ((inst & 0xf800) == 0xe000)	/* unconditional branch */
    return 1;

  if ((inst & 0xff00) == 0x4700)	/* bx REG, blx REG */
    return 1;

  if ((inst & 0xff87) == 0x4687)	/* mov pc, REG */
    return 1;

  if ((inst & 0xf500) == 0xb100)	/* CBNZ or CBZ.  */
    return 1;

  return 0;
}
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR to PC.  */
      if (bits (inst1, 0, 3) == 15)
	{
	  /* Rn == PC.  */
	  return 1;
	}
      if (bit (inst1, 7))
	{
	  /* LDR immediate, positive offset.  */
	  return 1;
	}
      if (bit (inst2, 11))
	{
	  /* LDR immediate, negative offset or pre/post indexed.  */
	  return 1;
	}
      if ((inst2 & 0x0fc0) == 0x0000)
	{
	  /* LDR register offset.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  return 0;
}
686 /* Analyze a Thumb prologue, looking for a recognizable stack frame
687 and frame pointer. Scan until we encounter a store that could
688 clobber the stack frame unexpectedly, or an unknown instruction.
689 Return the last address which is definitely safe to skip for an
690 initial breakpoint. */
693 thumb_analyze_prologue (struct gdbarch
*gdbarch
,
694 CORE_ADDR start
, CORE_ADDR limit
,
695 struct arm_prologue_cache
*cache
)
697 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
698 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
701 struct pv_area
*stack
;
702 struct cleanup
*back_to
;
704 CORE_ADDR unrecognized_pc
= 0;
706 for (i
= 0; i
< 16; i
++)
707 regs
[i
] = pv_register (i
, 0);
708 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
709 back_to
= make_cleanup_free_pv_area (stack
);
711 while (start
< limit
)
715 insn
= read_memory_unsigned_integer (start
, 2, byte_order_for_code
);
717 if ((insn
& 0xfe00) == 0xb400) /* push { rlist } */
722 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
725 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
726 whether to save LR (R14). */
727 mask
= (insn
& 0xff) | ((insn
& 0x100) << 6);
729 /* Calculate offsets of saved R0-R7 and LR. */
730 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
731 if (mask
& (1 << regno
))
733 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
735 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
738 else if ((insn
& 0xff00) == 0xb000) /* add sp, #simm OR
741 offset
= (insn
& 0x7f) << 2; /* get scaled offset */
742 if (insn
& 0x80) /* Check for SUB. */
743 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
746 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
749 else if ((insn
& 0xf800) == 0xa800) /* add Rd, sp, #imm */
750 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[ARM_SP_REGNUM
],
752 else if ((insn
& 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
753 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
754 regs
[bits (insn
, 0, 2)] = pv_add_constant (regs
[bits (insn
, 3, 5)],
756 else if ((insn
& 0xf800) == 0x3000 /* add Rd, #imm */
757 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
758 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[bits (insn
, 8, 10)],
760 else if ((insn
& 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
761 && pv_is_register (regs
[bits (insn
, 6, 8)], ARM_SP_REGNUM
)
762 && pv_is_constant (regs
[bits (insn
, 3, 5)]))
763 regs
[bits (insn
, 0, 2)] = pv_add (regs
[bits (insn
, 3, 5)],
764 regs
[bits (insn
, 6, 8)]);
765 else if ((insn
& 0xff00) == 0x4400 /* add Rd, Rm */
766 && pv_is_constant (regs
[bits (insn
, 3, 6)]))
768 int rd
= (bit (insn
, 7) << 3) + bits (insn
, 0, 2);
769 int rm
= bits (insn
, 3, 6);
770 regs
[rd
] = pv_add (regs
[rd
], regs
[rm
]);
772 else if ((insn
& 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
774 int dst_reg
= (insn
& 0x7) + ((insn
& 0x80) >> 4);
775 int src_reg
= (insn
& 0x78) >> 3;
776 regs
[dst_reg
] = regs
[src_reg
];
778 else if ((insn
& 0xf800) == 0x9000) /* str rd, [sp, #off] */
780 /* Handle stores to the stack. Normally pushes are used,
781 but with GCC -mtpcs-frame, there may be other stores
782 in the prologue to create the frame. */
783 int regno
= (insn
>> 8) & 0x7;
786 offset
= (insn
& 0xff) << 2;
787 addr
= pv_add_constant (regs
[ARM_SP_REGNUM
], offset
);
789 if (pv_area_store_would_trash (stack
, addr
))
792 pv_area_store (stack
, addr
, 4, regs
[regno
]);
794 else if ((insn
& 0xf800) == 0x6000) /* str rd, [rn, #off] */
796 int rd
= bits (insn
, 0, 2);
797 int rn
= bits (insn
, 3, 5);
800 offset
= bits (insn
, 6, 10) << 2;
801 addr
= pv_add_constant (regs
[rn
], offset
);
803 if (pv_area_store_would_trash (stack
, addr
))
806 pv_area_store (stack
, addr
, 4, regs
[rd
]);
808 else if (((insn
& 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
809 || (insn
& 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
810 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
811 /* Ignore stores of argument registers to the stack. */
813 else if ((insn
& 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
814 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
815 /* Ignore block loads from the stack, potentially copying
816 parameters from memory. */
818 else if ((insn
& 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
819 || ((insn
& 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
820 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
)))
821 /* Similarly ignore single loads from the stack. */
823 else if ((insn
& 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
824 || (insn
& 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
825 /* Skip register copies, i.e. saves to another register
826 instead of the stack. */
828 else if ((insn
& 0xf800) == 0x2000) /* movs Rd, #imm */
829 /* Recognize constant loads; even with small stacks these are necessary
831 regs
[bits (insn
, 8, 10)] = pv_constant (bits (insn
, 0, 7));
832 else if ((insn
& 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
834 /* Constant pool loads, for the same reason. */
835 unsigned int constant
;
838 loc
= start
+ 4 + bits (insn
, 0, 7) * 4;
839 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
840 regs
[bits (insn
, 8, 10)] = pv_constant (constant
);
842 else if (thumb_insn_size (insn
) == 4) /* 32-bit Thumb-2 instructions. */
844 unsigned short inst2
;
846 inst2
= read_memory_unsigned_integer (start
+ 2, 2,
847 byte_order_for_code
);
849 if ((insn
& 0xf800) == 0xf000 && (inst2
& 0xe800) == 0xe800)
851 /* BL, BLX. Allow some special function calls when
852 skipping the prologue; GCC generates these before
853 storing arguments to the stack. */
855 int j1
, j2
, imm1
, imm2
;
857 imm1
= sbits (insn
, 0, 10);
858 imm2
= bits (inst2
, 0, 10);
859 j1
= bit (inst2
, 13);
860 j2
= bit (inst2
, 11);
862 offset
= ((imm1
<< 12) + (imm2
<< 1));
863 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
865 nextpc
= start
+ 4 + offset
;
866 /* For BLX make sure to clear the low bits. */
867 if (bit (inst2
, 12) == 0)
868 nextpc
= nextpc
& 0xfffffffc;
870 if (!skip_prologue_function (gdbarch
, nextpc
,
871 bit (inst2
, 12) != 0))
875 else if ((insn
& 0xffd0) == 0xe900 /* stmdb Rn{!},
877 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
879 pv_t addr
= regs
[bits (insn
, 0, 3)];
882 if (pv_area_store_would_trash (stack
, addr
))
885 /* Calculate offsets of saved registers. */
886 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
887 if (inst2
& (1 << regno
))
889 addr
= pv_add_constant (addr
, -4);
890 pv_area_store (stack
, addr
, 4, regs
[regno
]);
894 regs
[bits (insn
, 0, 3)] = addr
;
897 else if ((insn
& 0xff50) == 0xe940 /* strd Rt, Rt2,
899 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
901 int regno1
= bits (inst2
, 12, 15);
902 int regno2
= bits (inst2
, 8, 11);
903 pv_t addr
= regs
[bits (insn
, 0, 3)];
905 offset
= inst2
& 0xff;
907 addr
= pv_add_constant (addr
, offset
);
909 addr
= pv_add_constant (addr
, -offset
);
911 if (pv_area_store_would_trash (stack
, addr
))
914 pv_area_store (stack
, addr
, 4, regs
[regno1
]);
915 pv_area_store (stack
, pv_add_constant (addr
, 4),
919 regs
[bits (insn
, 0, 3)] = addr
;
922 else if ((insn
& 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
923 && (inst2
& 0x0c00) == 0x0c00
924 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
926 int regno
= bits (inst2
, 12, 15);
927 pv_t addr
= regs
[bits (insn
, 0, 3)];
929 offset
= inst2
& 0xff;
931 addr
= pv_add_constant (addr
, offset
);
933 addr
= pv_add_constant (addr
, -offset
);
935 if (pv_area_store_would_trash (stack
, addr
))
938 pv_area_store (stack
, addr
, 4, regs
[regno
]);
941 regs
[bits (insn
, 0, 3)] = addr
;
944 else if ((insn
& 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
945 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
947 int regno
= bits (inst2
, 12, 15);
950 offset
= inst2
& 0xfff;
951 addr
= pv_add_constant (regs
[bits (insn
, 0, 3)], offset
);
953 if (pv_area_store_would_trash (stack
, addr
))
956 pv_area_store (stack
, addr
, 4, regs
[regno
]);
959 else if ((insn
& 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
960 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
961 /* Ignore stores of argument registers to the stack. */
964 else if ((insn
& 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
965 && (inst2
& 0x0d00) == 0x0c00
966 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
967 /* Ignore stores of argument registers to the stack. */
970 else if ((insn
& 0xffd0) == 0xe890 /* ldmia Rn[!],
972 && (inst2
& 0x8000) == 0x0000
973 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
974 /* Ignore block loads from the stack, potentially copying
975 parameters from memory. */
978 else if ((insn
& 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
980 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
981 /* Similarly ignore dual loads from the stack. */
984 else if ((insn
& 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
985 && (inst2
& 0x0d00) == 0x0c00
986 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
987 /* Similarly ignore single loads from the stack. */
990 else if ((insn
& 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
991 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
992 /* Similarly ignore single loads from the stack. */
995 else if ((insn
& 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
996 && (inst2
& 0x8000) == 0x0000)
998 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
999 | (bits (inst2
, 12, 14) << 8)
1000 | bits (inst2
, 0, 7));
1002 regs
[bits (inst2
, 8, 11)]
1003 = pv_add_constant (regs
[bits (insn
, 0, 3)],
1004 thumb_expand_immediate (imm
));
1007 else if ((insn
& 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1008 && (inst2
& 0x8000) == 0x0000)
1010 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1011 | (bits (inst2
, 12, 14) << 8)
1012 | bits (inst2
, 0, 7));
1014 regs
[bits (inst2
, 8, 11)]
1015 = pv_add_constant (regs
[bits (insn
, 0, 3)], imm
);
1018 else if ((insn
& 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1019 && (inst2
& 0x8000) == 0x0000)
1021 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1022 | (bits (inst2
, 12, 14) << 8)
1023 | bits (inst2
, 0, 7));
1025 regs
[bits (inst2
, 8, 11)]
1026 = pv_add_constant (regs
[bits (insn
, 0, 3)],
1027 - (CORE_ADDR
) thumb_expand_immediate (imm
));
1030 else if ((insn
& 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1031 && (inst2
& 0x8000) == 0x0000)
1033 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1034 | (bits (inst2
, 12, 14) << 8)
1035 | bits (inst2
, 0, 7));
1037 regs
[bits (inst2
, 8, 11)]
1038 = pv_add_constant (regs
[bits (insn
, 0, 3)], - (CORE_ADDR
) imm
);
1041 else if ((insn
& 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1043 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1044 | (bits (inst2
, 12, 14) << 8)
1045 | bits (inst2
, 0, 7));
1047 regs
[bits (inst2
, 8, 11)]
1048 = pv_constant (thumb_expand_immediate (imm
));
1051 else if ((insn
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1054 = EXTRACT_MOVW_MOVT_IMM_T (insn
, inst2
);
1056 regs
[bits (inst2
, 8, 11)] = pv_constant (imm
);
1059 else if (insn
== 0xea5f /* mov.w Rd,Rm */
1060 && (inst2
& 0xf0f0) == 0)
1062 int dst_reg
= (inst2
& 0x0f00) >> 8;
1063 int src_reg
= inst2
& 0xf;
1064 regs
[dst_reg
] = regs
[src_reg
];
1067 else if ((insn
& 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1069 /* Constant pool loads. */
1070 unsigned int constant
;
1073 offset
= bits (insn
, 0, 11);
1075 loc
= start
+ 4 + offset
;
1077 loc
= start
+ 4 - offset
;
1079 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1080 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1083 else if ((insn
& 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1085 /* Constant pool loads. */
1086 unsigned int constant
;
1089 offset
= bits (insn
, 0, 7) << 2;
1091 loc
= start
+ 4 + offset
;
1093 loc
= start
+ 4 - offset
;
1095 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1096 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1098 constant
= read_memory_unsigned_integer (loc
+ 4, 4, byte_order
);
1099 regs
[bits (inst2
, 8, 11)] = pv_constant (constant
);
1102 else if (thumb2_instruction_changes_pc (insn
, inst2
))
1104 /* Don't scan past anything that might change control flow. */
1109 /* The optimizer might shove anything into the prologue,
1110 so we just skip what we don't recognize. */
1111 unrecognized_pc
= start
;
1116 else if (thumb_instruction_changes_pc (insn
))
1118 /* Don't scan past anything that might change control flow. */
1123 /* The optimizer might shove anything into the prologue,
1124 so we just skip what we don't recognize. */
1125 unrecognized_pc
= start
;
1132 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1133 paddress (gdbarch
, start
));
1135 if (unrecognized_pc
== 0)
1136 unrecognized_pc
= start
;
1140 do_cleanups (back_to
);
1141 return unrecognized_pc
;
1144 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1146 /* Frame pointer is fp. Frame size is constant. */
1147 cache
->framereg
= ARM_FP_REGNUM
;
1148 cache
->framesize
= -regs
[ARM_FP_REGNUM
].k
;
1150 else if (pv_is_register (regs
[THUMB_FP_REGNUM
], ARM_SP_REGNUM
))
1152 /* Frame pointer is r7. Frame size is constant. */
1153 cache
->framereg
= THUMB_FP_REGNUM
;
1154 cache
->framesize
= -regs
[THUMB_FP_REGNUM
].k
;
1158 /* Try the stack pointer... this is a bit desperate. */
1159 cache
->framereg
= ARM_SP_REGNUM
;
1160 cache
->framesize
= -regs
[ARM_SP_REGNUM
].k
;
1163 for (i
= 0; i
< 16; i
++)
1164 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
1165 cache
->saved_regs
[i
].addr
= offset
;
1167 do_cleanups (back_to
);
1168 return unrecognized_pc
;
/* NOTE(review): this chunk is extraction-garbled — each original source
   line is split across several text lines (with the original line number
   fused in) and some lines are missing.  Code left byte-identical;
   comments only.

   Purpose (from the header comment below): scan the instruction(s) at PC
   that load the address of __stack_chk_guard, for both Thumb
   (ldr Rd,#immed or movw/movt pair) and ARM (ldr Rd,#immed or movw/movt)
   encodings, reporting the destination register and sequence size.  */
1172 /* Try to analyze the instructions starting from PC, which load symbol
1173 __stack_chk_guard. Return the address of instruction after loading this
1174 symbol, set the dest register number to *BASEREG, and set the size of
1175 instructions for loading symbol in OFFSET. Return 0 if instructions are
1179 arm_analyze_load_stack_chk_guard(CORE_ADDR pc
, struct gdbarch
*gdbarch
,
1180 unsigned int *destreg
, int *offset
)
1182 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1183 int is_thumb
= arm_pc_is_thumb (gdbarch
, pc
);
1184 unsigned int low
, high
, address
;
/* Thumb path: first halfword fetched at PC.  */
1189 unsigned short insn1
1190 = read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
1192 if ((insn1
& 0xf800) == 0x4800) /* ldr Rd, #immed */
1194 *destreg
= bits (insn1
, 8, 10);
1196 address
= bits (insn1
, 0, 7);
1198 else if ((insn1
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1200 unsigned short insn2
1201 = read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
1203 low
= EXTRACT_MOVW_MOVT_IMM_T (insn1
, insn2
);
/* NOTE(review): the left-hand sides of the next two assignments
   (presumably re-reading insn1/insn2 at pc+4 and pc+6) were lost in
   extraction — confirm against the original source.  */
1206 = read_memory_unsigned_integer (pc
+ 4, 2, byte_order_for_code
);
1208 = read_memory_unsigned_integer (pc
+ 6, 2, byte_order_for_code
);
1210 /* movt Rd, #const */
1211 if ((insn1
& 0xfbc0) == 0xf2c0)
1213 high
= EXTRACT_MOVW_MOVT_IMM_T (insn1
, insn2
);
1214 *destreg
= bits (insn2
, 8, 11);
1216 address
= (high
<< 16 | low
);
/* ARM path: one 32-bit word fetched at PC (assignment target lost in
   extraction).  */
1223 = read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
1225 if ((insn
& 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1227 address
= bits (insn
, 0, 11);
1228 *destreg
= bits (insn
, 12, 15);
1231 else if ((insn
& 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1233 low
= EXTRACT_MOVW_MOVT_IMM_A (insn
);
1236 = read_memory_unsigned_integer (pc
+ 4, 4, byte_order_for_code
);
1238 if ((insn
& 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1240 high
= EXTRACT_MOVW_MOVT_IMM_A (insn
);
1241 *destreg
= bits (insn
, 12, 15);
1243 address
= (high
<< 16 | low
);
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Verifies the three-step -fstack-protector preamble
   (load guard address / ldr guard value / str it) and skips past it.  */
1251 /* Try to skip a sequence of instructions used for stack protector. If PC
1252 points to the first instruction of this sequence, return the address of
1253 first instruction after this sequence, otherwise, return original PC.
1255 On arm, this sequence of instructions is composed of mainly three steps,
1256 Step 1: load symbol __stack_chk_guard,
1257 Step 2: load from address of __stack_chk_guard,
1258 Step 3: store it to somewhere else.
1260 Usually, instructions on step 2 and step 3 are the same on various ARM
1261 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1262 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1263 instructions in step 1 vary from different ARM architectures. On ARMv7,
1266 movw Rn, #:lower16:__stack_chk_guard
1267 movt Rn, #:upper16:__stack_chk_guard
1274 .word __stack_chk_guard
1276 Since ldr/str is a very popular instruction, we can't use them as
1277 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1278 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1279 stripped, as the 'fingerprint' of a stack protector code sequence. */
1282 arm_skip_stack_protector(CORE_ADDR pc
, struct gdbarch
*gdbarch
)
1284 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1285 unsigned int address
, basereg
;
1286 struct minimal_symbol
*stack_chk_guard
;
1288 int is_thumb
= arm_pc_is_thumb (gdbarch
, pc
);
1291 /* Try to parse the instructions in Step 1. */
1292 addr
= arm_analyze_load_stack_chk_guard (pc
, gdbarch
,
1297 stack_chk_guard
= lookup_minimal_symbol_by_pc (addr
);
1298 /* If name of symbol doesn't start with '__stack_chk_guard', this
1299 instruction sequence is not for stack protector. If symbol is
1300 removed, we conservatively think this sequence is for stack protector. */
1302 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard
), "__stack_chk_guard",
1303 strlen ("__stack_chk_guard")) != 0)
/* Thumb flavour of steps 2 and 3 (16-bit encodings T1).  */
1308 unsigned int destreg
;
1310 = read_memory_unsigned_integer (pc
+ offset
, 2, byte_order_for_code
);
1312 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1313 if ((insn
& 0xf800) != 0x6800)
1315 if (bits (insn
, 3, 5) != basereg
)
1317 destreg
= bits (insn
, 0, 2);
1319 insn
= read_memory_unsigned_integer (pc
+ offset
+ 2, 2,
1320 byte_order_for_code
);
1321 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1322 if ((insn
& 0xf800) != 0x6000)
1324 if (destreg
!= bits (insn
, 0, 2))
/* ARM flavour of steps 2 and 3 (32-bit encodings A1).  */
1329 unsigned int destreg
;
1331 = read_memory_unsigned_integer (pc
+ offset
, 4, byte_order_for_code
);
1333 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1334 if ((insn
& 0x0e500000) != 0x04100000)
1336 if (bits (insn
, 16, 19) != basereg
)
1338 destreg
= bits (insn
, 12, 15);
1339 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1340 insn
= read_memory_unsigned_integer (pc
+ offset
+ 4,
1341 4, byte_order_for_code
);
1342 if ((insn
& 0x0e500000) != 0x04000000)
1344 if (bits (insn
, 12, 15) != destreg
)
1347 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1350 return pc
+ offset
+ 4;
1352 return pc
+ offset
+ 8;
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  gdbarch skip_prologue hook: prefers line-table info
   (skip_prologue_using_sal), then falls back to instruction scanning.  */
1355 /* Advance the PC across any function entry prologue instructions to
1356 reach some "real" code.
1358 The APCS (ARM Procedure Call Standard) defines the following
1362 [stmfd sp!, {a1,a2,a3,a4}]
1363 stmfd sp!, {...,fp,ip,lr,pc}
1364 [stfe f7, [sp, #-12]!]
1365 [stfe f6, [sp, #-12]!]
1366 [stfe f5, [sp, #-12]!]
1367 [stfe f4, [sp, #-12]!]
1368 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1371 arm_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
1373 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1376 CORE_ADDR func_addr
, limit_pc
;
1377 struct symtab_and_line sal
;
1379 /* See if we can determine the end of the prologue via the symbol table.
1380 If so, then return either PC, or the PC after the prologue, whichever
1382 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
1384 CORE_ADDR post_prologue_pc
1385 = skip_prologue_using_sal (gdbarch
, func_addr
);
1386 struct symtab
*s
= find_pc_symtab (func_addr
);
1388 if (post_prologue_pc
)
/* Also hop over any -fstack-protector preamble.  */
1390 = arm_skip_stack_protector (post_prologue_pc
, gdbarch
);
1393 /* GCC always emits a line note before the prologue and another
1394 one after, even if the two are at the same address or on the
1395 same line. Take advantage of this so that we do not need to
1396 know every instruction that might appear in the prologue. We
1397 will have producer information for most binaries; if it is
1398 missing (e.g. for -gstabs), assume the GNU tools. */
1399 if (post_prologue_pc
1401 || s
->producer
== NULL
1402 || strncmp (s
->producer
, "GNU ", sizeof ("GNU ") - 1) == 0))
1403 return post_prologue_pc
;
1405 if (post_prologue_pc
!= 0)
1407 CORE_ADDR analyzed_limit
;
1409 /* For non-GCC compilers, make sure the entire line is an
1410 acceptable prologue; GDB will round this function's
1411 return value up to the end of the following line so we
1412 can not skip just part of a line (and we do not want to).
1414 RealView does not treat the prologue specially, but does
1415 associate prologue code with the opening brace; so this
1416 lets us skip the first line if we think it is the opening
1418 if (arm_pc_is_thumb (gdbarch
, func_addr
))
1419 analyzed_limit
= thumb_analyze_prologue (gdbarch
, func_addr
,
1420 post_prologue_pc
, NULL
);
1422 analyzed_limit
= arm_analyze_prologue (gdbarch
, func_addr
,
1423 post_prologue_pc
, NULL
);
1425 if (analyzed_limit
!= post_prologue_pc
)
1428 return post_prologue_pc
;
1432 /* Can't determine prologue from the symbol table, need to examine
1435 /* Find an upper limit on the function prologue using the debug
1436 information. If the debug information could not be used to provide
1437 that bound, then use an arbitrary large number as the upper bound. */
1438 /* Like arm_scan_prologue, stop no later than pc + 64. */
1439 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
1441 limit_pc
= pc
+ 64; /* Magic. */
1444 /* Check if this is Thumb code. */
1445 if (arm_pc_is_thumb (gdbarch
, pc
))
1446 return thumb_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
/* ARM-mode fallback: pattern-match the classic APCS prologue insns.  */
1448 for (skip_pc
= pc
; skip_pc
< limit_pc
; skip_pc
+= 4)
1450 inst
= read_memory_unsigned_integer (skip_pc
, 4, byte_order_for_code
);
1452 /* "mov ip, sp" is no longer a required part of the prologue. */
1453 if (inst
== 0xe1a0c00d) /* mov ip, sp */
1456 if ((inst
& 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1459 if ((inst
& 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1462 /* Some prologues begin with "str lr, [sp, #-4]!". */
1463 if (inst
== 0xe52de004) /* str lr, [sp, #-4]! */
1466 if ((inst
& 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1469 if ((inst
& 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1472 /* Any insns after this point may float into the code, if it makes
1473 for better instruction scheduling, so we skip them only if we
1474 find them, but still consider the function to be frame-ful. */
1476 /* We may have either one sfmfd instruction here, or several stfe
1477 insns, depending on the version of floating point code we
1479 if ((inst
& 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1482 if ((inst
& 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1485 if ((inst
& 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1488 if ((inst
& 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1491 if ((inst
& 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1492 || (inst
& 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1493 || (inst
& 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1496 if ((inst
& 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1497 || (inst
& 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1498 || (inst
& 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1501 /* Un-recognized instruction; stop scanning. */
1505 return skip_pc
; /* End of prologue. */
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Thin wrapper that bounds the prologue range and
   delegates the actual decoding to thumb_analyze_prologue.  */
1509 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1510 This function decodes a Thumb function prologue to determine:
1511 1) the size of the stack frame
1512 2) which registers are saved on it
1513 3) the offsets of saved regs
1514 4) the offset from the stack pointer to the frame pointer
1516 A typical Thumb function prologue would create this stack frame
1517 (offsets relative to FP)
1518 old SP -> 24 stack parameters
1521 R7 -> 0 local variables (16 bytes)
1522 SP -> -12 additional stack space (12 bytes)
1523 The frame size would thus be 36 bytes, and the frame offset would be
1524 12 bytes. The frame register is R7.
1526 The comments for thumb_skip_prolog() describe the algorithm we use
1527 to detect the end of the prolog. */
1531 thumb_scan_prologue (struct gdbarch
*gdbarch
, CORE_ADDR prev_pc
,
1532 CORE_ADDR block_addr
, struct arm_prologue_cache
*cache
)
1534 CORE_ADDR prologue_start
;
1535 CORE_ADDR prologue_end
;
1536 CORE_ADDR current_pc
;
1538 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1541 /* See comment in arm_scan_prologue for an explanation of
1543 if (prologue_end
> prologue_start
+ 64)
1545 prologue_end
= prologue_start
+ 64;
1549 /* We're in the boondocks: we have no idea where the start of the
/* Never scan past the point we are unwinding from.  */
1553 prologue_end
= min (prologue_end
, prev_pc
);
1555 thumb_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Classifies a 32-bit ARM-mode instruction by its
   condition and opcode fields to decide whether it can write the PC.  */
1558 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1561 arm_instruction_changes_pc (uint32_t this_instr
)
1563 if (bits (this_instr
, 28, 31) == INST_NV
)
1564 /* Unconditional instructions. */
1565 switch (bits (this_instr
, 24, 27))
1569 /* Branch with Link and change to Thumb. */
1574 /* Coprocessor register transfer. */
1575 if (bits (this_instr
, 12, 15) == 15)
1576 error (_("Invalid update to pc in instruction"));
1582 switch (bits (this_instr
, 25, 27))
1585 if (bits (this_instr
, 23, 24) == 2 && bit (this_instr
, 20) == 0)
1587 /* Multiplies and extra load/stores. */
1588 if (bit (this_instr
, 4) == 1 && bit (this_instr
, 7) == 1)
1589 /* Neither multiplies nor extension load/stores are allowed
1593 /* Otherwise, miscellaneous instructions. */
1595 /* BX <reg>, BXJ <reg>, BLX <reg> */
1596 if (bits (this_instr
, 4, 27) == 0x12fff1
1597 || bits (this_instr
, 4, 27) == 0x12fff2
1598 || bits (this_instr
, 4, 27) == 0x12fff3)
1601 /* Other miscellaneous instructions are unpredictable if they
1605 /* Data processing instruction. Fall through. */
1608 if (bits (this_instr
, 12, 15) == 15)
1615 /* Media instructions and architecturally undefined instructions. */
1616 if (bits (this_instr
, 25, 27) == 3 && bit (this_instr
, 4) == 1)
1620 if (bit (this_instr
, 20) == 0)
1624 if (bits (this_instr
, 12, 15) == ARM_PC_REGNUM
)
1630 /* Load/store multiple. */
1631 if (bit (this_instr
, 20) == 1 && bit (this_instr
, 15) == 1)
1637 /* Branch and branch with link. */
1642 /* Coprocessor transfers or SWIs can not affect PC. */
/* Reaching here means the opcode switch above missed a case.  */
1646 internal_error (__FILE__
, __LINE__
, _("bad value in switch"));
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Symbolically executes ARM-mode prologue instructions
   with the prologue-value (pv) machinery, recording saved-register
   offsets, frame register and frame size into CACHE when non-NULL.  */
1650 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1651 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1652 fill it in. Return the first address not recognized as a prologue
1655 We recognize all the instructions typically found in ARM prologues,
1656 plus harmless instructions which can be skipped (either for analysis
1657 purposes, or a more restrictive set that can be skipped when finding
1658 the end of the prologue). */
1661 arm_analyze_prologue (struct gdbarch
*gdbarch
,
1662 CORE_ADDR prologue_start
, CORE_ADDR prologue_end
,
1663 struct arm_prologue_cache
*cache
)
1665 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1666 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1668 CORE_ADDR offset
, current_pc
;
1669 pv_t regs
[ARM_FPS_REGNUM
];
1670 struct pv_area
*stack
;
1671 struct cleanup
*back_to
;
1672 int framereg
, framesize
;
1673 CORE_ADDR unrecognized_pc
= 0;
1675 /* Search the prologue looking for instructions that set up the
1676 frame pointer, adjust the stack pointer, and save registers.
1678 Be careful, however, and if it doesn't look like a prologue,
1679 don't try to scan it. If, for instance, a frameless function
1680 begins with stmfd sp!, then we will tell ourselves there is
1681 a frame, which will confuse stack traceback, as well as "finish"
1682 and other operations that rely on a knowledge of the stack
/* Start every register as "its own entry value".  */
1685 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1686 regs
[regno
] = pv_register (regno
, 0);
1687 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
1688 back_to
= make_cleanup_free_pv_area (stack
);
1690 for (current_pc
= prologue_start
;
1691 current_pc
< prologue_end
;
1695 = read_memory_unsigned_integer (current_pc
, 4, byte_order_for_code
);
1697 if (insn
== 0xe1a0c00d) /* mov ip, sp */
1699 regs
[ARM_IP_REGNUM
] = regs
[ARM_SP_REGNUM
];
1702 else if ((insn
& 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1703 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1705 unsigned imm
= insn
& 0xff; /* immediate value */
1706 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1707 int rd
= bits (insn
, 12, 15);
1708 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1709 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], imm
);
1712 else if ((insn
& 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1713 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1715 unsigned imm
= insn
& 0xff; /* immediate value */
1716 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1717 int rd
= bits (insn
, 12, 15);
1718 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1719 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], -imm
);
1722 else if ((insn
& 0xffff0fff) == 0xe52d0004) /* str Rd,
1725 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1727 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1728 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4,
1729 regs
[bits (insn
, 12, 15)]);
1732 else if ((insn
& 0xffff0000) == 0xe92d0000)
1733 /* stmfd sp!, {..., fp, ip, lr, pc}
1735 stmfd sp!, {a1, a2, a3, a4} */
1737 int mask
= insn
& 0xffff;
1739 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1742 /* Calculate offsets of saved registers. */
1743 for (regno
= ARM_PC_REGNUM
; regno
>= 0; regno
--)
1744 if (mask
& (1 << regno
))
1747 = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1748 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
1751 else if ((insn
& 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1752 || (insn
& 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1753 || (insn
& 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1755 /* No need to add this to saved_regs -- it's just an arg reg. */
1758 else if ((insn
& 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1759 || (insn
& 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1760 || (insn
& 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1762 /* No need to add this to saved_regs -- it's just an arg reg. */
1765 else if ((insn
& 0xfff00000) == 0xe8800000 /* stm Rn,
1767 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1769 /* No need to add this to saved_regs -- it's just arg regs. */
1772 else if ((insn
& 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1774 unsigned imm
= insn
& 0xff; /* immediate value */
1775 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1776 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1777 regs
[ARM_FP_REGNUM
] = pv_add_constant (regs
[ARM_IP_REGNUM
], -imm
);
1779 else if ((insn
& 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1781 unsigned imm
= insn
& 0xff; /* immediate value */
1782 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1783 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1784 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -imm
);
1786 else if ((insn
& 0xffff7fff) == 0xed6d0103 /* stfe f?,
1788 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1790 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1793 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1794 regno
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x07);
1795 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12, regs
[regno
]);
1797 else if ((insn
& 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1799 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1801 int n_saved_fp_regs
;
1802 unsigned int fp_start_reg
, fp_bound_reg
;
1804 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
/* N0/N1 bits encode how many FPA registers the sfmfd saves.  */
1807 if ((insn
& 0x800) == 0x800) /* N0 is set */
1809 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1810 n_saved_fp_regs
= 3;
1812 n_saved_fp_regs
= 1;
1816 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1817 n_saved_fp_regs
= 2;
1819 n_saved_fp_regs
= 4;
1822 fp_start_reg
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x7);
1823 fp_bound_reg
= fp_start_reg
+ n_saved_fp_regs
;
1824 for (; fp_start_reg
< fp_bound_reg
; fp_start_reg
++)
1826 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1827 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12,
1828 regs
[fp_start_reg
++]);
1831 else if ((insn
& 0xff000000) == 0xeb000000 && cache
== NULL
) /* bl */
1833 /* Allow some special function calls when skipping the
1834 prologue; GCC generates these before storing arguments to
1836 CORE_ADDR dest
= BranchDest (current_pc
, insn
);
1838 if (skip_prologue_function (gdbarch
, dest
, 0))
1843 else if ((insn
& 0xf0000000) != 0xe0000000)
1844 break; /* Condition not true, exit early. */
1845 else if (arm_instruction_changes_pc (insn
))
1846 /* Don't scan past anything that might change control flow. */
1848 else if ((insn
& 0xfe500000) == 0xe8100000 /* ldm */
1849 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1850 /* Ignore block loads from the stack, potentially copying
1851 parameters from memory. */
1853 else if ((insn
& 0xfc500000) == 0xe4100000
1854 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1855 /* Similarly ignore single loads from the stack. */
1857 else if ((insn
& 0xffff0ff0) == 0xe1a00000)
1858 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1859 register instead of the stack. */
1863 /* The optimizer might shove anything into the prologue,
1864 so we just skip what we don't recognize. */
1865 unrecognized_pc
= current_pc
;
1870 if (unrecognized_pc
== 0)
1871 unrecognized_pc
= current_pc
;
1873 /* The frame size is just the distance from the frame register
1874 to the original stack pointer. */
1875 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1877 /* Frame pointer is fp. */
1878 framereg
= ARM_FP_REGNUM
;
1879 framesize
= -regs
[ARM_FP_REGNUM
].k
;
1883 /* Try the stack pointer... this is a bit desperate. */
1884 framereg
= ARM_SP_REGNUM
;
1885 framesize
= -regs
[ARM_SP_REGNUM
].k
;
/* Publish results to the caller's cache, if one was supplied.  */
1890 cache
->framereg
= framereg
;
1891 cache
->framesize
= framesize
;
1893 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1894 if (pv_area_find_reg (stack
, gdbarch
, regno
, &offset
))
1895 cache
->saved_regs
[regno
].addr
= offset
;
1899 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1900 paddress (gdbarch
, unrecognized_pc
));
1902 do_cleanups (back_to
);
1903 return unrecognized_pc
;
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Determines prologue bounds for THIS_FRAME (symbolic
   info, or FP-relative guess when stripped), then dispatches to
   thumb_scan_prologue / arm_analyze_prologue to populate CACHE.  */
1907 arm_scan_prologue (struct frame_info
*this_frame
,
1908 struct arm_prologue_cache
*cache
)
1910 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1911 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1913 CORE_ADDR prologue_start
, prologue_end
, current_pc
;
1914 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
1915 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
1916 pv_t regs
[ARM_FPS_REGNUM
];
1917 struct pv_area
*stack
;
1918 struct cleanup
*back_to
;
1921 /* Assume there is no frame until proven otherwise. */
1922 cache
->framereg
= ARM_SP_REGNUM
;
1923 cache
->framesize
= 0;
1925 /* Check for Thumb prologue. */
1926 if (arm_frame_is_thumb (this_frame
))
1928 thumb_scan_prologue (gdbarch
, prev_pc
, block_addr
, cache
);
1932 /* Find the function prologue. If we can't find the function in
1933 the symbol table, peek in the stack frame to find the PC. */
1934 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1937 /* One way to find the end of the prologue (which works well
1938 for unoptimized code) is to do the following:
1940 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1943 prologue_end = prev_pc;
1944 else if (sal.end < prologue_end)
1945 prologue_end = sal.end;
1947 This mechanism is very accurate so long as the optimizer
1948 doesn't move any instructions from the function body into the
1949 prologue. If this happens, sal.end will be the last
1950 instruction in the first hunk of prologue code just before
1951 the first instruction that the scheduler has moved from
1952 the body to the prologue.
1954 In order to make sure that we scan all of the prologue
1955 instructions, we use a slightly less accurate mechanism which
1956 may scan more than necessary. To help compensate for this
1957 lack of accuracy, the prologue scanning loop below contains
1958 several clauses which'll cause the loop to terminate early if
1959 an implausible prologue instruction is encountered.
1965 is a suitable endpoint since it accounts for the largest
1966 possible prologue plus up to five instructions inserted by
1969 if (prologue_end
> prologue_start
+ 64)
1971 prologue_end
= prologue_start
+ 64; /* See above. */
1976 /* We have no symbol information. Our only option is to assume this
1977 function has a standard stack frame and the normal frame register.
1978 Then, we can find the value of our frame pointer on entrance to
1979 the callee (or at the present moment if this is the innermost frame).
1980 The value stored there should be the address of the stmfd + 8. */
1981 CORE_ADDR frame_loc
;
1982 LONGEST return_value
;
1984 frame_loc
= get_frame_register_unsigned (this_frame
, ARM_FP_REGNUM
);
1985 if (!safe_read_memory_integer (frame_loc
, 4, byte_order
, &return_value
))
1989 prologue_start
= gdbarch_addr_bits_remove
1990 (gdbarch
, return_value
) - 8;
1991 prologue_end
= prologue_start
+ 64; /* See above. */
/* Never scan past the address we are unwinding from.  */
1995 if (prev_pc
< prologue_end
)
1996 prologue_end
= prev_pc
;
1998 arm_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Allocates an arm_prologue_cache for THIS_FRAME, runs
   the prologue scan, reconstructs the caller's SP, and rebases the
   saved-register offsets to absolute addresses.  */
2001 static struct arm_prologue_cache
*
2002 arm_make_prologue_cache (struct frame_info
*this_frame
)
2005 struct arm_prologue_cache
*cache
;
2006 CORE_ADDR unwound_fp
;
2008 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2009 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2011 arm_scan_prologue (this_frame
, cache
);
2013 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
2014 if (unwound_fp
== 0)
2017 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
2019 /* Calculate actual addresses of saved registers using offsets
2020 determined by arm_scan_prologue. */
2021 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
2022 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
2023 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  frame_unwind this_id callback: builds a frame ID from
   the caller's SP (prev_sp) and the function start address.  */
2028 /* Our frame ID for a normal frame is the current function's starting PC
2029 and the caller's SP when we were called. */
2032 arm_prologue_this_id (struct frame_info
*this_frame
,
2034 struct frame_id
*this_id
)
2036 struct arm_prologue_cache
*cache
;
2040 if (*this_cache
== NULL
)
2041 *this_cache
= arm_make_prologue_cache (this_frame
);
2042 cache
= *this_cache
;
2044 /* This is meant to halt the backtrace at "_start". */
2045 pc
= get_frame_pc (this_frame
);
2046 if (pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
2049 /* If we've hit a wall, stop. */
2050 if (cache
->prev_sp
== 0)
2053 /* Use function start address as part of the frame ID. If we cannot
2054 identify the start address (due to missing symbol information),
2055 fall back to just using the current PC. */
2056 func
= get_frame_func (this_frame
);
2060 id
= frame_id_build (cache
->prev_sp
, func
);
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  frame_unwind prev_register callback: special-cases
   PC (from LR), SP (prev_sp) and CPSR (T bit from LR), otherwise falls
   through to the trad-frame saved-register table.  */
2064 static struct value
*
2065 arm_prologue_prev_register (struct frame_info
*this_frame
,
2069 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2070 struct arm_prologue_cache
*cache
;
2072 if (*this_cache
== NULL
)
2073 *this_cache
= arm_make_prologue_cache (this_frame
);
2074 cache
= *this_cache
;
2076 /* If we are asked to unwind the PC, then we need to return the LR
2077 instead. The prologue may save PC, but it will point into this
2078 frame's prologue, not the next frame's resume location. Also
2079 strip the saved T bit. A valid LR may have the low bit set, but
2080 a valid PC never does. */
2081 if (prev_regnum
== ARM_PC_REGNUM
)
2085 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2086 return frame_unwind_got_constant (this_frame
, prev_regnum
,
2087 arm_addr_bits_remove (gdbarch
, lr
));
2090 /* SP is generally not saved to the stack, but this frame is
2091 identified by the next frame's stack pointer at the time of the call.
2092 The value was already reconstructed into PREV_SP. */
2093 if (prev_regnum
== ARM_SP_REGNUM
)
2094 return frame_unwind_got_constant (this_frame
, prev_regnum
, cache
->prev_sp
);
2096 /* The CPSR may have been changed by the call instruction and by the
2097 called function. The only bit we can reconstruct is the T bit,
2098 by checking the low bit of LR as of the call. This is a reliable
2099 indicator of Thumb-ness except for some ARM v4T pre-interworking
2100 Thumb code, which could get away with a clear low bit as long as
2101 the called function did not use bx. Guess that all other
2102 bits are unchanged; the condition flags are presumably lost,
2103 but the processor status is likely valid. */
2104 if (prev_regnum
== ARM_PS_REGNUM
)
2107 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
2109 cpsr
= get_frame_register_unsigned (this_frame
, prev_regnum
);
2110 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2111 if (IS_THUMB_ADDR (lr
))
2115 return frame_unwind_got_constant (this_frame
, prev_regnum
, cpsr
);
2118 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  frame_unwind vtable wiring the prologue-scan unwinder
   callbacks defined above.  */
2122 struct frame_unwind arm_prologue_unwind
= {
2124 default_frame_unwind_stop_reason
,
2125 arm_prologue_this_id
,
2126 arm_prologue_prev_register
,
2128 default_frame_sniffer
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Per-objfile exception-index cache: an objfile_data
   key, an entry type with a VEC typedef, and the per-section map.  */
2131 /* Maintain a list of ARM exception table entries per objfile, similar to the
2132 list of mapping symbols. We only cache entries for standard ARM-defined
2133 personality routines; the cache will contain only the frame unwinding
2134 instructions associated with the entry (not the descriptors). */
2136 static const struct objfile_data
*arm_exidx_data_key
;
2138 struct arm_exidx_entry
2143 typedef struct arm_exidx_entry arm_exidx_entry_s
;
2144 DEF_VEC_O(arm_exidx_entry_s
);
2146 struct arm_exidx_data
2148 VEC(arm_exidx_entry_s
) **section_maps
;
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  objfile_data cleanup: frees each per-section entry
   vector held in the arm_exidx_data map.  */
2152 arm_exidx_data_free (struct objfile
*objfile
, void *arg
)
2154 struct arm_exidx_data
*data
= arg
;
2157 for (i
= 0; i
< objfile
->obfd
->section_count
; i
++)
2158 VEC_free (arm_exidx_entry_s
, data
->section_maps
[i
]);
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Strict-less-than comparator on entry start addresses,
   used when searching the (address-ordered) exidx entry vector.  */
2162 arm_compare_exidx_entries (const struct arm_exidx_entry
*lhs
,
2163 const struct arm_exidx_entry
*rhs
)
2165 return lhs
->addr
< rhs
->addr
;
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Linear search over OBJFILE's allocated (SEC_ALLOC)
   sections for the one whose [vma, vma+size) range contains VMA.  */
2168 static struct obj_section
*
2169 arm_obj_section_from_vma (struct objfile
*objfile
, bfd_vma vma
)
2171 struct obj_section
*osect
;
2173 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
2174 if (bfd_get_section_flags (objfile
->obfd
,
2175 osect
->the_bfd_section
) & SEC_ALLOC
)
2177 bfd_vma start
, size
;
2178 start
= bfd_get_section_vma (objfile
->obfd
, osect
->the_bfd_section
);
2179 size
= bfd_get_section_size (osect
->the_bfd_section
);
2181 if (start
<= vma
&& vma
< start
+ size
)
/* NOTE(review): extraction-garbled fragment; code left byte-identical,
   comments only.  Reads .ARM.exidx/.ARM.extab, decodes each 8-byte index
   entry (prel31 function address + table word), extracts normalized
   unwind opcodes, and caches them per section in arm_exidx_data.  */
2188 /* Parse contents of exception table and exception index sections
2189 of OBJFILE, and fill in the exception table entry cache.
2191 For each entry that refers to a standard ARM-defined personality
2192 routine, extract the frame unwinding instructions (from either
2193 the index or the table section). The unwinding instructions
2195 - extracting them from the rest of the table data
2196 - converting to host endianness
2197 - appending the implicit 0xb0 ("Finish") code
2199 The extracted and normalized instructions are stored for later
2200 retrieval by the arm_find_exidx_entry routine. */
2203 arm_exidx_new_objfile (struct objfile
*objfile
)
2205 struct cleanup
*cleanups
;
2206 struct arm_exidx_data
*data
;
2207 asection
*exidx
, *extab
;
2208 bfd_vma exidx_vma
= 0, extab_vma
= 0;
2209 bfd_size_type exidx_size
= 0, extab_size
= 0;
2210 gdb_byte
*exidx_data
= NULL
, *extab_data
= NULL
;
2213 /* If we've already touched this file, do nothing. */
2214 if (!objfile
|| objfile_data (objfile
, arm_exidx_data_key
) != NULL
)
2216 cleanups
= make_cleanup (null_cleanup
, NULL
);
2218 /* Read contents of exception table and index. */
2219 exidx
= bfd_get_section_by_name (objfile
->obfd
, ".ARM.exidx");
2222 exidx_vma
= bfd_section_vma (objfile
->obfd
, exidx
);
2223 exidx_size
= bfd_get_section_size (exidx
);
2224 exidx_data
= xmalloc (exidx_size
);
2225 make_cleanup (xfree
, exidx_data
);
2227 if (!bfd_get_section_contents (objfile
->obfd
, exidx
,
2228 exidx_data
, 0, exidx_size
))
2230 do_cleanups (cleanups
);
2235 extab
= bfd_get_section_by_name (objfile
->obfd
, ".ARM.extab");
2238 extab_vma
= bfd_section_vma (objfile
->obfd
, extab
);
2239 extab_size
= bfd_get_section_size (extab
);
2240 extab_data
= xmalloc (extab_size
);
2241 make_cleanup (xfree
, extab_data
);
2243 if (!bfd_get_section_contents (objfile
->obfd
, extab
,
2244 extab_data
, 0, extab_size
))
2246 do_cleanups (cleanups
);
2251 /* Allocate exception table data structure. */
2252 data
= OBSTACK_ZALLOC (&objfile
->objfile_obstack
, struct arm_exidx_data
);
2253 set_objfile_data (objfile
, arm_exidx_data_key
, data
);
2254 data
->section_maps
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
2255 objfile
->obfd
->section_count
,
2256 VEC(arm_exidx_entry_s
) *);
2258 /* Fill in exception table. */
2259 for (i
= 0; i
< exidx_size
/ 8; i
++)
2261 struct arm_exidx_entry new_exidx_entry
;
2262 bfd_vma idx
= bfd_h_get_32 (objfile
->obfd
, exidx_data
+ i
* 8);
2263 bfd_vma val
= bfd_h_get_32 (objfile
->obfd
, exidx_data
+ i
* 8 + 4);
2264 bfd_vma addr
= 0, word
= 0;
2265 int n_bytes
= 0, n_words
= 0;
2266 struct obj_section
*sec
;
2267 gdb_byte
*entry
= NULL
;
2269 /* Extract address of start of function. */
2270 idx
= ((idx
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2271 idx
+= exidx_vma
+ i
* 8;
2273 /* Find section containing function and compute section offset. */
2274 sec
= arm_obj_section_from_vma (objfile
, idx
);
2277 idx
-= bfd_get_section_vma (objfile
->obfd
, sec
->the_bfd_section
);
2279 /* Determine address of exception table entry. */
2282 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2284 else if ((val
& 0xff000000) == 0x80000000)
2286 /* Exception table entry embedded in .ARM.exidx
2287 -- must be short form. */
2291 else if (!(val
& 0x80000000))
2293 /* Exception table entry in .ARM.extab. */
2294 addr
= ((val
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2295 addr
+= exidx_vma
+ i
* 8 + 4;
2297 if (addr
>= extab_vma
&& addr
+ 4 <= extab_vma
+ extab_size
)
2299 word
= bfd_h_get_32 (objfile
->obfd
,
2300 extab_data
+ addr
- extab_vma
);
2303 if ((word
& 0xff000000) == 0x80000000)
2308 else if ((word
& 0xff000000) == 0x81000000
2309 || (word
& 0xff000000) == 0x82000000)
2313 n_words
= ((word
>> 16) & 0xff);
2315 else if (!(word
& 0x80000000))
2318 struct obj_section
*pers_sec
;
2319 int gnu_personality
= 0;
2321 /* Custom personality routine. */
2322 pers
= ((word
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2323 pers
= UNMAKE_THUMB_ADDR (pers
+ addr
- 4);
2325 /* Check whether we've got one of the variants of the
2326 GNU personality routines. */
2327 pers_sec
= arm_obj_section_from_vma (objfile
, pers
);
2330 static const char *personality
[] =
2332 "__gcc_personality_v0",
2333 "__gxx_personality_v0",
2334 "__gcj_personality_v0",
2335 "__gnu_objc_personality_v0",
2339 CORE_ADDR pc
= pers
+ obj_section_offset (pers_sec
);
2342 for (k
= 0; personality
[k
]; k
++)
2343 if (lookup_minimal_symbol_by_pc_name
2344 (pc
, personality
[k
], objfile
))
2346 gnu_personality
= 1;
2351 /* If so, the next word contains a word count in the high
2352 byte, followed by the same unwind instructions as the
2353 pre-defined forms. */
2355 && addr
+ 4 <= extab_vma
+ extab_size
)
2357 word
= bfd_h_get_32 (objfile
->obfd
,
2358 extab_data
+ addr
- extab_vma
);
2361 n_words
= ((word
>> 24) & 0xff);
2367 /* Sanity check address. */
2369 if (addr
< extab_vma
|| addr
+ 4 * n_words
> extab_vma
+ extab_size
)
2370 n_words
= n_bytes
= 0;
2372 /* The unwind instructions reside in WORD (only the N_BYTES least
2373 significant bytes are valid), followed by N_WORDS words in the
2374 extab section starting at ADDR. */
2375 if (n_bytes
|| n_words
)
2377 gdb_byte
*p
= entry
= obstack_alloc (&objfile
->objfile_obstack
,
2378 n_bytes
+ n_words
* 4 + 1);
2381 *p
++ = (gdb_byte
) ((word
>> (8 * n_bytes
)) & 0xff);
2385 word
= bfd_h_get_32 (objfile
->obfd
,
2386 extab_data
+ addr
- extab_vma
);
2389 *p
++ = (gdb_byte
) ((word
>> 24) & 0xff);
2390 *p
++ = (gdb_byte
) ((word
>> 16) & 0xff);
2391 *p
++ = (gdb_byte
) ((word
>> 8) & 0xff);
2392 *p
++ = (gdb_byte
) (word
& 0xff);
2395 /* Implied "Finish" to terminate the list. */
2399 /* Push entry onto vector. They are guaranteed to always
2400 appear in order of increasing addresses. */
2401 new_exidx_entry
.addr
= idx
;
2402 new_exidx_entry
.entry
= entry
;
2403 VEC_safe_push (arm_exidx_entry_s
,
2404 data
->section_maps
[sec
->the_bfd_section
->index
],
2408 do_cleanups (cleanups
);
2411 /* Search for the exception table entry covering MEMADDR. If one is found,
2412 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2413 set *START to the start of the region covered by this entry. */
2416 arm_find_exidx_entry (CORE_ADDR memaddr
, CORE_ADDR
*start
)
2418 struct obj_section
*sec
;
2420 sec
= find_pc_section (memaddr
);
2423 struct arm_exidx_data
*data
;
2424 VEC(arm_exidx_entry_s
) *map
;
2425 struct arm_exidx_entry map_key
= { memaddr
- obj_section_addr (sec
), 0 };
2428 data
= objfile_data (sec
->objfile
, arm_exidx_data_key
);
2431 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
2432 if (!VEC_empty (arm_exidx_entry_s
, map
))
2434 struct arm_exidx_entry
*map_sym
;
2436 idx
= VEC_lower_bound (arm_exidx_entry_s
, map
, &map_key
,
2437 arm_compare_exidx_entries
);
2439 /* VEC_lower_bound finds the earliest ordered insertion
2440 point. If the following symbol starts at this exact
2441 address, we use that; otherwise, the preceding
2442 exception table entry covers this address. */
2443 if (idx
< VEC_length (arm_exidx_entry_s
, map
))
2445 map_sym
= VEC_index (arm_exidx_entry_s
, map
, idx
);
2446 if (map_sym
->addr
== map_key
.addr
)
2449 *start
= map_sym
->addr
+ obj_section_addr (sec
);
2450 return map_sym
->entry
;
2456 map_sym
= VEC_index (arm_exidx_entry_s
, map
, idx
- 1);
2458 *start
= map_sym
->addr
+ obj_section_addr (sec
);
2459 return map_sym
->entry
;
2468 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2469 instruction list from the ARM exception table entry ENTRY, allocate and
2470 return a prologue cache structure describing how to unwind this frame.
2472 Return NULL if the unwinding instruction list contains a "spare",
2473 "reserved" or "refuse to unwind" instruction as defined in section
2474 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2475 for the ARM Architecture" document. */
2477 static struct arm_prologue_cache
*
2478 arm_exidx_fill_cache (struct frame_info
*this_frame
, gdb_byte
*entry
)
2483 struct arm_prologue_cache
*cache
;
2484 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2485 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2491 /* Whenever we reload SP, we actually have to retrieve its
2492 actual value in the current frame. */
2495 if (trad_frame_realreg_p (cache
->saved_regs
, ARM_SP_REGNUM
))
2497 int reg
= cache
->saved_regs
[ARM_SP_REGNUM
].realreg
;
2498 vsp
= get_frame_register_unsigned (this_frame
, reg
);
2502 CORE_ADDR addr
= cache
->saved_regs
[ARM_SP_REGNUM
].addr
;
2503 vsp
= get_frame_memory_unsigned (this_frame
, addr
, 4);
2509 /* Decode next unwind instruction. */
2512 if ((insn
& 0xc0) == 0)
2514 int offset
= insn
& 0x3f;
2515 vsp
+= (offset
<< 2) + 4;
2517 else if ((insn
& 0xc0) == 0x40)
2519 int offset
= insn
& 0x3f;
2520 vsp
-= (offset
<< 2) + 4;
2522 else if ((insn
& 0xf0) == 0x80)
2524 int mask
= ((insn
& 0xf) << 8) | *entry
++;
2527 /* The special case of an all-zero mask identifies
2528 "Refuse to unwind". We return NULL to fall back
2529 to the prologue analyzer. */
2533 /* Pop registers r4..r15 under mask. */
2534 for (i
= 0; i
< 12; i
++)
2535 if (mask
& (1 << i
))
2537 cache
->saved_regs
[4 + i
].addr
= vsp
;
2541 /* Special-case popping SP -- we need to reload vsp. */
2542 if (mask
& (1 << (ARM_SP_REGNUM
- 4)))
2545 else if ((insn
& 0xf0) == 0x90)
2547 int reg
= insn
& 0xf;
2549 /* Reserved cases. */
2550 if (reg
== ARM_SP_REGNUM
|| reg
== ARM_PC_REGNUM
)
2553 /* Set SP from another register and mark VSP for reload. */
2554 cache
->saved_regs
[ARM_SP_REGNUM
] = cache
->saved_regs
[reg
];
2557 else if ((insn
& 0xf0) == 0xa0)
2559 int count
= insn
& 0x7;
2560 int pop_lr
= (insn
& 0x8) != 0;
2563 /* Pop r4..r[4+count]. */
2564 for (i
= 0; i
<= count
; i
++)
2566 cache
->saved_regs
[4 + i
].addr
= vsp
;
2570 /* If indicated by flag, pop LR as well. */
2573 cache
->saved_regs
[ARM_LR_REGNUM
].addr
= vsp
;
2577 else if (insn
== 0xb0)
2579 /* We could only have updated PC by popping into it; if so, it
2580 will show up as address. Otherwise, copy LR into PC. */
2581 if (!trad_frame_addr_p (cache
->saved_regs
, ARM_PC_REGNUM
))
2582 cache
->saved_regs
[ARM_PC_REGNUM
]
2583 = cache
->saved_regs
[ARM_LR_REGNUM
];
2588 else if (insn
== 0xb1)
2590 int mask
= *entry
++;
2593 /* All-zero mask and mask >= 16 is "spare". */
2594 if (mask
== 0 || mask
>= 16)
2597 /* Pop r0..r3 under mask. */
2598 for (i
= 0; i
< 4; i
++)
2599 if (mask
& (1 << i
))
2601 cache
->saved_regs
[i
].addr
= vsp
;
2605 else if (insn
== 0xb2)
2607 ULONGEST offset
= 0;
2612 offset
|= (*entry
& 0x7f) << shift
;
2615 while (*entry
++ & 0x80);
2617 vsp
+= 0x204 + (offset
<< 2);
2619 else if (insn
== 0xb3)
2621 int start
= *entry
>> 4;
2622 int count
= (*entry
++) & 0xf;
2625 /* Only registers D0..D15 are valid here. */
2626 if (start
+ count
>= 16)
2629 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2630 for (i
= 0; i
<= count
; i
++)
2632 cache
->saved_regs
[ARM_D0_REGNUM
+ start
+ i
].addr
= vsp
;
2636 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2639 else if ((insn
& 0xf8) == 0xb8)
2641 int count
= insn
& 0x7;
2644 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2645 for (i
= 0; i
<= count
; i
++)
2647 cache
->saved_regs
[ARM_D0_REGNUM
+ 8 + i
].addr
= vsp
;
2651 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2654 else if (insn
== 0xc6)
2656 int start
= *entry
>> 4;
2657 int count
= (*entry
++) & 0xf;
2660 /* Only registers WR0..WR15 are valid. */
2661 if (start
+ count
>= 16)
2664 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2665 for (i
= 0; i
<= count
; i
++)
2667 cache
->saved_regs
[ARM_WR0_REGNUM
+ start
+ i
].addr
= vsp
;
2671 else if (insn
== 0xc7)
2673 int mask
= *entry
++;
2676 /* All-zero mask and mask >= 16 is "spare". */
2677 if (mask
== 0 || mask
>= 16)
2680 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2681 for (i
= 0; i
< 4; i
++)
2682 if (mask
& (1 << i
))
2684 cache
->saved_regs
[ARM_WCGR0_REGNUM
+ i
].addr
= vsp
;
2688 else if ((insn
& 0xf8) == 0xc0)
2690 int count
= insn
& 0x7;
2693 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2694 for (i
= 0; i
<= count
; i
++)
2696 cache
->saved_regs
[ARM_WR0_REGNUM
+ 10 + i
].addr
= vsp
;
2700 else if (insn
== 0xc8)
2702 int start
= *entry
>> 4;
2703 int count
= (*entry
++) & 0xf;
2706 /* Only registers D0..D31 are valid. */
2707 if (start
+ count
>= 16)
2710 /* Pop VFP double-precision registers
2711 D[16+start]..D[16+start+count]. */
2712 for (i
= 0; i
<= count
; i
++)
2714 cache
->saved_regs
[ARM_D0_REGNUM
+ 16 + start
+ i
].addr
= vsp
;
2718 else if (insn
== 0xc9)
2720 int start
= *entry
>> 4;
2721 int count
= (*entry
++) & 0xf;
2724 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2725 for (i
= 0; i
<= count
; i
++)
2727 cache
->saved_regs
[ARM_D0_REGNUM
+ start
+ i
].addr
= vsp
;
2731 else if ((insn
& 0xf8) == 0xd0)
2733 int count
= insn
& 0x7;
2736 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2737 for (i
= 0; i
<= count
; i
++)
2739 cache
->saved_regs
[ARM_D0_REGNUM
+ 8 + i
].addr
= vsp
;
2745 /* Everything else is "spare". */
2750 /* If we restore SP from a register, assume this was the frame register.
2751 Otherwise just fall back to SP as frame register. */
2752 if (trad_frame_realreg_p (cache
->saved_regs
, ARM_SP_REGNUM
))
2753 cache
->framereg
= cache
->saved_regs
[ARM_SP_REGNUM
].realreg
;
2755 cache
->framereg
= ARM_SP_REGNUM
;
2757 /* Determine offset to previous frame. */
2759 = vsp
- get_frame_register_unsigned (this_frame
, cache
->framereg
);
2761 /* We already got the previous SP. */
2762 cache
->prev_sp
= vsp
;
2767 /* Unwinding via ARM exception table entries. Note that the sniffer
2768 already computes a filled-in prologue cache, which is then used
2769 with the same arm_prologue_this_id and arm_prologue_prev_register
2770 routines also used for prologue-parsing based unwinding. */
2773 arm_exidx_unwind_sniffer (const struct frame_unwind
*self
,
2774 struct frame_info
*this_frame
,
2775 void **this_prologue_cache
)
2777 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2778 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2779 CORE_ADDR addr_in_block
, exidx_region
, func_start
;
2780 struct arm_prologue_cache
*cache
;
2783 /* See if we have an ARM exception table entry covering this address. */
2784 addr_in_block
= get_frame_address_in_block (this_frame
);
2785 entry
= arm_find_exidx_entry (addr_in_block
, &exidx_region
);
2789 /* The ARM exception table does not describe unwind information
2790 for arbitrary PC values, but is guaranteed to be correct only
2791 at call sites. We have to decide here whether we want to use
2792 ARM exception table information for this frame, or fall back
2793 to using prologue parsing. (Note that if we have DWARF CFI,
2794 this sniffer isn't even called -- CFI is always preferred.)
2796 Before we make this decision, however, we check whether we
2797 actually have *symbol* information for the current frame.
2798 If not, prologue parsing would not work anyway, so we might
2799 as well use the exception table and hope for the best. */
2800 if (find_pc_partial_function (addr_in_block
, NULL
, &func_start
, NULL
))
2804 /* If the next frame is "normal", we are at a call site in this
2805 frame, so exception information is guaranteed to be valid. */
2806 if (get_next_frame (this_frame
)
2807 && get_frame_type (get_next_frame (this_frame
)) == NORMAL_FRAME
)
2810 /* We also assume exception information is valid if we're currently
2811 blocked in a system call. The system library is supposed to
2812 ensure this, so that e.g. pthread cancellation works. */
2813 if (arm_frame_is_thumb (this_frame
))
2817 if (safe_read_memory_integer (get_frame_pc (this_frame
) - 2, 2,
2818 byte_order_for_code
, &insn
)
2819 && (insn
& 0xff00) == 0xdf00 /* svc */)
2826 if (safe_read_memory_integer (get_frame_pc (this_frame
) - 4, 4,
2827 byte_order_for_code
, &insn
)
2828 && (insn
& 0x0f000000) == 0x0f000000 /* svc */)
2832 /* Bail out if we don't know that exception information is valid. */
2836 /* The ARM exception index does not mark the *end* of the region
2837 covered by the entry, and some functions will not have any entry.
2838 To correctly recognize the end of the covered region, the linker
2839 should have inserted dummy records with a CANTUNWIND marker.
2841 Unfortunately, current versions of GNU ld do not reliably do
2842 this, and thus we may have found an incorrect entry above.
2843 As a (temporary) sanity check, we only use the entry if it
2844 lies *within* the bounds of the function. Note that this check
2845 might reject perfectly valid entries that just happen to cover
2846 multiple functions; therefore this check ought to be removed
2847 once the linker is fixed. */
2848 if (func_start
> exidx_region
)
2852 /* Decode the list of unwinding instructions into a prologue cache.
2853 Note that this may fail due to e.g. a "refuse to unwind" code. */
2854 cache
= arm_exidx_fill_cache (this_frame
, entry
);
2858 *this_prologue_cache
= cache
;
2862 struct frame_unwind arm_exidx_unwind
= {
2864 default_frame_unwind_stop_reason
,
2865 arm_prologue_this_id
,
2866 arm_prologue_prev_register
,
2868 arm_exidx_unwind_sniffer
2871 static struct arm_prologue_cache
*
2872 arm_make_stub_cache (struct frame_info
*this_frame
)
2874 struct arm_prologue_cache
*cache
;
2876 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2877 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2879 cache
->prev_sp
= get_frame_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2884 /* Our frame ID for a stub frame is the current SP and LR. */
2887 arm_stub_this_id (struct frame_info
*this_frame
,
2889 struct frame_id
*this_id
)
2891 struct arm_prologue_cache
*cache
;
2893 if (*this_cache
== NULL
)
2894 *this_cache
= arm_make_stub_cache (this_frame
);
2895 cache
= *this_cache
;
2897 *this_id
= frame_id_build (cache
->prev_sp
, get_frame_pc (this_frame
));
2901 arm_stub_unwind_sniffer (const struct frame_unwind
*self
,
2902 struct frame_info
*this_frame
,
2903 void **this_prologue_cache
)
2905 CORE_ADDR addr_in_block
;
2908 addr_in_block
= get_frame_address_in_block (this_frame
);
2909 if (in_plt_section (addr_in_block
, NULL
)
2910 /* We also use the stub winder if the target memory is unreadable
2911 to avoid having the prologue unwinder trying to read it. */
2912 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
2918 struct frame_unwind arm_stub_unwind
= {
2920 default_frame_unwind_stop_reason
,
2922 arm_prologue_prev_register
,
2924 arm_stub_unwind_sniffer
2928 arm_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
2930 struct arm_prologue_cache
*cache
;
2932 if (*this_cache
== NULL
)
2933 *this_cache
= arm_make_prologue_cache (this_frame
);
2934 cache
= *this_cache
;
2936 return cache
->prev_sp
- cache
->framesize
;
2939 struct frame_base arm_normal_base
= {
2940 &arm_prologue_unwind
,
2941 arm_normal_frame_base
,
2942 arm_normal_frame_base
,
2943 arm_normal_frame_base
2946 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2947 dummy frame. The frame ID's base needs to match the TOS value
2948 saved by save_dummy_frame_tos() and returned from
2949 arm_push_dummy_call, and the PC needs to match the dummy frame's
2952 static struct frame_id
2953 arm_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2955 return frame_id_build (get_frame_register_unsigned (this_frame
,
2957 get_frame_pc (this_frame
));
2960 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2961 be used to construct the previous frame's ID, after looking up the
2962 containing function). */
2965 arm_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2968 pc
= frame_unwind_register_unsigned (this_frame
, ARM_PC_REGNUM
);
2969 return arm_addr_bits_remove (gdbarch
, pc
);
2973 arm_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2975 return frame_unwind_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2978 static struct value
*
2979 arm_dwarf2_prev_register (struct frame_info
*this_frame
, void **this_cache
,
2982 struct gdbarch
* gdbarch
= get_frame_arch (this_frame
);
2984 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
2989 /* The PC is normally copied from the return column, which
2990 describes saves of LR. However, that version may have an
2991 extra bit set to indicate Thumb state. The bit is not
2993 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2994 return frame_unwind_got_constant (this_frame
, regnum
,
2995 arm_addr_bits_remove (gdbarch
, lr
));
2998 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2999 cpsr
= get_frame_register_unsigned (this_frame
, regnum
);
3000 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
3001 if (IS_THUMB_ADDR (lr
))
3005 return frame_unwind_got_constant (this_frame
, regnum
, cpsr
);
3008 internal_error (__FILE__
, __LINE__
,
3009 _("Unexpected register %d"), regnum
);
3014 arm_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
3015 struct dwarf2_frame_state_reg
*reg
,
3016 struct frame_info
*this_frame
)
3022 reg
->how
= DWARF2_FRAME_REG_FN
;
3023 reg
->loc
.fn
= arm_dwarf2_prev_register
;
3026 reg
->how
= DWARF2_FRAME_REG_CFA
;
3031 /* Return true if we are in the function's epilogue, i.e. after the
3032 instruction that destroyed the function's stack frame. */
3035 thumb_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3037 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3038 unsigned int insn
, insn2
;
3039 int found_return
= 0, found_stack_adjust
= 0;
3040 CORE_ADDR func_start
, func_end
;
3044 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
3047 /* The epilogue is a sequence of instructions along the following lines:
3049 - add stack frame size to SP or FP
3050 - [if frame pointer used] restore SP from FP
3051 - restore registers from SP [may include PC]
3052 - a return-type instruction [if PC wasn't already restored]
3054 In a first pass, we scan forward from the current PC and verify the
3055 instructions we find as compatible with this sequence, ending in a
3058 However, this is not sufficient to distinguish indirect function calls
3059 within a function from indirect tail calls in the epilogue in some cases.
3060 Therefore, if we didn't already find any SP-changing instruction during
3061 forward scan, we add a backward scanning heuristic to ensure we actually
3062 are in the epilogue. */
3065 while (scan_pc
< func_end
&& !found_return
)
3067 if (target_read_memory (scan_pc
, buf
, 2))
3071 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3073 if ((insn
& 0xff80) == 0x4700) /* bx <Rm> */
3075 else if (insn
== 0x46f7) /* mov pc, lr */
3077 else if (insn
== 0x46bd) /* mov sp, r7 */
3078 found_stack_adjust
= 1;
3079 else if ((insn
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3080 found_stack_adjust
= 1;
3081 else if ((insn
& 0xfe00) == 0xbc00) /* pop <registers> */
3083 found_stack_adjust
= 1;
3084 if (insn
& 0x0100) /* <registers> include PC. */
3087 else if (thumb_insn_size (insn
) == 4) /* 32-bit Thumb-2 instruction */
3089 if (target_read_memory (scan_pc
, buf
, 2))
3093 insn2
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3095 if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
3097 found_stack_adjust
= 1;
3098 if (insn2
& 0x8000) /* <registers> include PC. */
3101 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
3102 && (insn2
& 0x0fff) == 0x0b04)
3104 found_stack_adjust
= 1;
3105 if ((insn2
& 0xf000) == 0xf000) /* <Rt> is PC. */
3108 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
3109 && (insn2
& 0x0e00) == 0x0a00)
3110 found_stack_adjust
= 1;
3121 /* Since any instruction in the epilogue sequence, with the possible
3122 exception of return itself, updates the stack pointer, we need to
3123 scan backwards for at most one instruction. Try either a 16-bit or
3124 a 32-bit instruction. This is just a heuristic, so we do not worry
3125 too much about false positives. */
3127 if (!found_stack_adjust
)
3129 if (pc
- 4 < func_start
)
3131 if (target_read_memory (pc
- 4, buf
, 4))
3134 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3135 insn2
= extract_unsigned_integer (buf
+ 2, 2, byte_order_for_code
);
3137 if (insn2
== 0x46bd) /* mov sp, r7 */
3138 found_stack_adjust
= 1;
3139 else if ((insn2
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3140 found_stack_adjust
= 1;
3141 else if ((insn2
& 0xff00) == 0xbc00) /* pop <registers> without PC */
3142 found_stack_adjust
= 1;
3143 else if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
3144 found_stack_adjust
= 1;
3145 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
3146 && (insn2
& 0x0fff) == 0x0b04)
3147 found_stack_adjust
= 1;
3148 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
3149 && (insn2
& 0x0e00) == 0x0a00)
3150 found_stack_adjust
= 1;
3153 return found_stack_adjust
;
3156 /* Return true if we are in the function's epilogue, i.e. after the
3157 instruction that destroyed the function's stack frame. */
3160 arm_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3162 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3164 int found_return
, found_stack_adjust
;
3165 CORE_ADDR func_start
, func_end
;
3167 if (arm_pc_is_thumb (gdbarch
, pc
))
3168 return thumb_in_function_epilogue_p (gdbarch
, pc
);
3170 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
3173 /* We are in the epilogue if the previous instruction was a stack
3174 adjustment and the next instruction is a possible return (bx, mov
3175 pc, or pop). We could have to scan backwards to find the stack
3176 adjustment, or forwards to find the return, but this is a decent
3177 approximation. First scan forwards. */
3180 insn
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
3181 if (bits (insn
, 28, 31) != INST_NV
)
3183 if ((insn
& 0x0ffffff0) == 0x012fff10)
3186 else if ((insn
& 0x0ffffff0) == 0x01a0f000)
3189 else if ((insn
& 0x0fff0000) == 0x08bd0000
3190 && (insn
& 0x0000c000) != 0)
3191 /* POP (LDMIA), including PC or LR. */
3198 /* Scan backwards. This is just a heuristic, so do not worry about
3199 false positives from mode changes. */
3201 if (pc
< func_start
+ 4)
3204 found_stack_adjust
= 0;
3205 insn
= read_memory_unsigned_integer (pc
- 4, 4, byte_order_for_code
);
3206 if (bits (insn
, 28, 31) != INST_NV
)
3208 if ((insn
& 0x0df0f000) == 0x0080d000)
3209 /* ADD SP (register or immediate). */
3210 found_stack_adjust
= 1;
3211 else if ((insn
& 0x0df0f000) == 0x0040d000)
3212 /* SUB SP (register or immediate). */
3213 found_stack_adjust
= 1;
3214 else if ((insn
& 0x0ffffff0) == 0x01a0d000)
3216 found_stack_adjust
= 1;
3217 else if ((insn
& 0x0fff0000) == 0x08bd0000)
3219 found_stack_adjust
= 1;
3222 if (found_stack_adjust
)
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;			/* Size of DATA in bytes.  */
  struct stack_item *prev;	/* Next-older item on the stack.  */
  void *data;			/* Heap copy of the argument bytes.  */
};
3239 static struct stack_item
*
3240 push_stack_item (struct stack_item
*prev
, const void *contents
, int len
)
3242 struct stack_item
*si
;
3243 si
= xmalloc (sizeof (struct stack_item
));
3244 si
->data
= xmalloc (len
);
3247 memcpy (si
->data
, contents
, len
);
3251 static struct stack_item
*
3252 pop_stack_item (struct stack_item
*si
)
3254 struct stack_item
*dead
= si
;
3262 /* Return the alignment (in bytes) of the given type. */
3265 arm_type_align (struct type
*t
)
3271 t
= check_typedef (t
);
3272 switch (TYPE_CODE (t
))
3275 /* Should never happen. */
3276 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
3280 case TYPE_CODE_ENUM
:
3284 case TYPE_CODE_RANGE
:
3285 case TYPE_CODE_BITSTRING
:
3287 case TYPE_CODE_CHAR
:
3288 case TYPE_CODE_BOOL
:
3289 return TYPE_LENGTH (t
);
3291 case TYPE_CODE_ARRAY
:
3292 case TYPE_CODE_COMPLEX
:
3293 /* TODO: What about vector types? */
3294 return arm_type_align (TYPE_TARGET_TYPE (t
));
3296 case TYPE_CODE_STRUCT
:
3297 case TYPE_CODE_UNION
:
3299 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
3301 falign
= arm_type_align (TYPE_FIELD_TYPE (t
, n
));
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,
  VFP_CPRC_SINGLE,
  VFP_CPRC_DOUBLE,
  VFP_CPRC_VEC64,
  VFP_CPRC_VEC128
};
3321 /* The length of one element of base type B. */
3324 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b
)
3328 case VFP_CPRC_SINGLE
:
3330 case VFP_CPRC_DOUBLE
:
3332 case VFP_CPRC_VEC64
:
3334 case VFP_CPRC_VEC128
:
3337 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
3342 /* The character ('s', 'd' or 'q') for the type of VFP register used
3343 for passing base type B. */
3346 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b
)
3350 case VFP_CPRC_SINGLE
:
3352 case VFP_CPRC_DOUBLE
:
3354 case VFP_CPRC_VEC64
:
3356 case VFP_CPRC_VEC128
:
3359 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
3364 /* Determine whether T may be part of a candidate for passing and
3365 returning in VFP registers, ignoring the limit on the total number
3366 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3367 classification of the first valid component found; if it is not
3368 VFP_CPRC_UNKNOWN, all components must have the same classification
3369 as *BASE_TYPE. If it is found that T contains a type not permitted
3370 for passing and returning in VFP registers, a type differently
3371 classified from *BASE_TYPE, or two types differently classified
3372 from each other, return -1, otherwise return the total number of
3373 base-type elements found (possibly 0 in an empty structure or
3374 array). Vectors and complex types are not currently supported,
3375 matching the generic AAPCS support. */
3378 arm_vfp_cprc_sub_candidate (struct type
*t
,
3379 enum arm_vfp_cprc_base_type
*base_type
)
3381 t
= check_typedef (t
);
3382 switch (TYPE_CODE (t
))
3385 switch (TYPE_LENGTH (t
))
3388 if (*base_type
== VFP_CPRC_UNKNOWN
)
3389 *base_type
= VFP_CPRC_SINGLE
;
3390 else if (*base_type
!= VFP_CPRC_SINGLE
)
3395 if (*base_type
== VFP_CPRC_UNKNOWN
)
3396 *base_type
= VFP_CPRC_DOUBLE
;
3397 else if (*base_type
!= VFP_CPRC_DOUBLE
)
3406 case TYPE_CODE_ARRAY
:
3410 count
= arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t
), base_type
);
3413 if (TYPE_LENGTH (t
) == 0)
3415 gdb_assert (count
== 0);
3418 else if (count
== 0)
3420 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3421 gdb_assert ((TYPE_LENGTH (t
) % unitlen
) == 0);
3422 return TYPE_LENGTH (t
) / unitlen
;
3426 case TYPE_CODE_STRUCT
:
3431 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
3433 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
3435 if (sub_count
== -1)
3439 if (TYPE_LENGTH (t
) == 0)
3441 gdb_assert (count
== 0);
3444 else if (count
== 0)
3446 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3447 if (TYPE_LENGTH (t
) != unitlen
* count
)
3452 case TYPE_CODE_UNION
:
3457 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
3459 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
3461 if (sub_count
== -1)
3463 count
= (count
> sub_count
? count
: sub_count
);
3465 if (TYPE_LENGTH (t
) == 0)
3467 gdb_assert (count
== 0);
3470 else if (count
== 0)
3472 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3473 if (TYPE_LENGTH (t
) != unitlen
* count
)
3485 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3486 if passed to or returned from a non-variadic function with the VFP
3487 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3488 *BASE_TYPE to the base type for T and *COUNT to the number of
3489 elements of that base type before returning. */
3492 arm_vfp_call_candidate (struct type
*t
, enum arm_vfp_cprc_base_type
*base_type
,
3495 enum arm_vfp_cprc_base_type b
= VFP_CPRC_UNKNOWN
;
3496 int c
= arm_vfp_cprc_sub_candidate (t
, &b
);
3497 if (c
<= 0 || c
> 4)
3504 /* Return 1 if the VFP ABI should be used for passing arguments to and
3505 returning values from a function of type FUNC_TYPE, 0
3509 arm_vfp_abi_for_function (struct gdbarch
*gdbarch
, struct type
*func_type
)
3511 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3512 /* Variadic functions always use the base ABI. Assume that functions
3513 without debug info are not variadic. */
3514 if (func_type
&& TYPE_VARARGS (check_typedef (func_type
)))
3516 /* The VFP ABI is only supported as a variant of AAPCS. */
3517 if (tdep
->arm_abi
!= ARM_ABI_AAPCS
)
3519 return gdbarch_tdep (gdbarch
)->fp_model
== ARM_FLOAT_VFP
;
3522 /* We currently only support passing parameters in integer registers, which
3523 conforms with GCC's default model, and VFP argument passing following
3524 the VFP variant of AAPCS. Several other variants exist and
3525 we should probably support some of them based on the selected ABI. */
3528 arm_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
3529 struct regcache
*regcache
, CORE_ADDR bp_addr
, int nargs
,
3530 struct value
**args
, CORE_ADDR sp
, int struct_return
,
3531 CORE_ADDR struct_addr
)
3533 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3537 struct stack_item
*si
= NULL
;
3540 unsigned vfp_regs_free
= (1 << 16) - 1;
3542 /* Determine the type of this function and whether the VFP ABI
3544 ftype
= check_typedef (value_type (function
));
3545 if (TYPE_CODE (ftype
) == TYPE_CODE_PTR
)
3546 ftype
= check_typedef (TYPE_TARGET_TYPE (ftype
));
3547 use_vfp_abi
= arm_vfp_abi_for_function (gdbarch
, ftype
);
3549 /* Set the return address. For the ARM, the return breakpoint is
3550 always at BP_ADDR. */
3551 if (arm_pc_is_thumb (gdbarch
, bp_addr
))
3553 regcache_cooked_write_unsigned (regcache
, ARM_LR_REGNUM
, bp_addr
);
3555 /* Walk through the list of args and determine how large a temporary
3556 stack is required. Need to take care here as structs may be
3557 passed on the stack, and we have to push them. */
3560 argreg
= ARM_A1_REGNUM
;
3563 /* The struct_return pointer occupies the first parameter
3564 passing register. */
3568 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = %s\n",
3569 gdbarch_register_name (gdbarch
, argreg
),
3570 paddress (gdbarch
, struct_addr
));
3571 regcache_cooked_write_unsigned (regcache
, argreg
, struct_addr
);
3575 for (argnum
= 0; argnum
< nargs
; argnum
++)
3578 struct type
*arg_type
;
3579 struct type
*target_type
;
3580 enum type_code typecode
;
3581 const bfd_byte
*val
;
3583 enum arm_vfp_cprc_base_type vfp_base_type
;
3585 int may_use_core_reg
= 1;
3587 arg_type
= check_typedef (value_type (args
[argnum
]));
3588 len
= TYPE_LENGTH (arg_type
);
3589 target_type
= TYPE_TARGET_TYPE (arg_type
);
3590 typecode
= TYPE_CODE (arg_type
);
3591 val
= value_contents (args
[argnum
]);
3593 align
= arm_type_align (arg_type
);
3594 /* Round alignment up to a whole number of words. */
3595 align
= (align
+ INT_REGISTER_SIZE
- 1) & ~(INT_REGISTER_SIZE
- 1);
3596 /* Different ABIs have different maximum alignments. */
3597 if (gdbarch_tdep (gdbarch
)->arm_abi
== ARM_ABI_APCS
)
3599 /* The APCS ABI only requires word alignment. */
3600 align
= INT_REGISTER_SIZE
;
3604 /* The AAPCS requires at most doubleword alignment. */
3605 if (align
> INT_REGISTER_SIZE
* 2)
3606 align
= INT_REGISTER_SIZE
* 2;
3610 && arm_vfp_call_candidate (arg_type
, &vfp_base_type
,
3618 /* Because this is a CPRC it cannot go in a core register or
3619 cause a core register to be skipped for alignment.
3620 Either it goes in VFP registers and the rest of this loop
3621 iteration is skipped for this argument, or it goes on the
3622 stack (and the stack alignment code is correct for this
3624 may_use_core_reg
= 0;
3626 unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
3627 shift
= unit_length
/ 4;
3628 mask
= (1 << (shift
* vfp_base_count
)) - 1;
3629 for (regno
= 0; regno
< 16; regno
+= shift
)
3630 if (((vfp_regs_free
>> regno
) & mask
) == mask
)
3639 vfp_regs_free
&= ~(mask
<< regno
);
3640 reg_scaled
= regno
/ shift
;
3641 reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
3642 for (i
= 0; i
< vfp_base_count
; i
++)
3646 if (reg_char
== 'q')
3647 arm_neon_quad_write (gdbarch
, regcache
, reg_scaled
+ i
,
3648 val
+ i
* unit_length
);
3651 sprintf (name_buf
, "%c%d", reg_char
, reg_scaled
+ i
);
3652 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3654 regcache_cooked_write (regcache
, regnum
,
3655 val
+ i
* unit_length
);
3662 /* This CPRC could not go in VFP registers, so all VFP
3663 registers are now marked as used. */
3668 /* Push stack padding for doubleword alignment. */
3669 if (nstack
& (align
- 1))
3671 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
3672 nstack
+= INT_REGISTER_SIZE
;
3675 /* Doubleword aligned quantities must go in even register pairs. */
3676 if (may_use_core_reg
3677 && argreg
<= ARM_LAST_ARG_REGNUM
3678 && align
> INT_REGISTER_SIZE
3682 /* If the argument is a pointer to a function, and it is a
3683 Thumb function, create a LOCAL copy of the value and set
3684 the THUMB bit in it. */
3685 if (TYPE_CODE_PTR
== typecode
3686 && target_type
!= NULL
3687 && TYPE_CODE_FUNC
== TYPE_CODE (check_typedef (target_type
)))
3689 CORE_ADDR regval
= extract_unsigned_integer (val
, len
, byte_order
);
3690 if (arm_pc_is_thumb (gdbarch
, regval
))
3692 bfd_byte
*copy
= alloca (len
);
3693 store_unsigned_integer (copy
, len
, byte_order
,
3694 MAKE_THUMB_ADDR (regval
));
3699 /* Copy the argument to general registers or the stack in
3700 register-sized pieces. Large arguments are split between
3701 registers and stack. */
3704 int partial_len
= len
< INT_REGISTER_SIZE
? len
: INT_REGISTER_SIZE
;
3706 if (may_use_core_reg
&& argreg
<= ARM_LAST_ARG_REGNUM
)
3708 /* The argument is being passed in a general purpose
3711 = extract_unsigned_integer (val
, partial_len
, byte_order
);
3712 if (byte_order
== BFD_ENDIAN_BIG
)
3713 regval
<<= (INT_REGISTER_SIZE
- partial_len
) * 8;
3715 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
3717 gdbarch_register_name
3719 phex (regval
, INT_REGISTER_SIZE
));
3720 regcache_cooked_write_unsigned (regcache
, argreg
, regval
);
3725 /* Push the arguments onto the stack. */
3727 fprintf_unfiltered (gdb_stdlog
, "arg %d @ sp + %d\n",
3729 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
3730 nstack
+= INT_REGISTER_SIZE
;
3737 /* If we have an odd number of words to push, then decrement the stack
3738 by one word now, so first stack argument will be dword aligned. */
3745 write_memory (sp
, si
->data
, si
->len
);
3746 si
= pop_stack_item (si
);
3749 /* Finally, update the SP register. */
3750 regcache_cooked_write_unsigned (regcache
, ARM_SP_REGNUM
, sp
);
3756 /* Always align the frame to an 8-byte boundary. This is required on
3757 some platforms and harmless on the rest. */
3760 arm_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
3762 /* Align the stack to eight bytes. */
3763 return sp
& ~ (CORE_ADDR
) 7;
/* Print the FPU exception-flag names encoded in the low five bits of
   FLAGS (IVO, DVZ, OFL, UFL, INX) to stdout, followed by a newline.  */

static void
print_fpu_flags (int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs (flag_names[i], stdout);

  putc ('\n', stdout);
}
/* Print interesting information about the floating point processor
   (if present) or emulator.  */

static void
arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
		      struct frame_info *frame, const char *args)
{
  unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
  int type;

  /* Bits 24..30 of the FPS register hold the FPU type; bit 31 is set
     when a hardware FPU is present.  */
  type = (status >> 24) & 127;
  if (status & (1 << 31))
    printf (_("Hardware FPU type %d\n"), type);
  else
    printf (_("Software FPU type %d\n"), type);
  /* NOTE(review): all output here goes to stdout via printf/fputs and
     print_fpu_flags; the FILE argument is never used -- confirm
     whether this should write to FILE instead.  */
  /* i18n: [floating point unit] mask */
  fputs (_("mask: "), stdout);
  /* The exception mask lives in bits 16..20 of FPS.  */
  print_fpu_flags (status >> 16);
  /* i18n: [floating point unit] flags */
  fputs (_("flags: "), stdout);
  /* The sticky exception flags live in bits 0..4 of FPS.  */
  print_fpu_flags (status);
}
3804 /* Construct the ARM extended floating point type. */
3805 static struct type
*
3806 arm_ext_type (struct gdbarch
*gdbarch
)
3808 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3810 if (!tdep
->arm_ext_type
)
3812 = arch_float_type (gdbarch
, -1, "builtin_type_arm_ext",
3813 floatformats_arm_ext
);
3815 return tdep
->arm_ext_type
;
3818 static struct type
*
3819 arm_neon_double_type (struct gdbarch
*gdbarch
)
3821 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3823 if (tdep
->neon_double_type
== NULL
)
3825 struct type
*t
, *elem
;
3827 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_d",
3829 elem
= builtin_type (gdbarch
)->builtin_uint8
;
3830 append_composite_type_field (t
, "u8", init_vector_type (elem
, 8));
3831 elem
= builtin_type (gdbarch
)->builtin_uint16
;
3832 append_composite_type_field (t
, "u16", init_vector_type (elem
, 4));
3833 elem
= builtin_type (gdbarch
)->builtin_uint32
;
3834 append_composite_type_field (t
, "u32", init_vector_type (elem
, 2));
3835 elem
= builtin_type (gdbarch
)->builtin_uint64
;
3836 append_composite_type_field (t
, "u64", elem
);
3837 elem
= builtin_type (gdbarch
)->builtin_float
;
3838 append_composite_type_field (t
, "f32", init_vector_type (elem
, 2));
3839 elem
= builtin_type (gdbarch
)->builtin_double
;
3840 append_composite_type_field (t
, "f64", elem
);
3842 TYPE_VECTOR (t
) = 1;
3843 TYPE_NAME (t
) = "neon_d";
3844 tdep
->neon_double_type
= t
;
3847 return tdep
->neon_double_type
;
3850 /* FIXME: The vector types are not correctly ordered on big-endian
3851 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3852 bits of d0 - regardless of what unit size is being held in d0. So
3853 the offset of the first uint8 in d0 is 7, but the offset of the
3854 first float is 4. This code works as-is for little-endian
3857 static struct type
*
3858 arm_neon_quad_type (struct gdbarch
*gdbarch
)
3860 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3862 if (tdep
->neon_quad_type
== NULL
)
3864 struct type
*t
, *elem
;
3866 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_q",
3868 elem
= builtin_type (gdbarch
)->builtin_uint8
;
3869 append_composite_type_field (t
, "u8", init_vector_type (elem
, 16));
3870 elem
= builtin_type (gdbarch
)->builtin_uint16
;
3871 append_composite_type_field (t
, "u16", init_vector_type (elem
, 8));
3872 elem
= builtin_type (gdbarch
)->builtin_uint32
;
3873 append_composite_type_field (t
, "u32", init_vector_type (elem
, 4));
3874 elem
= builtin_type (gdbarch
)->builtin_uint64
;
3875 append_composite_type_field (t
, "u64", init_vector_type (elem
, 2));
3876 elem
= builtin_type (gdbarch
)->builtin_float
;
3877 append_composite_type_field (t
, "f32", init_vector_type (elem
, 4));
3878 elem
= builtin_type (gdbarch
)->builtin_double
;
3879 append_composite_type_field (t
, "f64", init_vector_type (elem
, 2));
3881 TYPE_VECTOR (t
) = 1;
3882 TYPE_NAME (t
) = "neon_q";
3883 tdep
->neon_quad_type
= t
;
3886 return tdep
->neon_quad_type
;
/* Return the GDB type object for the "standard" data type of data in
   register N.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* The 32 pseudo registers immediately after the raw set are VFP
     views typed as single-precision floats -- presumably s0..s31;
     confirm against the pseudo-register setup.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* The 16 pseudo registers after those are NEON quad registers.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      /* Replace the plain-float type of D registers with the richer
	 NEON union view when NEON is available.  */
      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* FPA registers read as void when the FPA is absent.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;
      else
	return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
3939 /* Map a DWARF register REGNUM onto the appropriate GDB register
3943 arm_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
3945 /* Core integer regs. */
3946 if (reg
>= 0 && reg
<= 15)
3949 /* Legacy FPA encoding. These were once used in a way which
3950 overlapped with VFP register numbering, so their use is
3951 discouraged, but GDB doesn't support the ARM toolchain
3952 which used them for VFP. */
3953 if (reg
>= 16 && reg
<= 23)
3954 return ARM_F0_REGNUM
+ reg
- 16;
3956 /* New assignments for the FPA registers. */
3957 if (reg
>= 96 && reg
<= 103)
3958 return ARM_F0_REGNUM
+ reg
- 96;
3960 /* WMMX register assignments. */
3961 if (reg
>= 104 && reg
<= 111)
3962 return ARM_WCGR0_REGNUM
+ reg
- 104;
3964 if (reg
>= 112 && reg
<= 127)
3965 return ARM_WR0_REGNUM
+ reg
- 112;
3967 if (reg
>= 192 && reg
<= 199)
3968 return ARM_WC0_REGNUM
+ reg
- 192;
3970 /* VFP v2 registers. A double precision value is actually
3971 in d1 rather than s2, but the ABI only defines numbering
3972 for the single precision registers. This will "just work"
3973 in GDB for little endian targets (we'll read eight bytes,
3974 starting in s0 and then progressing to s1), but will be
3975 reversed on big endian targets with VFP. This won't
3976 be a problem for the new Neon quad registers; you're supposed
3977 to use DW_OP_piece for those. */
3978 if (reg
>= 64 && reg
<= 95)
3982 sprintf (name_buf
, "s%d", reg
- 64);
3983 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3987 /* VFP v3 / Neon registers. This range is also used for VFP v2
3988 registers, except that it now describes d0 instead of s0. */
3989 if (reg
>= 256 && reg
<= 287)
3993 sprintf (name_buf
, "d%d", reg
- 256);
3994 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
/* Map GDB internal REGNUM onto the Arm simulator register numbers.  */

static int
arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
{
  int reg = regnum;
  gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));

  /* iWMMXt data, control, and scalar-group registers map onto fixed
     simulator coprocessor register ranges.  */
  if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
    return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;

  if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
    return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;

  if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
    return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;

  /* The remaining registers are laid out consecutively: general
     registers, then FPA registers, then status registers, so REG is
     rebased after each group is skipped.  */
  if (reg < NUM_GREGS)
    return SIM_ARM_R0_REGNUM + reg;
  reg -= NUM_GREGS;

  if (reg < NUM_FREGS)
    return SIM_ARM_FP0_REGNUM + reg;
  reg -= NUM_FREGS;

  if (reg < NUM_SREGS)
    return SIM_ARM_FPS_REGNUM + reg;
  reg -= NUM_SREGS;

  internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
}
4032 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4033 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4034 It is thought that this is is the floating-point register format on
4035 little-endian systems. */
4038 convert_from_extended (const struct floatformat
*fmt
, const void *ptr
,
4039 void *dbl
, int endianess
)
4043 if (endianess
== BFD_ENDIAN_BIG
)
4044 floatformat_to_doublest (&floatformat_arm_ext_big
, ptr
, &d
);
4046 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword
,
4048 floatformat_from_doublest (fmt
, &d
, dbl
);
4052 convert_to_extended (const struct floatformat
*fmt
, void *dbl
, const void *ptr
,
4057 floatformat_to_doublest (fmt
, ptr
, &d
);
4058 if (endianess
== BFD_ENDIAN_BIG
)
4059 floatformat_from_doublest (&floatformat_arm_ext_big
, &d
, dbl
);
4061 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword
,
4066 condition_true (unsigned long cond
, unsigned long status_reg
)
4068 if (cond
== INST_AL
|| cond
== INST_NV
)
4074 return ((status_reg
& FLAG_Z
) != 0);
4076 return ((status_reg
& FLAG_Z
) == 0);
4078 return ((status_reg
& FLAG_C
) != 0);
4080 return ((status_reg
& FLAG_C
) == 0);
4082 return ((status_reg
& FLAG_N
) != 0);
4084 return ((status_reg
& FLAG_N
) == 0);
4086 return ((status_reg
& FLAG_V
) != 0);
4088 return ((status_reg
& FLAG_V
) == 0);
4090 return ((status_reg
& (FLAG_C
| FLAG_Z
)) == FLAG_C
);
4092 return ((status_reg
& (FLAG_C
| FLAG_Z
)) != FLAG_C
);
4094 return (((status_reg
& FLAG_N
) == 0) == ((status_reg
& FLAG_V
) == 0));
4096 return (((status_reg
& FLAG_N
) == 0) != ((status_reg
& FLAG_V
) == 0));
4098 return (((status_reg
& FLAG_Z
) == 0)
4099 && (((status_reg
& FLAG_N
) == 0)
4100 == ((status_reg
& FLAG_V
) == 0)));
4102 return (((status_reg
& FLAG_Z
) != 0)
4103 || (((status_reg
& FLAG_N
) == 0)
4104 != ((status_reg
& FLAG_V
) == 0)));
/* Evaluate the ARM "shifter operand" encoded in INST: decode the
   source register Rm (bits 0..3), the shift type (bits 5..6) and the
   shift amount (from a register when bit 4 is set, otherwise an
   immediate), and return the shifted 32-bit value.  CARRY is the
   current C flag, used by RRX; PC_VAL is the prefetch-adjusted PC.
   STATUS_REG is unused here.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: only the low byte of Rs counts.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate 5-bit shift amount.  */
    shift = bits (inst, 7, 11);

  /* Reads of the PC see the prefetched value: +12 for
     register-specified shifts, +8 otherwise.  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Arithmetic shifts of 32 or more fill with the sign bit.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* ROR #0 encodes RRX: rotate right one bit through carry.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  /* Mask to 32 bits in case unsigned long is wider.  */
  return res & 0xffffffff;
}
/* Return number of 1-bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Kernighan's method: each iteration clears the lowest set bit.  */
  while (val != 0)
    {
      val &= val - 1;
      count++;
    }

  return count;
}
/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.  */

static int
thumb_insn_size (unsigned short inst1)
{
  /* A first halfword whose top five bits are 0b11101, 0b11110 or
     0b11111 starts a 32-bit Thumb-2 instruction; every other halfword
     is a complete 16-bit instruction.  */
  return ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0) ? 4 : 2;
}
4183 thumb_advance_itstate (unsigned int itstate
)
4185 /* Preserve IT[7:5], the first three bits of the condition. Shift
4186 the upcoming condition flags left by one bit. */
4187 itstate
= (itstate
& 0xe0) | ((itstate
<< 1) & 0x1f);
4189 /* If we have finished the IT block, clear the state. */
4190 if ((itstate
& 0x0f) == 0)
4196 /* Find the next PC after the current instruction executes. In some
4197 cases we can not statically determine the answer (see the IT state
4198 handling in this function); in that case, a breakpoint may be
4199 inserted in addition to the returned PC, which will be used to set
4200 another breakpoint by our caller. */
4203 thumb_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
)
4205 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4206 struct address_space
*aspace
= get_frame_address_space (frame
);
4207 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4208 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4209 unsigned long pc_val
= ((unsigned long) pc
) + 4; /* PC after prefetch */
4210 unsigned short inst1
;
4211 CORE_ADDR nextpc
= pc
+ 2; /* Default is next instruction. */
4212 unsigned long offset
;
4213 ULONGEST status
, itstate
;
4215 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4216 pc_val
= MAKE_THUMB_ADDR (pc_val
);
4218 inst1
= read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
4220 /* Thumb-2 conditional execution support. There are eight bits in
4221 the CPSR which describe conditional execution state. Once
4222 reconstructed (they're in a funny order), the low five bits
4223 describe the low bit of the condition for each instruction and
4224 how many instructions remain. The high three bits describe the
4225 base condition. One of the low four bits will be set if an IT
4226 block is active. These bits read as zero on earlier
4228 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4229 itstate
= ((status
>> 8) & 0xfc) | ((status
>> 25) & 0x3);
4231 /* If-Then handling. On GNU/Linux, where this routine is used, we
4232 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4233 can disable execution of the undefined instruction. So we might
4234 miss the breakpoint if we set it on a skipped conditional
4235 instruction. Because conditional instructions can change the
4236 flags, affecting the execution of further instructions, we may
4237 need to set two breakpoints. */
4239 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
!= NULL
)
4241 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
4243 /* An IT instruction. Because this instruction does not
4244 modify the flags, we can accurately predict the next
4245 executed instruction. */
4246 itstate
= inst1
& 0x00ff;
4247 pc
+= thumb_insn_size (inst1
);
4249 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
4251 inst1
= read_memory_unsigned_integer (pc
, 2,
4252 byte_order_for_code
);
4253 pc
+= thumb_insn_size (inst1
);
4254 itstate
= thumb_advance_itstate (itstate
);
4257 return MAKE_THUMB_ADDR (pc
);
4259 else if (itstate
!= 0)
4261 /* We are in a conditional block. Check the condition. */
4262 if (! condition_true (itstate
>> 4, status
))
4264 /* Advance to the next executed instruction. */
4265 pc
+= thumb_insn_size (inst1
);
4266 itstate
= thumb_advance_itstate (itstate
);
4268 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
4270 inst1
= read_memory_unsigned_integer (pc
, 2,
4271 byte_order_for_code
);
4272 pc
+= thumb_insn_size (inst1
);
4273 itstate
= thumb_advance_itstate (itstate
);
4276 return MAKE_THUMB_ADDR (pc
);
4278 else if ((itstate
& 0x0f) == 0x08)
4280 /* This is the last instruction of the conditional
4281 block, and it is executed. We can handle it normally
4282 because the following instruction is not conditional,
4283 and we must handle it normally because it is
4284 permitted to branch. Fall through. */
4290 /* There are conditional instructions after this one.
4291 If this instruction modifies the flags, then we can
4292 not predict what the next executed instruction will
4293 be. Fortunately, this instruction is architecturally
4294 forbidden to branch; we know it will fall through.
4295 Start by skipping past it. */
4296 pc
+= thumb_insn_size (inst1
);
4297 itstate
= thumb_advance_itstate (itstate
);
4299 /* Set a breakpoint on the following instruction. */
4300 gdb_assert ((itstate
& 0x0f) != 0);
4301 arm_insert_single_step_breakpoint (gdbarch
, aspace
,
4302 MAKE_THUMB_ADDR (pc
));
4303 cond_negated
= (itstate
>> 4) & 1;
4305 /* Skip all following instructions with the same
4306 condition. If there is a later instruction in the IT
4307 block with the opposite condition, set the other
4308 breakpoint there. If not, then set a breakpoint on
4309 the instruction after the IT block. */
4312 inst1
= read_memory_unsigned_integer (pc
, 2,
4313 byte_order_for_code
);
4314 pc
+= thumb_insn_size (inst1
);
4315 itstate
= thumb_advance_itstate (itstate
);
4317 while (itstate
!= 0 && ((itstate
>> 4) & 1) == cond_negated
);
4319 return MAKE_THUMB_ADDR (pc
);
4323 else if (itstate
& 0x0f)
4325 /* We are in a conditional block. Check the condition. */
4326 int cond
= itstate
>> 4;
4328 if (! condition_true (cond
, status
))
4329 /* Advance to the next instruction. All the 32-bit
4330 instructions share a common prefix. */
4331 return MAKE_THUMB_ADDR (pc
+ thumb_insn_size (inst1
));
4333 /* Otherwise, handle the instruction normally. */
4336 if ((inst1
& 0xff00) == 0xbd00) /* pop {rlist, pc} */
4340 /* Fetch the saved PC from the stack. It's stored above
4341 all of the other registers. */
4342 offset
= bitcount (bits (inst1
, 0, 7)) * INT_REGISTER_SIZE
;
4343 sp
= get_frame_register_unsigned (frame
, ARM_SP_REGNUM
);
4344 nextpc
= read_memory_unsigned_integer (sp
+ offset
, 4, byte_order
);
4346 else if ((inst1
& 0xf000) == 0xd000) /* conditional branch */
4348 unsigned long cond
= bits (inst1
, 8, 11);
4349 if (cond
== 0x0f) /* 0x0f = SWI */
4351 struct gdbarch_tdep
*tdep
;
4352 tdep
= gdbarch_tdep (gdbarch
);
4354 if (tdep
->syscall_next_pc
!= NULL
)
4355 nextpc
= tdep
->syscall_next_pc (frame
);
4358 else if (cond
!= 0x0f && condition_true (cond
, status
))
4359 nextpc
= pc_val
+ (sbits (inst1
, 0, 7) << 1);
4361 else if ((inst1
& 0xf800) == 0xe000) /* unconditional branch */
4363 nextpc
= pc_val
+ (sbits (inst1
, 0, 10) << 1);
4365 else if (thumb_insn_size (inst1
) == 4) /* 32-bit instruction */
4367 unsigned short inst2
;
4368 inst2
= read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
4370 /* Default to the next instruction. */
4372 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4374 if ((inst1
& 0xf800) == 0xf000 && (inst2
& 0x8000) == 0x8000)
4376 /* Branches and miscellaneous control instructions. */
4378 if ((inst2
& 0x1000) != 0 || (inst2
& 0xd001) == 0xc000)
4381 int j1
, j2
, imm1
, imm2
;
4383 imm1
= sbits (inst1
, 0, 10);
4384 imm2
= bits (inst2
, 0, 10);
4385 j1
= bit (inst2
, 13);
4386 j2
= bit (inst2
, 11);
4388 offset
= ((imm1
<< 12) + (imm2
<< 1));
4389 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
4391 nextpc
= pc_val
+ offset
;
4392 /* For BLX make sure to clear the low bits. */
4393 if (bit (inst2
, 12) == 0)
4394 nextpc
= nextpc
& 0xfffffffc;
4396 else if (inst1
== 0xf3de && (inst2
& 0xff00) == 0x3f00)
4398 /* SUBS PC, LR, #imm8. */
4399 nextpc
= get_frame_register_unsigned (frame
, ARM_LR_REGNUM
);
4400 nextpc
-= inst2
& 0x00ff;
4402 else if ((inst2
& 0xd000) == 0x8000 && (inst1
& 0x0380) != 0x0380)
4404 /* Conditional branch. */
4405 if (condition_true (bits (inst1
, 6, 9), status
))
4407 int sign
, j1
, j2
, imm1
, imm2
;
4409 sign
= sbits (inst1
, 10, 10);
4410 imm1
= bits (inst1
, 0, 5);
4411 imm2
= bits (inst2
, 0, 10);
4412 j1
= bit (inst2
, 13);
4413 j2
= bit (inst2
, 11);
4415 offset
= (sign
<< 20) + (j2
<< 19) + (j1
<< 18);
4416 offset
+= (imm1
<< 12) + (imm2
<< 1);
4418 nextpc
= pc_val
+ offset
;
4422 else if ((inst1
& 0xfe50) == 0xe810)
4424 /* Load multiple or RFE. */
4425 int rn
, offset
, load_pc
= 1;
4427 rn
= bits (inst1
, 0, 3);
4428 if (bit (inst1
, 7) && !bit (inst1
, 8))
4431 if (!bit (inst2
, 15))
4433 offset
= bitcount (inst2
) * 4 - 4;
4435 else if (!bit (inst1
, 7) && bit (inst1
, 8))
4438 if (!bit (inst2
, 15))
4442 else if (bit (inst1
, 7) && bit (inst1
, 8))
4447 else if (!bit (inst1
, 7) && !bit (inst1
, 8))
4457 CORE_ADDR addr
= get_frame_register_unsigned (frame
, rn
);
4458 nextpc
= get_frame_memory_unsigned (frame
, addr
+ offset
, 4);
4461 else if ((inst1
& 0xffef) == 0xea4f && (inst2
& 0xfff0) == 0x0f00)
4463 /* MOV PC or MOVS PC. */
4464 nextpc
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4465 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4467 else if ((inst1
& 0xff70) == 0xf850 && (inst2
& 0xf000) == 0xf000)
4471 int rn
, load_pc
= 1;
4473 rn
= bits (inst1
, 0, 3);
4474 base
= get_frame_register_unsigned (frame
, rn
);
4475 if (rn
== ARM_PC_REGNUM
)
4477 base
= (base
+ 4) & ~(CORE_ADDR
) 0x3;
4479 base
+= bits (inst2
, 0, 11);
4481 base
-= bits (inst2
, 0, 11);
4483 else if (bit (inst1
, 7))
4484 base
+= bits (inst2
, 0, 11);
4485 else if (bit (inst2
, 11))
4487 if (bit (inst2
, 10))
4490 base
+= bits (inst2
, 0, 7);
4492 base
-= bits (inst2
, 0, 7);
4495 else if ((inst2
& 0x0fc0) == 0x0000)
4497 int shift
= bits (inst2
, 4, 5), rm
= bits (inst2
, 0, 3);
4498 base
+= get_frame_register_unsigned (frame
, rm
) << shift
;
4505 nextpc
= get_frame_memory_unsigned (frame
, base
, 4);
4507 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf000)
4510 CORE_ADDR tbl_reg
, table
, offset
, length
;
4512 tbl_reg
= bits (inst1
, 0, 3);
4513 if (tbl_reg
== 0x0f)
4514 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
4516 table
= get_frame_register_unsigned (frame
, tbl_reg
);
4518 offset
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4519 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 1);
4520 nextpc
= pc_val
+ length
;
4522 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf010)
4525 CORE_ADDR tbl_reg
, table
, offset
, length
;
4527 tbl_reg
= bits (inst1
, 0, 3);
4528 if (tbl_reg
== 0x0f)
4529 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
4531 table
= get_frame_register_unsigned (frame
, tbl_reg
);
4533 offset
= 2 * get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4534 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 2);
4535 nextpc
= pc_val
+ length
;
4538 else if ((inst1
& 0xff00) == 0x4700) /* bx REG, blx REG */
4540 if (bits (inst1
, 3, 6) == 0x0f)
4543 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
4545 else if ((inst1
& 0xff87) == 0x4687) /* mov pc, REG */
4547 if (bits (inst1
, 3, 6) == 0x0f)
4550 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
4552 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4554 else if ((inst1
& 0xf500) == 0xb100)
4557 int imm
= (bit (inst1
, 9) << 6) + (bits (inst1
, 3, 7) << 1);
4558 ULONGEST reg
= get_frame_register_unsigned (frame
, bits (inst1
, 0, 2));
4560 if (bit (inst1
, 11) && reg
!= 0)
4561 nextpc
= pc_val
+ imm
;
4562 else if (!bit (inst1
, 11) && reg
== 0)
4563 nextpc
= pc_val
+ imm
;
4568 /* Get the raw next address. PC is the current program counter, in
4569 FRAME, which is assumed to be executing in ARM mode.
4571 The value returned has the execution state of the next instruction
4572 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4573 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4577 arm_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
)
4579 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4580 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4581 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4582 unsigned long pc_val
;
4583 unsigned long this_instr
;
4584 unsigned long status
;
4587 pc_val
= (unsigned long) pc
;
4588 this_instr
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
4590 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4591 nextpc
= (CORE_ADDR
) (pc_val
+ 4); /* Default case */
4593 if (bits (this_instr
, 28, 31) == INST_NV
)
4594 switch (bits (this_instr
, 24, 27))
4599 /* Branch with Link and change to Thumb. */
4600 nextpc
= BranchDest (pc
, this_instr
);
4601 nextpc
|= bit (this_instr
, 24) << 1;
4602 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4608 /* Coprocessor register transfer. */
4609 if (bits (this_instr
, 12, 15) == 15)
4610 error (_("Invalid update to pc in instruction"));
4613 else if (condition_true (bits (this_instr
, 28, 31), status
))
4615 switch (bits (this_instr
, 24, 27))
4618 case 0x1: /* data processing */
4622 unsigned long operand1
, operand2
, result
= 0;
4626 if (bits (this_instr
, 12, 15) != 15)
4629 if (bits (this_instr
, 22, 25) == 0
4630 && bits (this_instr
, 4, 7) == 9) /* multiply */
4631 error (_("Invalid update to pc in instruction"));
4633 /* BX <reg>, BLX <reg> */
4634 if (bits (this_instr
, 4, 27) == 0x12fff1
4635 || bits (this_instr
, 4, 27) == 0x12fff3)
4637 rn
= bits (this_instr
, 0, 3);
4638 nextpc
= ((rn
== ARM_PC_REGNUM
)
4640 : get_frame_register_unsigned (frame
, rn
));
4645 /* Multiply into PC. */
4646 c
= (status
& FLAG_C
) ? 1 : 0;
4647 rn
= bits (this_instr
, 16, 19);
4648 operand1
= ((rn
== ARM_PC_REGNUM
)
4650 : get_frame_register_unsigned (frame
, rn
));
4652 if (bit (this_instr
, 25))
4654 unsigned long immval
= bits (this_instr
, 0, 7);
4655 unsigned long rotate
= 2 * bits (this_instr
, 8, 11);
4656 operand2
= ((immval
>> rotate
) | (immval
<< (32 - rotate
)))
4659 else /* operand 2 is a shifted register. */
4660 operand2
= shifted_reg_val (frame
, this_instr
, c
,
4663 switch (bits (this_instr
, 21, 24))
4666 result
= operand1
& operand2
;
4670 result
= operand1
^ operand2
;
4674 result
= operand1
- operand2
;
4678 result
= operand2
- operand1
;
4682 result
= operand1
+ operand2
;
4686 result
= operand1
+ operand2
+ c
;
4690 result
= operand1
- operand2
+ c
;
4694 result
= operand2
- operand1
+ c
;
4700 case 0xb: /* tst, teq, cmp, cmn */
4701 result
= (unsigned long) nextpc
;
4705 result
= operand1
| operand2
;
4709 /* Always step into a function. */
4714 result
= operand1
& ~operand2
;
4722 /* In 26-bit APCS the bottom two bits of the result are
4723 ignored, and we always end up in ARM state. */
4725 nextpc
= arm_addr_bits_remove (gdbarch
, result
);
4733 case 0x5: /* data transfer */
4736 if (bit (this_instr
, 20))
4739 if (bits (this_instr
, 12, 15) == 15)
4745 if (bit (this_instr
, 22))
4746 error (_("Invalid update to pc in instruction"));
4748 /* byte write to PC */
4749 rn
= bits (this_instr
, 16, 19);
4750 base
= ((rn
== ARM_PC_REGNUM
)
4752 : get_frame_register_unsigned (frame
, rn
));
4754 if (bit (this_instr
, 24))
4757 int c
= (status
& FLAG_C
) ? 1 : 0;
4758 unsigned long offset
=
4759 (bit (this_instr
, 25)
4760 ? shifted_reg_val (frame
, this_instr
, c
, pc_val
, status
)
4761 : bits (this_instr
, 0, 11));
4763 if (bit (this_instr
, 23))
4769 (CORE_ADDR
) read_memory_unsigned_integer ((CORE_ADDR
) base
,
4776 case 0x9: /* block transfer */
4777 if (bit (this_instr
, 20))
4780 if (bit (this_instr
, 15))
4784 unsigned long rn_val
4785 = get_frame_register_unsigned (frame
,
4786 bits (this_instr
, 16, 19));
4788 if (bit (this_instr
, 23))
4791 unsigned long reglist
= bits (this_instr
, 0, 14);
4792 offset
= bitcount (reglist
) * 4;
4793 if (bit (this_instr
, 24)) /* pre */
4796 else if (bit (this_instr
, 24))
4800 (CORE_ADDR
) read_memory_unsigned_integer ((CORE_ADDR
)
4807 case 0xb: /* branch & link */
4808 case 0xa: /* branch */
4810 nextpc
= BranchDest (pc
, this_instr
);
4816 case 0xe: /* coproc ops */
4820 struct gdbarch_tdep
*tdep
;
4821 tdep
= gdbarch_tdep (gdbarch
);
4823 if (tdep
->syscall_next_pc
!= NULL
)
4824 nextpc
= tdep
->syscall_next_pc (frame
);
4830 fprintf_filtered (gdb_stderr
, _("Bad bit-field extraction\n"));
/* Determine next PC after current instruction executes.  Will call either
   arm_get_next_pc_raw or thumb_get_next_pc_raw.  Error out if infinite
   loop is detected.  */

CORE_ADDR
arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
{
  CORE_ADDR nextpc;

  if (arm_frame_is_thumb (frame))
    {
      nextpc = thumb_get_next_pc_raw (frame, pc);
      /* A "next PC" identical to the current one means the
	 instruction branches to itself.  */
      if (nextpc == MAKE_THUMB_ADDR (pc))
	error (_("Infinite loop detected"));
    }
  else
    {
      nextpc = arm_get_next_pc_raw (frame, pc);
      if (nextpc == pc)
	error (_("Infinite loop detected"));
    }

  return nextpc;
}
/* Like insert_single_step_breakpoint, but make sure we use a breakpoint
   of the appropriate mode (as encoded in the PC value), even if this
   differs from what would be expected according to the symbol tables.  */

void
arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
				   struct address_space *aspace,
				   CORE_ADDR pc)
{
  /* Arrange for arm_override_mode to be restored once the breakpoint
     has been inserted.  */
  struct cleanup *old_chain
    = make_cleanup_restore_integer (&arm_override_mode);

  /* Take the ARM/Thumb mode from the low bit of PC, then strip that
     bit off before inserting the breakpoint.  */
  arm_override_mode = IS_THUMB_ADDR (pc);
  pc = gdbarch_addr_bits_remove (gdbarch, pc);

  insert_single_step_breakpoint (gdbarch, aspace, pc);

  do_cleanups (old_chain);
}
4883 /* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
4884 instruction and ending with a STREX{,B,H,D} instruction. If such a sequence
4885 is found, attempt to step through it. A breakpoint is placed at the end of
4889 thumb_deal_with_atomic_sequence_raw (struct frame_info
*frame
)
4891 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4892 struct address_space
*aspace
= get_frame_address_space (frame
);
4893 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4894 CORE_ADDR pc
= get_frame_pc (frame
);
4895 CORE_ADDR breaks
[2] = {-1, -1};
4897 unsigned short insn1
, insn2
;
4900 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
4901 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
4902 ULONGEST status
, itstate
;
4904 /* We currently do not support atomic sequences within an IT block. */
4905 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4906 itstate
= ((status
>> 8) & 0xfc) | ((status
>> 25) & 0x3);
4910 /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction. */
4911 insn1
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4913 if (thumb_insn_size (insn1
) != 4)
4916 insn2
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4918 if (!((insn1
& 0xfff0) == 0xe850
4919 || ((insn1
& 0xfff0) == 0xe8d0 && (insn2
& 0x00c0) == 0x0040)))
4922 /* Assume that no atomic sequence is longer than "atomic_sequence_length"
4924 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
4926 insn1
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4929 if (thumb_insn_size (insn1
) != 4)
4931 /* Assume that there is at most one conditional branch in the
4932 atomic sequence. If a conditional branch is found, put a
4933 breakpoint in its destination address. */
4934 if ((insn1
& 0xf000) == 0xd000 && bits (insn1
, 8, 11) != 0x0f)
4936 if (last_breakpoint
> 0)
4937 return 0; /* More than one conditional branch found,
4938 fallback to the standard code. */
4940 breaks
[1] = loc
+ 2 + (sbits (insn1
, 0, 7) << 1);
4944 /* We do not support atomic sequences that use any *other*
4945 instructions but conditional branches to change the PC.
4946 Fall back to standard code to avoid losing control of
4948 else if (thumb_instruction_changes_pc (insn1
))
4953 insn2
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4956 /* Assume that there is at most one conditional branch in the
4957 atomic sequence. If a conditional branch is found, put a
4958 breakpoint in its destination address. */
4959 if ((insn1
& 0xf800) == 0xf000
4960 && (insn2
& 0xd000) == 0x8000
4961 && (insn1
& 0x0380) != 0x0380)
4963 int sign
, j1
, j2
, imm1
, imm2
;
4964 unsigned int offset
;
4966 sign
= sbits (insn1
, 10, 10);
4967 imm1
= bits (insn1
, 0, 5);
4968 imm2
= bits (insn2
, 0, 10);
4969 j1
= bit (insn2
, 13);
4970 j2
= bit (insn2
, 11);
4972 offset
= (sign
<< 20) + (j2
<< 19) + (j1
<< 18);
4973 offset
+= (imm1
<< 12) + (imm2
<< 1);
4975 if (last_breakpoint
> 0)
4976 return 0; /* More than one conditional branch found,
4977 fallback to the standard code. */
4979 breaks
[1] = loc
+ offset
;
4983 /* We do not support atomic sequences that use any *other*
4984 instructions but conditional branches to change the PC.
4985 Fall back to standard code to avoid losing control of
4987 else if (thumb2_instruction_changes_pc (insn1
, insn2
))
4990 /* If we find a strex{,b,h,d}, we're done. */
4991 if ((insn1
& 0xfff0) == 0xe840
4992 || ((insn1
& 0xfff0) == 0xe8c0 && (insn2
& 0x00c0) == 0x0040))
4997 /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
4998 if (insn_count
== atomic_sequence_length
)
5001 /* Insert a breakpoint right after the end of the atomic sequence. */
5004 /* Check for duplicated breakpoints. Check also for a breakpoint
5005 placed (branch instruction's destination) anywhere in sequence. */
5007 && (breaks
[1] == breaks
[0]
5008 || (breaks
[1] >= pc
&& breaks
[1] < loc
)))
5009 last_breakpoint
= 0;
5011 /* Effectively inserts the breakpoints. */
5012 for (index
= 0; index
<= last_breakpoint
; index
++)
5013 arm_insert_single_step_breakpoint (gdbarch
, aspace
,
5014 MAKE_THUMB_ADDR (breaks
[index
]));
5020 arm_deal_with_atomic_sequence_raw (struct frame_info
*frame
)
5022 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
5023 struct address_space
*aspace
= get_frame_address_space (frame
);
5024 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
5025 CORE_ADDR pc
= get_frame_pc (frame
);
5026 CORE_ADDR breaks
[2] = {-1, -1};
5031 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
5032 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
5034 /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
5035 Note that we do not currently support conditionally executed atomic
5037 insn
= read_memory_unsigned_integer (loc
, 4, byte_order_for_code
);
5039 if ((insn
& 0xff9000f0) != 0xe1900090)
5042 /* Assume that no atomic sequence is longer than "atomic_sequence_length"
5044 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
5046 insn
= read_memory_unsigned_integer (loc
, 4, byte_order_for_code
);
5049 /* Assume that there is at most one conditional branch in the atomic
5050 sequence. If a conditional branch is found, put a breakpoint in
5051 its destination address. */
5052 if (bits (insn
, 24, 27) == 0xa)
5054 if (last_breakpoint
> 0)
5055 return 0; /* More than one conditional branch found, fallback
5056 to the standard single-step code. */
5058 breaks
[1] = BranchDest (loc
- 4, insn
);
5062 /* We do not support atomic sequences that use any *other* instructions
5063 but conditional branches to change the PC. Fall back to standard
5064 code to avoid losing control of execution. */
5065 else if (arm_instruction_changes_pc (insn
))
5068 /* If we find a strex{,b,h,d}, we're done. */
5069 if ((insn
& 0xff9000f0) == 0xe1800090)
5073 /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
5074 if (insn_count
== atomic_sequence_length
)
5077 /* Insert a breakpoint right after the end of the atomic sequence. */
5080 /* Check for duplicated breakpoints. Check also for a breakpoint
5081 placed (branch instruction's destination) anywhere in sequence. */
5083 && (breaks
[1] == breaks
[0]
5084 || (breaks
[1] >= pc
&& breaks
[1] < loc
)))
5085 last_breakpoint
= 0;
5087 /* Effectively inserts the breakpoints. */
5088 for (index
= 0; index
<= last_breakpoint
; index
++)
5089 arm_insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
/* Dispatch atomic-sequence handling to the Thumb or ARM flavour,
   depending on the execution mode at FRAME's PC.  */

static int
arm_deal_with_atomic_sequence (struct frame_info *frame)
{
  if (arm_frame_is_thumb (frame))
    return thumb_deal_with_atomic_sequence_raw (frame);
  else
    return arm_deal_with_atomic_sequence_raw (frame);
}
5103 /* single_step() is called just before we want to resume the inferior,
5104 if we want to single-step it but there is no hardware or kernel
5105 single-step support. We find the target of the coming instruction
5106 and breakpoint it. */
5109 arm_software_single_step (struct frame_info
*frame
)
5111 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
5112 struct address_space
*aspace
= get_frame_address_space (frame
);
5115 if (arm_deal_with_atomic_sequence (frame
))
5118 next_pc
= arm_get_next_pc (frame
, get_frame_pc (frame
));
5119 arm_insert_single_step_breakpoint (gdbarch
, aspace
, next_pc
);
5124 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5125 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5126 NULL if an error occurs. BUF is freed. */
5129 extend_buffer_earlier (gdb_byte
*buf
, CORE_ADDR endaddr
,
5130 int old_len
, int new_len
)
5132 gdb_byte
*new_buf
, *middle
;
5133 int bytes_to_read
= new_len
- old_len
;
5135 new_buf
= xmalloc (new_len
);
5136 memcpy (new_buf
+ bytes_to_read
, buf
, old_len
);
5138 if (target_read_memory (endaddr
- new_len
, new_buf
, bytes_to_read
) != 0)
5146 /* An IT block is at most the 2-byte IT instruction followed by
5147 four 4-byte instructions. The furthest back we must search to
5148 find an IT block that affects the current instruction is thus
5149 2 + 3 * 4 == 14 bytes. */
5150 #define MAX_IT_BLOCK_PREFIX 14
5152 /* Use a quick scan if there are more than this many bytes of
5154 #define IT_SCAN_THRESHOLD 32
5156 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5157 A breakpoint in an IT block may not be hit, depending on the
5160 arm_adjust_breakpoint_address (struct gdbarch
*gdbarch
, CORE_ADDR bpaddr
)
5164 CORE_ADDR boundary
, func_start
;
5165 int buf_len
, buf2_len
;
5166 enum bfd_endian order
= gdbarch_byte_order_for_code (gdbarch
);
5167 int i
, any
, last_it
, last_it_count
;
5169 /* If we are using BKPT breakpoints, none of this is necessary. */
5170 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
== NULL
)
5173 /* ARM mode does not have this problem. */
5174 if (!arm_pc_is_thumb (gdbarch
, bpaddr
))
5177 /* We are setting a breakpoint in Thumb code that could potentially
5178 contain an IT block. The first step is to find how much Thumb
5179 code there is; we do not need to read outside of known Thumb
5181 map_type
= arm_find_mapping_symbol (bpaddr
, &boundary
);
5183 /* Thumb-2 code must have mapping symbols to have a chance. */
5186 bpaddr
= gdbarch_addr_bits_remove (gdbarch
, bpaddr
);
5188 if (find_pc_partial_function (bpaddr
, NULL
, &func_start
, NULL
)
5189 && func_start
> boundary
)
5190 boundary
= func_start
;
5192 /* Search for a candidate IT instruction. We have to do some fancy
5193 footwork to distinguish a real IT instruction from the second
5194 half of a 32-bit instruction, but there is no need for that if
5195 there's no candidate. */
5196 buf_len
= min (bpaddr
- boundary
, MAX_IT_BLOCK_PREFIX
);
5198 /* No room for an IT instruction. */
5201 buf
= xmalloc (buf_len
);
5202 if (target_read_memory (bpaddr
- buf_len
, buf
, buf_len
) != 0)
5205 for (i
= 0; i
< buf_len
; i
+= 2)
5207 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5208 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
5220 /* OK, the code bytes before this instruction contain at least one
5221 halfword which resembles an IT instruction. We know that it's
5222 Thumb code, but there are still two possibilities. Either the
5223 halfword really is an IT instruction, or it is the second half of
5224 a 32-bit Thumb instruction. The only way we can tell is to
5225 scan forwards from a known instruction boundary. */
5226 if (bpaddr
- boundary
> IT_SCAN_THRESHOLD
)
5230 /* There's a lot of code before this instruction. Start with an
5231 optimistic search; it's easy to recognize halfwords that can
5232 not be the start of a 32-bit instruction, and use that to
5233 lock on to the instruction boundaries. */
5234 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, IT_SCAN_THRESHOLD
);
5237 buf_len
= IT_SCAN_THRESHOLD
;
5240 for (i
= 0; i
< buf_len
- sizeof (buf
) && ! definite
; i
+= 2)
5242 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5243 if (thumb_insn_size (inst1
) == 2)
5250 /* At this point, if DEFINITE, BUF[I] is the first place we
5251 are sure that we know the instruction boundaries, and it is far
5252 enough from BPADDR that we could not miss an IT instruction
5253 affecting BPADDR. If ! DEFINITE, give up - start from a
5257 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
,
5261 buf_len
= bpaddr
- boundary
;
5267 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, bpaddr
- boundary
);
5270 buf_len
= bpaddr
- boundary
;
5274 /* Scan forwards. Find the last IT instruction before BPADDR. */
5279 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5281 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
5286 else if (inst1
& 0x0002)
5288 else if (inst1
& 0x0004)
5293 i
+= thumb_insn_size (inst1
);
5299 /* There wasn't really an IT instruction after all. */
5302 if (last_it_count
< 1)
5303 /* It was too far away. */
5306 /* This really is a trouble spot. Move the breakpoint to the IT
5308 return bpaddr
- buf_len
+ last_it
;
5311 /* ARM displaced stepping support.
5313 Generally ARM displaced stepping works as follows:
5315 1. When an instruction is to be single-stepped, it is first decoded by
5316 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5317 Depending on the type of instruction, it is then copied to a scratch
5318 location, possibly in a modified form. The copy_* set of functions
5319 performs such modification, as necessary. A breakpoint is placed after
5320 the modified instruction in the scratch space to return control to GDB.
5321 Note in particular that instructions which modify the PC will no longer
5322 do so after modification.
5324 2. The instruction is single-stepped, by setting the PC to the scratch
5325 location address, and resuming. Control returns to GDB when the
5328 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5329 function used for the current instruction. This function's job is to
5330 put the CPU/memory state back to what it would have been if the
5331 instruction had been executed unmodified in its original location. */
5333 /* NOP instruction (mov r0, r0). */
5334 #define ARM_NOP 0xe1a00000
5335 #define THUMB_NOP 0x4600
5337 /* Helper for register reads for displaced stepping. In particular, this
5338 returns the PC as it would be seen by the instruction at its original
5342 displaced_read_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5346 CORE_ADDR from
= dsc
->insn_addr
;
5348 if (regno
== ARM_PC_REGNUM
)
5350 /* Compute pipeline offset:
5351 - When executing an ARM instruction, PC reads as the address of the
5352 current instruction plus 8.
5353 - When executing a Thumb instruction, PC reads as the address of the
5354 current instruction plus 4. */
5361 if (debug_displaced
)
5362 fprintf_unfiltered (gdb_stdlog
, "displaced: read pc value %.8lx\n",
5363 (unsigned long) from
);
5364 return (ULONGEST
) from
;
5368 regcache_cooked_read_unsigned (regs
, regno
, &ret
);
5369 if (debug_displaced
)
5370 fprintf_unfiltered (gdb_stdlog
, "displaced: read r%d value %.8lx\n",
5371 regno
, (unsigned long) ret
);
5377 displaced_in_arm_mode (struct regcache
*regs
)
5380 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
5382 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
5384 return (ps
& t_bit
) == 0;
5387 /* Write to the PC as from a branch instruction. */
5390 branch_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5394 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5395 architecture versions < 6. */
5396 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
5397 val
& ~(ULONGEST
) 0x3);
5399 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
5400 val
& ~(ULONGEST
) 0x1);
5403 /* Write to the PC as from a branch-exchange instruction. */
5406 bx_write_pc (struct regcache
*regs
, ULONGEST val
)
5409 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
5411 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
5415 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
| t_bit
);
5416 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffe);
5418 else if ((val
& 2) == 0)
5420 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
5421 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
);
5425 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5426 mode, align dest to 4 bytes). */
5427 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5428 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
5429 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffc);
5433 /* Write to the PC as if from a load instruction. */
5436 load_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5439 if (DISPLACED_STEPPING_ARCH_VERSION
>= 5)
5440 bx_write_pc (regs
, val
);
5442 branch_write_pc (regs
, dsc
, val
);
5445 /* Write to the PC as if from an ALU instruction. */
5448 alu_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5451 if (DISPLACED_STEPPING_ARCH_VERSION
>= 7 && !dsc
->is_thumb
)
5452 bx_write_pc (regs
, val
);
5454 branch_write_pc (regs
, dsc
, val
);
5457 /* Helper for writing to registers for displaced stepping. Writing to the PC
5458 has a varying effects depending on the instruction which does the write:
5459 this is controlled by the WRITE_PC argument. */
5462 displaced_write_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5463 int regno
, ULONGEST val
, enum pc_write_style write_pc
)
5465 if (regno
== ARM_PC_REGNUM
)
5467 if (debug_displaced
)
5468 fprintf_unfiltered (gdb_stdlog
, "displaced: writing pc %.8lx\n",
5469 (unsigned long) val
);
5472 case BRANCH_WRITE_PC
:
5473 branch_write_pc (regs
, dsc
, val
);
5477 bx_write_pc (regs
, val
);
5481 load_write_pc (regs
, dsc
, val
);
5485 alu_write_pc (regs
, dsc
, val
);
5488 case CANNOT_WRITE_PC
:
5489 warning (_("Instruction wrote to PC in an unexpected way when "
5490 "single-stepping"));
5494 internal_error (__FILE__
, __LINE__
,
5495 _("Invalid argument to displaced_write_reg"));
5498 dsc
->wrote_to_pc
= 1;
5502 if (debug_displaced
)
5503 fprintf_unfiltered (gdb_stdlog
, "displaced: writing r%d value %.8lx\n",
5504 regno
, (unsigned long) val
);
5505 regcache_cooked_write_unsigned (regs
, regno
, val
);
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function
   returns return 1 if any of these fields in INSN reference the PC
   (also 0b1111, r15), else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t lowbit = 1;

  while (bitmask != 0)
    {
      uint32_t mask;

      /* Advance to the lowest bit of the next register field.  */
      for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
	;

      if (!lowbit)
	return 0;

      /* A register field is four bits wide.  */
      mask = lowbit * 0xf;

      if ((insn & mask) == mask)
	return 1;

      bitmask &= ~mask;
    }

  return 0;
}
5541 /* The simplest copy function. Many instructions have the same effect no
5542 matter what address they are executed at: in those cases, use this. */
5545 arm_copy_unmodified (struct gdbarch
*gdbarch
, uint32_t insn
,
5546 const char *iname
, struct displaced_step_closure
*dsc
)
5548 if (debug_displaced
)
5549 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.8lx, "
5550 "opcode/class '%s' unmodified\n", (unsigned long) insn
,
5553 dsc
->modinsn
[0] = insn
;
5559 thumb_copy_unmodified_32bit (struct gdbarch
*gdbarch
, uint16_t insn1
,
5560 uint16_t insn2
, const char *iname
,
5561 struct displaced_step_closure
*dsc
)
5563 if (debug_displaced
)
5564 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x %.4x, "
5565 "opcode/class '%s' unmodified\n", insn1
, insn2
,
5568 dsc
->modinsn
[0] = insn1
;
5569 dsc
->modinsn
[1] = insn2
;
5575 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5578 thumb_copy_unmodified_16bit (struct gdbarch
*gdbarch
, unsigned int insn
,
5580 struct displaced_step_closure
*dsc
)
5582 if (debug_displaced
)
5583 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x, "
5584 "opcode/class '%s' unmodified\n", insn
,
5587 dsc
->modinsn
[0] = insn
;
5592 /* Preload instructions with immediate offset. */
5595 cleanup_preload (struct gdbarch
*gdbarch
,
5596 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5598 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5599 if (!dsc
->u
.preload
.immed
)
5600 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
5604 install_preload (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5605 struct displaced_step_closure
*dsc
, unsigned int rn
)
5608 /* Preload instructions:
5610 {pli/pld} [rn, #+/-imm]
5612 {pli/pld} [r0, #+/-imm]. */
5614 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5615 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5616 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5617 dsc
->u
.preload
.immed
= 1;
5619 dsc
->cleanup
= &cleanup_preload
;
5623 arm_copy_preload (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
5624 struct displaced_step_closure
*dsc
)
5626 unsigned int rn
= bits (insn
, 16, 19);
5628 if (!insn_references_pc (insn
, 0x000f0000ul
))
5629 return arm_copy_unmodified (gdbarch
, insn
, "preload", dsc
);
5631 if (debug_displaced
)
5632 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
5633 (unsigned long) insn
);
5635 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
5637 install_preload (gdbarch
, regs
, dsc
, rn
);
5643 thumb2_copy_preload (struct gdbarch
*gdbarch
, uint16_t insn1
, uint16_t insn2
,
5644 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5646 unsigned int rn
= bits (insn1
, 0, 3);
5647 unsigned int u_bit
= bit (insn1
, 7);
5648 int imm12
= bits (insn2
, 0, 11);
5651 if (rn
!= ARM_PC_REGNUM
)
5652 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "preload", dsc
);
5654 /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
5655 PLD (literal) Encoding T1. */
5656 if (debug_displaced
)
5657 fprintf_unfiltered (gdb_stdlog
,
5658 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
5659 (unsigned int) dsc
->insn_addr
, u_bit
? '+' : '-',
5665 /* Rewrite instruction {pli/pld} PC imm12 into:
5666 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
5670 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
5672 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5673 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5675 pc_val
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
5677 displaced_write_reg (regs
, dsc
, 0, pc_val
, CANNOT_WRITE_PC
);
5678 displaced_write_reg (regs
, dsc
, 1, imm12
, CANNOT_WRITE_PC
);
5679 dsc
->u
.preload
.immed
= 0;
5681 /* {pli/pld} [r0, r1] */
5682 dsc
->modinsn
[0] = insn1
& 0xfff0;
5683 dsc
->modinsn
[1] = 0xf001;
5686 dsc
->cleanup
= &cleanup_preload
;
5690 /* Preload instructions with register offset. */
5693 install_preload_reg(struct gdbarch
*gdbarch
, struct regcache
*regs
,
5694 struct displaced_step_closure
*dsc
, unsigned int rn
,
5697 ULONGEST rn_val
, rm_val
;
5699 /* Preload register-offset instructions:
5701 {pli/pld} [rn, rm {, shift}]
5703 {pli/pld} [r0, r1 {, shift}]. */
5705 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5706 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5707 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5708 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
5709 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5710 displaced_write_reg (regs
, dsc
, 1, rm_val
, CANNOT_WRITE_PC
);
5711 dsc
->u
.preload
.immed
= 0;
5713 dsc
->cleanup
= &cleanup_preload
;
5717 arm_copy_preload_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
5718 struct regcache
*regs
,
5719 struct displaced_step_closure
*dsc
)
5721 unsigned int rn
= bits (insn
, 16, 19);
5722 unsigned int rm
= bits (insn
, 0, 3);
5725 if (!insn_references_pc (insn
, 0x000f000ful
))
5726 return arm_copy_unmodified (gdbarch
, insn
, "preload reg", dsc
);
5728 if (debug_displaced
)
5729 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
5730 (unsigned long) insn
);
5732 dsc
->modinsn
[0] = (insn
& 0xfff0fff0) | 0x1;
5734 install_preload_reg (gdbarch
, regs
, dsc
, rn
, rm
);
5738 /* Copy/cleanup coprocessor load and store instructions. */
5741 cleanup_copro_load_store (struct gdbarch
*gdbarch
,
5742 struct regcache
*regs
,
5743 struct displaced_step_closure
*dsc
)
5745 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
, 0);
5747 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5749 if (dsc
->u
.ldst
.writeback
)
5750 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, LOAD_WRITE_PC
);
5754 install_copro_load_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5755 struct displaced_step_closure
*dsc
,
5756 int writeback
, unsigned int rn
)
5760 /* Coprocessor load/store instructions:
5762 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5764 {stc/stc2} [r0, #+/-imm].
5766 ldc/ldc2 are handled identically. */
5768 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5769 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5770 /* PC should be 4-byte aligned. */
5771 rn_val
= rn_val
& 0xfffffffc;
5772 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5774 dsc
->u
.ldst
.writeback
= writeback
;
5775 dsc
->u
.ldst
.rn
= rn
;
5777 dsc
->cleanup
= &cleanup_copro_load_store
;
5781 arm_copy_copro_load_store (struct gdbarch
*gdbarch
, uint32_t insn
,
5782 struct regcache
*regs
,
5783 struct displaced_step_closure
*dsc
)
5785 unsigned int rn
= bits (insn
, 16, 19);
5787 if (!insn_references_pc (insn
, 0x000f0000ul
))
5788 return arm_copy_unmodified (gdbarch
, insn
, "copro load/store", dsc
);
5790 if (debug_displaced
)
5791 fprintf_unfiltered (gdb_stdlog
, "displaced: copying coprocessor "
5792 "load/store insn %.8lx\n", (unsigned long) insn
);
5794 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
5796 install_copro_load_store (gdbarch
, regs
, dsc
, bit (insn
, 25), rn
);
5802 thumb2_copy_copro_load_store (struct gdbarch
*gdbarch
, uint16_t insn1
,
5803 uint16_t insn2
, struct regcache
*regs
,
5804 struct displaced_step_closure
*dsc
)
5806 unsigned int rn
= bits (insn1
, 0, 3);
5808 if (rn
!= ARM_PC_REGNUM
)
5809 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
5810 "copro load/store", dsc
);
5812 if (debug_displaced
)
5813 fprintf_unfiltered (gdb_stdlog
, "displaced: copying coprocessor "
5814 "load/store insn %.4x%.4x\n", insn1
, insn2
);
5816 dsc
->modinsn
[0] = insn1
& 0xfff0;
5817 dsc
->modinsn
[1] = insn2
;
5820 /* This function is called for copying instruction LDC/LDC2/VLDR, which
5821 doesn't support writeback, so pass 0. */
5822 install_copro_load_store (gdbarch
, regs
, dsc
, 0, rn
);
5827 /* Clean up branch instructions (actually perform the branch, by setting
5831 cleanup_branch (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5832 struct displaced_step_closure
*dsc
)
5834 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
5835 int branch_taken
= condition_true (dsc
->u
.branch
.cond
, status
);
5836 enum pc_write_style write_pc
= dsc
->u
.branch
.exchange
5837 ? BX_WRITE_PC
: BRANCH_WRITE_PC
;
5842 if (dsc
->u
.branch
.link
)
5844 /* The value of LR should be the next insn of current one. In order
5845 not to confuse logic hanlding later insn `bx lr', if current insn mode
5846 is Thumb, the bit 0 of LR value should be set to 1. */
5847 ULONGEST next_insn_addr
= dsc
->insn_addr
+ dsc
->insn_size
;
5850 next_insn_addr
|= 0x1;
5852 displaced_write_reg (regs
, dsc
, ARM_LR_REGNUM
, next_insn_addr
,
5856 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, dsc
->u
.branch
.dest
, write_pc
);
5859 /* Copy B/BL/BLX instructions with immediate destinations. */
5862 install_b_bl_blx (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5863 struct displaced_step_closure
*dsc
,
5864 unsigned int cond
, int exchange
, int link
, long offset
)
5866 /* Implement "BL<cond> <label>" as:
5868 Preparation: cond <- instruction condition
5869 Insn: mov r0, r0 (nop)
5870 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5872 B<cond> similar, but don't set r14 in cleanup. */
5874 dsc
->u
.branch
.cond
= cond
;
5875 dsc
->u
.branch
.link
= link
;
5876 dsc
->u
.branch
.exchange
= exchange
;
5878 dsc
->u
.branch
.dest
= dsc
->insn_addr
;
5879 if (link
&& exchange
)
5880 /* For BLX, offset is computed from the Align (PC, 4). */
5881 dsc
->u
.branch
.dest
= dsc
->u
.branch
.dest
& 0xfffffffc;
5884 dsc
->u
.branch
.dest
+= 4 + offset
;
5886 dsc
->u
.branch
.dest
+= 8 + offset
;
5888 dsc
->cleanup
= &cleanup_branch
;
5891 arm_copy_b_bl_blx (struct gdbarch
*gdbarch
, uint32_t insn
,
5892 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5894 unsigned int cond
= bits (insn
, 28, 31);
5895 int exchange
= (cond
== 0xf);
5896 int link
= exchange
|| bit (insn
, 24);
5899 if (debug_displaced
)
5900 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s immediate insn "
5901 "%.8lx\n", (exchange
) ? "blx" : (link
) ? "bl" : "b",
5902 (unsigned long) insn
);
5904 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5905 then arrange the switch into Thumb mode. */
5906 offset
= (bits (insn
, 0, 23) << 2) | (bit (insn
, 24) << 1) | 1;
5908 offset
= bits (insn
, 0, 23) << 2;
5910 if (bit (offset
, 25))
5911 offset
= offset
| ~0x3ffffff;
5913 dsc
->modinsn
[0] = ARM_NOP
;
5915 install_b_bl_blx (gdbarch
, regs
, dsc
, cond
, exchange
, link
, offset
);
5920 thumb2_copy_b_bl_blx (struct gdbarch
*gdbarch
, uint16_t insn1
,
5921 uint16_t insn2
, struct regcache
*regs
,
5922 struct displaced_step_closure
*dsc
)
5924 int link
= bit (insn2
, 14);
5925 int exchange
= link
&& !bit (insn2
, 12);
5928 int j1
= bit (insn2
, 13);
5929 int j2
= bit (insn2
, 11);
5930 int s
= sbits (insn1
, 10, 10);
5931 int i1
= !(j1
^ bit (insn1
, 10));
5932 int i2
= !(j2
^ bit (insn1
, 10));
5934 if (!link
&& !exchange
) /* B */
5936 offset
= (bits (insn2
, 0, 10) << 1);
5937 if (bit (insn2
, 12)) /* Encoding T4 */
5939 offset
|= (bits (insn1
, 0, 9) << 12)
5945 else /* Encoding T3 */
5947 offset
|= (bits (insn1
, 0, 5) << 12)
5951 cond
= bits (insn1
, 6, 9);
5956 offset
= (bits (insn1
, 0, 9) << 12);
5957 offset
|= ((i2
<< 22) | (i1
<< 23) | (s
<< 24));
5958 offset
|= exchange
?
5959 (bits (insn2
, 1, 10) << 2) : (bits (insn2
, 0, 10) << 1);
5962 if (debug_displaced
)
5963 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s insn "
5964 "%.4x %.4x with offset %.8lx\n",
5965 link
? (exchange
) ? "blx" : "bl" : "b",
5966 insn1
, insn2
, offset
);
5968 dsc
->modinsn
[0] = THUMB_NOP
;
5970 install_b_bl_blx (gdbarch
, regs
, dsc
, cond
, exchange
, link
, offset
);
5974 /* Copy B Thumb instructions. */
5976 thumb_copy_b (struct gdbarch
*gdbarch
, unsigned short insn
,
5977 struct displaced_step_closure
*dsc
)
5979 unsigned int cond
= 0;
5981 unsigned short bit_12_15
= bits (insn
, 12, 15);
5982 CORE_ADDR from
= dsc
->insn_addr
;
5984 if (bit_12_15
== 0xd)
5986 /* offset = SignExtend (imm8:0, 32) */
5987 offset
= sbits ((insn
<< 1), 0, 8);
5988 cond
= bits (insn
, 8, 11);
5990 else if (bit_12_15
== 0xe) /* Encoding T2 */
5992 offset
= sbits ((insn
<< 1), 0, 11);
5996 if (debug_displaced
)
5997 fprintf_unfiltered (gdb_stdlog
,
5998 "displaced: copying b immediate insn %.4x "
5999 "with offset %d\n", insn
, offset
);
6001 dsc
->u
.branch
.cond
= cond
;
6002 dsc
->u
.branch
.link
= 0;
6003 dsc
->u
.branch
.exchange
= 0;
6004 dsc
->u
.branch
.dest
= from
+ 4 + offset
;
6006 dsc
->modinsn
[0] = THUMB_NOP
;
6008 dsc
->cleanup
= &cleanup_branch
;
6013 /* Copy BX/BLX with register-specified destinations. */
6016 install_bx_blx_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6017 struct displaced_step_closure
*dsc
, int link
,
6018 unsigned int cond
, unsigned int rm
)
6020 /* Implement {BX,BLX}<cond> <reg>" as:
6022 Preparation: cond <- instruction condition
6023 Insn: mov r0, r0 (nop)
6024 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6026 Don't set r14 in cleanup for BX. */
6028 dsc
->u
.branch
.dest
= displaced_read_reg (regs
, dsc
, rm
);
6030 dsc
->u
.branch
.cond
= cond
;
6031 dsc
->u
.branch
.link
= link
;
6033 dsc
->u
.branch
.exchange
= 1;
6035 dsc
->cleanup
= &cleanup_branch
;
6039 arm_copy_bx_blx_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
6040 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6042 unsigned int cond
= bits (insn
, 28, 31);
6045 int link
= bit (insn
, 5);
6046 unsigned int rm
= bits (insn
, 0, 3);
6048 if (debug_displaced
)
6049 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.8lx",
6050 (unsigned long) insn
);
6052 dsc
->modinsn
[0] = ARM_NOP
;
6054 install_bx_blx_reg (gdbarch
, regs
, dsc
, link
, cond
, rm
);
6059 thumb_copy_bx_blx_reg (struct gdbarch
*gdbarch
, uint16_t insn
,
6060 struct regcache
*regs
,
6061 struct displaced_step_closure
*dsc
)
6063 int link
= bit (insn
, 7);
6064 unsigned int rm
= bits (insn
, 3, 6);
6066 if (debug_displaced
)
6067 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x",
6068 (unsigned short) insn
);
6070 dsc
->modinsn
[0] = THUMB_NOP
;
6072 install_bx_blx_reg (gdbarch
, regs
, dsc
, link
, INST_AL
, rm
);
6078 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6081 cleanup_alu_imm (struct gdbarch
*gdbarch
,
6082 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6084 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
, 0);
6085 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6086 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6087 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6091 arm_copy_alu_imm (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
6092 struct displaced_step_closure
*dsc
)
6094 unsigned int rn
= bits (insn
, 16, 19);
6095 unsigned int rd
= bits (insn
, 12, 15);
6096 unsigned int op
= bits (insn
, 21, 24);
6097 int is_mov
= (op
== 0xd);
6098 ULONGEST rd_val
, rn_val
;
6100 if (!insn_references_pc (insn
, 0x000ff000ul
))
6101 return arm_copy_unmodified (gdbarch
, insn
, "ALU immediate", dsc
);
6103 if (debug_displaced
)
6104 fprintf_unfiltered (gdb_stdlog
, "displaced: copying immediate %s insn "
6105 "%.8lx\n", is_mov
? "move" : "ALU",
6106 (unsigned long) insn
);
6108 /* Instruction is of form:
6110 <op><cond> rd, [rn,] #imm
6114 Preparation: tmp1, tmp2 <- r0, r1;
6116 Insn: <op><cond> r0, r1, #imm
6117 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6120 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6121 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6122 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6123 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6124 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6125 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6129 dsc
->modinsn
[0] = insn
& 0xfff00fff;
6131 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x10000;
6133 dsc
->cleanup
= &cleanup_alu_imm
;
6139 thumb2_copy_alu_imm (struct gdbarch
*gdbarch
, uint16_t insn1
,
6140 uint16_t insn2
, struct regcache
*regs
,
6141 struct displaced_step_closure
*dsc
)
6143 unsigned int op
= bits (insn1
, 5, 8);
6144 unsigned int rn
, rm
, rd
;
6145 ULONGEST rd_val
, rn_val
;
6147 rn
= bits (insn1
, 0, 3); /* Rn */
6148 rm
= bits (insn2
, 0, 3); /* Rm */
6149 rd
= bits (insn2
, 8, 11); /* Rd */
6151 /* This routine is only called for instruction MOV. */
6152 gdb_assert (op
== 0x2 && rn
== 0xf);
6154 if (rm
!= ARM_PC_REGNUM
&& rd
!= ARM_PC_REGNUM
)
6155 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "ALU imm", dsc
);
6157 if (debug_displaced
)
6158 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.4x%.4x\n",
6159 "ALU", insn1
, insn2
);
6161 /* Instruction is of form:
6163 <op><cond> rd, [rn,] #imm
6167 Preparation: tmp1, tmp2 <- r0, r1;
6169 Insn: <op><cond> r0, r1, #imm
6170 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6173 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6174 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6175 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6176 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6177 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6178 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6181 dsc
->modinsn
[0] = insn1
;
6182 dsc
->modinsn
[1] = ((insn2
& 0xf0f0) | 0x1);
6185 dsc
->cleanup
= &cleanup_alu_imm
;
6190 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6193 cleanup_alu_reg (struct gdbarch
*gdbarch
,
6194 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6199 rd_val
= displaced_read_reg (regs
, dsc
, 0);
6201 for (i
= 0; i
< 3; i
++)
6202 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
6204 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6208 install_alu_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6209 struct displaced_step_closure
*dsc
,
6210 unsigned int rd
, unsigned int rn
, unsigned int rm
)
6212 ULONGEST rd_val
, rn_val
, rm_val
;
6214 /* Instruction is of form:
6216 <op><cond> rd, [rn,] rm [, <shift>]
6220 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
6221 r0, r1, r2 <- rd, rn, rm
6222 Insn: <op><cond> r0, r1, r2 [, <shift>]
6223 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
6226 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6227 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6228 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6229 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6230 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6231 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6232 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6233 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6234 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
6237 dsc
->cleanup
= &cleanup_alu_reg
;
6241 arm_copy_alu_reg (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
6242 struct displaced_step_closure
*dsc
)
6244 unsigned int op
= bits (insn
, 21, 24);
6245 int is_mov
= (op
== 0xd);
6247 if (!insn_references_pc (insn
, 0x000ff00ful
))
6248 return arm_copy_unmodified (gdbarch
, insn
, "ALU reg", dsc
);
6250 if (debug_displaced
)
6251 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.8lx\n",
6252 is_mov
? "move" : "ALU", (unsigned long) insn
);
6255 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x2;
6257 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x10002;
6259 install_alu_reg (gdbarch
, regs
, dsc
, bits (insn
, 12, 15), bits (insn
, 16, 19),
6265 thumb_copy_alu_reg (struct gdbarch
*gdbarch
, uint16_t insn
,
6266 struct regcache
*regs
,
6267 struct displaced_step_closure
*dsc
)
6269 unsigned rn
, rm
, rd
;
6271 rd
= bits (insn
, 3, 6);
6272 rn
= (bit (insn
, 7) << 3) | bits (insn
, 0, 2);
6275 if (rd
!= ARM_PC_REGNUM
&& rn
!= ARM_PC_REGNUM
)
6276 return thumb_copy_unmodified_16bit (gdbarch
, insn
, "ALU reg", dsc
);
6278 if (debug_displaced
)
6279 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.4x\n",
6280 "ALU", (unsigned short) insn
);
6282 dsc
->modinsn
[0] = ((insn
& 0xff00) | 0x08);
6284 install_alu_reg (gdbarch
, regs
, dsc
, rd
, rn
, rm
);
6289 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6292 cleanup_alu_shifted_reg (struct gdbarch
*gdbarch
,
6293 struct regcache
*regs
,
6294 struct displaced_step_closure
*dsc
)
6296 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
, 0);
6299 for (i
= 0; i
< 4; i
++)
6300 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
6302 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6306 install_alu_shifted_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6307 struct displaced_step_closure
*dsc
,
6308 unsigned int rd
, unsigned int rn
, unsigned int rm
,
6312 ULONGEST rd_val
, rn_val
, rm_val
, rs_val
;
6314 /* Instruction is of form:
6316 <op><cond> rd, [rn,] rm, <shift> rs
6320 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6321 r0, r1, r2, r3 <- rd, rn, rm, rs
6322 Insn: <op><cond> r0, r1, r2, <shift> r3
6324 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6328 for (i
= 0; i
< 4; i
++)
6329 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
6331 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6332 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6333 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6334 rs_val
= displaced_read_reg (regs
, dsc
, rs
);
6335 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6336 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6337 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
6338 displaced_write_reg (regs
, dsc
, 3, rs_val
, CANNOT_WRITE_PC
);
6340 dsc
->cleanup
= &cleanup_alu_shifted_reg
;
6344 arm_copy_alu_shifted_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
6345 struct regcache
*regs
,
6346 struct displaced_step_closure
*dsc
)
6348 unsigned int op
= bits (insn
, 21, 24);
6349 int is_mov
= (op
== 0xd);
6350 unsigned int rd
, rn
, rm
, rs
;
6352 if (!insn_references_pc (insn
, 0x000fff0ful
))
6353 return arm_copy_unmodified (gdbarch
, insn
, "ALU shifted reg", dsc
);
6355 if (debug_displaced
)
6356 fprintf_unfiltered (gdb_stdlog
, "displaced: copying shifted reg %s insn "
6357 "%.8lx\n", is_mov
? "move" : "ALU",
6358 (unsigned long) insn
);
6360 rn
= bits (insn
, 16, 19);
6361 rm
= bits (insn
, 0, 3);
6362 rs
= bits (insn
, 8, 11);
6363 rd
= bits (insn
, 12, 15);
6366 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x302;
6368 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x10302;
6370 install_alu_shifted_reg (gdbarch
, regs
, dsc
, rd
, rn
, rm
, rs
);
6375 /* Clean up load instructions. */
6378 cleanup_load (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6379 struct displaced_step_closure
*dsc
)
6381 ULONGEST rt_val
, rt_val2
= 0, rn_val
;
6383 rt_val
= displaced_read_reg (regs
, dsc
, 0);
6384 if (dsc
->u
.ldst
.xfersize
== 8)
6385 rt_val2
= displaced_read_reg (regs
, dsc
, 1);
6386 rn_val
= displaced_read_reg (regs
, dsc
, 2);
6388 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6389 if (dsc
->u
.ldst
.xfersize
> 4)
6390 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6391 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
6392 if (!dsc
->u
.ldst
.immed
)
6393 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
6395 /* Handle register writeback. */
6396 if (dsc
->u
.ldst
.writeback
)
6397 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
6398 /* Put result in right place. */
6399 displaced_write_reg (regs
, dsc
, dsc
->rd
, rt_val
, LOAD_WRITE_PC
);
6400 if (dsc
->u
.ldst
.xfersize
== 8)
6401 displaced_write_reg (regs
, dsc
, dsc
->rd
+ 1, rt_val2
, LOAD_WRITE_PC
);
6404 /* Clean up store instructions. */
6407 cleanup_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6408 struct displaced_step_closure
*dsc
)
6410 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
, 2);
6412 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6413 if (dsc
->u
.ldst
.xfersize
> 4)
6414 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6415 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
6416 if (!dsc
->u
.ldst
.immed
)
6417 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
6418 if (!dsc
->u
.ldst
.restore_r4
)
6419 displaced_write_reg (regs
, dsc
, 4, dsc
->tmp
[4], CANNOT_WRITE_PC
);
6422 if (dsc
->u
.ldst
.writeback
)
6423 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
6426 /* Copy "extra" load/store instructions. These are halfword/doubleword
6427 transfers, which have a different encoding to byte/word transfers. */
6430 arm_copy_extra_ld_st (struct gdbarch
*gdbarch
, uint32_t insn
, int unpriveleged
,
6431 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6433 unsigned int op1
= bits (insn
, 20, 24);
6434 unsigned int op2
= bits (insn
, 5, 6);
6435 unsigned int rt
= bits (insn
, 12, 15);
6436 unsigned int rn
= bits (insn
, 16, 19);
6437 unsigned int rm
= bits (insn
, 0, 3);
6438 char load
[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6439 char bytesize
[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6440 int immed
= (op1
& 0x4) != 0;
6442 ULONGEST rt_val
, rt_val2
= 0, rn_val
, rm_val
= 0;
6444 if (!insn_references_pc (insn
, 0x000ff00ful
))
6445 return arm_copy_unmodified (gdbarch
, insn
, "extra load/store", dsc
);
6447 if (debug_displaced
)
6448 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %sextra load/store "
6449 "insn %.8lx\n", unpriveleged
? "unpriveleged " : "",
6450 (unsigned long) insn
);
6452 opcode
= ((op2
<< 2) | (op1
& 0x1) | ((op1
& 0x4) >> 1)) - 4;
6455 internal_error (__FILE__
, __LINE__
,
6456 _("copy_extra_ld_st: instruction decode error"));
6458 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6459 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6460 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6462 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6464 rt_val
= displaced_read_reg (regs
, dsc
, rt
);
6465 if (bytesize
[opcode
] == 8)
6466 rt_val2
= displaced_read_reg (regs
, dsc
, rt
+ 1);
6467 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6469 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6471 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
6472 if (bytesize
[opcode
] == 8)
6473 displaced_write_reg (regs
, dsc
, 1, rt_val2
, CANNOT_WRITE_PC
);
6474 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
6476 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
6479 dsc
->u
.ldst
.xfersize
= bytesize
[opcode
];
6480 dsc
->u
.ldst
.rn
= rn
;
6481 dsc
->u
.ldst
.immed
= immed
;
6482 dsc
->u
.ldst
.writeback
= bit (insn
, 24) == 0 || bit (insn
, 21) != 0;
6483 dsc
->u
.ldst
.restore_r4
= 0;
6486 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6488 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6489 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
6491 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6493 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6494 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
6496 dsc
->cleanup
= load
[opcode
] ? &cleanup_load
: &cleanup_store
;
6501 /* Copy byte/half word/word loads and stores. */
6504 install_load_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6505 struct displaced_step_closure
*dsc
, int load
,
6506 int immed
, int writeback
, int size
, int usermode
,
6507 int rt
, int rm
, int rn
)
6509 ULONGEST rt_val
, rn_val
, rm_val
= 0;
6511 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6512 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6514 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6516 dsc
->tmp
[4] = displaced_read_reg (regs
, dsc
, 4);
6518 rt_val
= displaced_read_reg (regs
, dsc
, rt
);
6519 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6521 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6523 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
6524 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
6526 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
6528 dsc
->u
.ldst
.xfersize
= size
;
6529 dsc
->u
.ldst
.rn
= rn
;
6530 dsc
->u
.ldst
.immed
= immed
;
6531 dsc
->u
.ldst
.writeback
= writeback
;
6533 /* To write PC we can do:
6535 Before this sequence of instructions:
6536 r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
6537 r2 is the Rn value got from dispalced_read_reg.
6539 Insn1: push {pc} Write address of STR instruction + offset on stack
6540 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
6541 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
6542 = addr(Insn1) + offset - addr(Insn3) - 8
6544 Insn4: add r4, r4, #8 r4 = offset - 8
6545 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
6547 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
6549 Otherwise we don't know what value to write for PC, since the offset is
6550 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
6551 of this can be found in Section "Saving from r15" in
6552 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
6554 dsc
->cleanup
= load
? &cleanup_load
: &cleanup_store
;
6559 thumb2_copy_load_literal (struct gdbarch
*gdbarch
, uint16_t insn1
,
6560 uint16_t insn2
, struct regcache
*regs
,
6561 struct displaced_step_closure
*dsc
, int size
)
6563 unsigned int u_bit
= bit (insn1
, 7);
6564 unsigned int rt
= bits (insn2
, 12, 15);
6565 int imm12
= bits (insn2
, 0, 11);
6568 if (debug_displaced
)
6569 fprintf_unfiltered (gdb_stdlog
,
6570 "displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
6571 (unsigned int) dsc
->insn_addr
, rt
, u_bit
? '+' : '-',
6577 /* Rewrite instruction LDR Rt imm12 into:
6579 Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12
6583 Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2]. */
6586 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6587 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6588 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6590 pc_val
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
6592 pc_val
= pc_val
& 0xfffffffc;
6594 displaced_write_reg (regs
, dsc
, 2, pc_val
, CANNOT_WRITE_PC
);
6595 displaced_write_reg (regs
, dsc
, 3, imm12
, CANNOT_WRITE_PC
);
6599 dsc
->u
.ldst
.xfersize
= size
;
6600 dsc
->u
.ldst
.immed
= 0;
6601 dsc
->u
.ldst
.writeback
= 0;
6602 dsc
->u
.ldst
.restore_r4
= 0;
6604 /* LDR R0, R2, R3 */
6605 dsc
->modinsn
[0] = 0xf852;
6606 dsc
->modinsn
[1] = 0x3;
6609 dsc
->cleanup
= &cleanup_load
;
6615 thumb2_copy_load_reg_imm (struct gdbarch
*gdbarch
, uint16_t insn1
,
6616 uint16_t insn2
, struct regcache
*regs
,
6617 struct displaced_step_closure
*dsc
,
6618 int writeback
, int immed
)
6620 unsigned int rt
= bits (insn2
, 12, 15);
6621 unsigned int rn
= bits (insn1
, 0, 3);
6622 unsigned int rm
= bits (insn2
, 0, 3); /* Only valid if !immed. */
6623 /* In LDR (register), there is also a register Rm, which is not allowed to
6624 be PC, so we don't have to check it. */
6626 if (rt
!= ARM_PC_REGNUM
&& rn
!= ARM_PC_REGNUM
)
6627 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "load",
6630 if (debug_displaced
)
6631 fprintf_unfiltered (gdb_stdlog
,
6632 "displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
6633 rt
, rn
, insn1
, insn2
);
6635 install_load_store (gdbarch
, regs
, dsc
, 1, immed
, writeback
, 4,
6638 dsc
->u
.ldst
.restore_r4
= 0;
6641 /* ldr[b]<cond> rt, [rn, #imm], etc.
6643 ldr[b]<cond> r0, [r2, #imm]. */
6645 dsc
->modinsn
[0] = (insn1
& 0xfff0) | 0x2;
6646 dsc
->modinsn
[1] = insn2
& 0x0fff;
6649 /* ldr[b]<cond> rt, [rn, rm], etc.
6651 ldr[b]<cond> r0, [r2, r3]. */
6653 dsc
->modinsn
[0] = (insn1
& 0xfff0) | 0x2;
6654 dsc
->modinsn
[1] = (insn2
& 0x0ff0) | 0x3;
6664 arm_copy_ldr_str_ldrb_strb (struct gdbarch
*gdbarch
, uint32_t insn
,
6665 struct regcache
*regs
,
6666 struct displaced_step_closure
*dsc
,
6667 int load
, int size
, int usermode
)
6669 int immed
= !bit (insn
, 25);
6670 int writeback
= (bit (insn
, 24) == 0 || bit (insn
, 21) != 0);
6671 unsigned int rt
= bits (insn
, 12, 15);
6672 unsigned int rn
= bits (insn
, 16, 19);
6673 unsigned int rm
= bits (insn
, 0, 3); /* Only valid if !immed. */
6675 if (!insn_references_pc (insn
, 0x000ff00ful
))
6676 return arm_copy_unmodified (gdbarch
, insn
, "load/store", dsc
);
6678 if (debug_displaced
)
6679 fprintf_unfiltered (gdb_stdlog
,
6680 "displaced: copying %s%s r%d [r%d] insn %.8lx\n",
6681 load
? (size
== 1 ? "ldrb" : "ldr")
6682 : (size
== 1 ? "strb" : "str"), usermode
? "t" : "",
6684 (unsigned long) insn
);
6686 install_load_store (gdbarch
, regs
, dsc
, load
, immed
, writeback
, size
,
6687 usermode
, rt
, rm
, rn
);
6689 if (load
|| rt
!= ARM_PC_REGNUM
)
6691 dsc
->u
.ldst
.restore_r4
= 0;
6694 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
6696 {ldr,str}[b]<cond> r0, [r2, #imm]. */
6697 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
6699 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
6701 {ldr,str}[b]<cond> r0, [r2, r3]. */
6702 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
6706 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
6707 dsc
->u
.ldst
.restore_r4
= 1;
6708 dsc
->modinsn
[0] = 0xe92d8000; /* push {pc} */
6709 dsc
->modinsn
[1] = 0xe8bd0010; /* pop {r4} */
6710 dsc
->modinsn
[2] = 0xe044400f; /* sub r4, r4, pc. */
6711 dsc
->modinsn
[3] = 0xe2844008; /* add r4, r4, #8. */
6712 dsc
->modinsn
[4] = 0xe0800004; /* add r0, r0, r4. */
6716 dsc
->modinsn
[5] = (insn
& 0xfff00fff) | 0x20000;
6718 dsc
->modinsn
[5] = (insn
& 0xfff00ff0) | 0x20003;
6723 dsc
->cleanup
= load
? &cleanup_load
: &cleanup_store
;
6728 /* Cleanup LDM instructions with fully-populated register list. This is an
6729 unfortunate corner case: it's impossible to implement correctly by modifying
6730 the instruction. The issue is as follows: we have an instruction,
6734 which we must rewrite to avoid loading PC. A possible solution would be to
6735 do the load in two halves, something like (with suitable cleanup
6739 ldm[id][ab] r8!, {r0-r7}
6741 ldm[id][ab] r8, {r7-r14}
6744 but at present there's no suitable place for <temp>, since the scratch space
6745 is overwritten before the cleanup routine is called. For now, we simply
6746 emulate the instruction. */
6749 cleanup_block_load_all (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6750 struct displaced_step_closure
*dsc
)
6752 int inc
= dsc
->u
.block
.increment
;
6753 int bump_before
= dsc
->u
.block
.before
? (inc
? 4 : -4) : 0;
6754 int bump_after
= dsc
->u
.block
.before
? 0 : (inc
? 4 : -4);
6755 uint32_t regmask
= dsc
->u
.block
.regmask
;
6756 int regno
= inc
? 0 : 15;
6757 CORE_ADDR xfer_addr
= dsc
->u
.block
.xfer_addr
;
6758 int exception_return
= dsc
->u
.block
.load
&& dsc
->u
.block
.user
6759 && (regmask
& 0x8000) != 0;
6760 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6761 int do_transfer
= condition_true (dsc
->u
.block
.cond
, status
);
6762 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6767 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6768 sensible we can do here. Complain loudly. */
6769 if (exception_return
)
6770 error (_("Cannot single-step exception return"));
6772 /* We don't handle any stores here for now. */
6773 gdb_assert (dsc
->u
.block
.load
!= 0);
6775 if (debug_displaced
)
6776 fprintf_unfiltered (gdb_stdlog
, "displaced: emulating block transfer: "
6777 "%s %s %s\n", dsc
->u
.block
.load
? "ldm" : "stm",
6778 dsc
->u
.block
.increment
? "inc" : "dec",
6779 dsc
->u
.block
.before
? "before" : "after");
6786 while (regno
<= ARM_PC_REGNUM
&& (regmask
& (1 << regno
)) == 0)
6789 while (regno
>= 0 && (regmask
& (1 << regno
)) == 0)
6792 xfer_addr
+= bump_before
;
6794 memword
= read_memory_unsigned_integer (xfer_addr
, 4, byte_order
);
6795 displaced_write_reg (regs
, dsc
, regno
, memword
, LOAD_WRITE_PC
);
6797 xfer_addr
+= bump_after
;
6799 regmask
&= ~(1 << regno
);
6802 if (dsc
->u
.block
.writeback
)
6803 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, xfer_addr
,
6807 /* Clean up an STM which included the PC in the register list. */
6810 cleanup_block_store_pc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6811 struct displaced_step_closure
*dsc
)
6813 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6814 int store_executed
= condition_true (dsc
->u
.block
.cond
, status
);
6815 CORE_ADDR pc_stored_at
, transferred_regs
= bitcount (dsc
->u
.block
.regmask
);
6816 CORE_ADDR stm_insn_addr
;
6819 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6821 /* If condition code fails, there's nothing else to do. */
6822 if (!store_executed
)
6825 if (dsc
->u
.block
.increment
)
6827 pc_stored_at
= dsc
->u
.block
.xfer_addr
+ 4 * transferred_regs
;
6829 if (dsc
->u
.block
.before
)
6834 pc_stored_at
= dsc
->u
.block
.xfer_addr
;
6836 if (dsc
->u
.block
.before
)
6840 pc_val
= read_memory_unsigned_integer (pc_stored_at
, 4, byte_order
);
6841 stm_insn_addr
= dsc
->scratch_base
;
6842 offset
= pc_val
- stm_insn_addr
;
6844 if (debug_displaced
)
6845 fprintf_unfiltered (gdb_stdlog
, "displaced: detected PC offset %.8lx for "
6846 "STM instruction\n", offset
);
6848 /* Rewrite the stored PC to the proper value for the non-displaced original
6850 write_memory_unsigned_integer (pc_stored_at
, 4, byte_order
,
6851 dsc
->insn_addr
+ offset
);
6854 /* Clean up an LDM which includes the PC in the register list. We clumped all
6855 the registers in the transferred list into a contiguous range r0...rX (to
6856 avoid loading PC directly and losing control of the debugged program), so we
6857 must undo that here. */
6860 cleanup_block_load_pc (struct gdbarch
*gdbarch
,
6861 struct regcache
*regs
,
6862 struct displaced_step_closure
*dsc
)
6864 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6865 int load_executed
= condition_true (dsc
->u
.block
.cond
, status
), i
;
6866 unsigned int mask
= dsc
->u
.block
.regmask
, write_reg
= ARM_PC_REGNUM
;
6867 unsigned int regs_loaded
= bitcount (mask
);
6868 unsigned int num_to_shuffle
= regs_loaded
, clobbered
;
6870 /* The method employed here will fail if the register list is fully populated
6871 (we need to avoid loading PC directly). */
6872 gdb_assert (num_to_shuffle
< 16);
6877 clobbered
= (1 << num_to_shuffle
) - 1;
6879 while (num_to_shuffle
> 0)
6881 if ((mask
& (1 << write_reg
)) != 0)
6883 unsigned int read_reg
= num_to_shuffle
- 1;
6885 if (read_reg
!= write_reg
)
6887 ULONGEST rval
= displaced_read_reg (regs
, dsc
, read_reg
);
6888 displaced_write_reg (regs
, dsc
, write_reg
, rval
, LOAD_WRITE_PC
);
6889 if (debug_displaced
)
6890 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: move "
6891 "loaded register r%d to r%d\n"), read_reg
,
6894 else if (debug_displaced
)
6895 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: register "
6896 "r%d already in the right place\n"),
6899 clobbered
&= ~(1 << write_reg
);
6907 /* Restore any registers we scribbled over. */
6908 for (write_reg
= 0; clobbered
!= 0; write_reg
++)
6910 if ((clobbered
& (1 << write_reg
)) != 0)
6912 displaced_write_reg (regs
, dsc
, write_reg
, dsc
->tmp
[write_reg
],
6914 if (debug_displaced
)
6915 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: restored "
6916 "clobbered register r%d\n"), write_reg
);
6917 clobbered
&= ~(1 << write_reg
);
6921 /* Perform register writeback manually. */
6922 if (dsc
->u
.block
.writeback
)
6924 ULONGEST new_rn_val
= dsc
->u
.block
.xfer_addr
;
6926 if (dsc
->u
.block
.increment
)
6927 new_rn_val
+= regs_loaded
* 4;
6929 new_rn_val
-= regs_loaded
* 4;
6931 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, new_rn_val
,
6936 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6937 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6940 arm_copy_block_xfer (struct gdbarch
*gdbarch
, uint32_t insn
,
6941 struct regcache
*regs
,
6942 struct displaced_step_closure
*dsc
)
6944 int load
= bit (insn
, 20);
6945 int user
= bit (insn
, 22);
6946 int increment
= bit (insn
, 23);
6947 int before
= bit (insn
, 24);
6948 int writeback
= bit (insn
, 21);
6949 int rn
= bits (insn
, 16, 19);
6951 /* Block transfers which don't mention PC can be run directly
6953 if (rn
!= ARM_PC_REGNUM
&& (insn
& 0x8000) == 0)
6954 return arm_copy_unmodified (gdbarch
, insn
, "ldm/stm", dsc
);
6956 if (rn
== ARM_PC_REGNUM
)
6958 warning (_("displaced: Unpredictable LDM or STM with "
6959 "base register r15"));
6960 return arm_copy_unmodified (gdbarch
, insn
, "unpredictable ldm/stm", dsc
);
6963 if (debug_displaced
)
6964 fprintf_unfiltered (gdb_stdlog
, "displaced: copying block transfer insn "
6965 "%.8lx\n", (unsigned long) insn
);
6967 dsc
->u
.block
.xfer_addr
= displaced_read_reg (regs
, dsc
, rn
);
6968 dsc
->u
.block
.rn
= rn
;
6970 dsc
->u
.block
.load
= load
;
6971 dsc
->u
.block
.user
= user
;
6972 dsc
->u
.block
.increment
= increment
;
6973 dsc
->u
.block
.before
= before
;
6974 dsc
->u
.block
.writeback
= writeback
;
6975 dsc
->u
.block
.cond
= bits (insn
, 28, 31);
6977 dsc
->u
.block
.regmask
= insn
& 0xffff;
6981 if ((insn
& 0xffff) == 0xffff)
6983 /* LDM with a fully-populated register list. This case is
6984 particularly tricky. Implement for now by fully emulating the
6985 instruction (which might not behave perfectly in all cases, but
6986 these instructions should be rare enough for that not to matter
6988 dsc
->modinsn
[0] = ARM_NOP
;
6990 dsc
->cleanup
= &cleanup_block_load_all
;
6994 /* LDM of a list of registers which includes PC. Implement by
6995 rewriting the list of registers to be transferred into a
6996 contiguous chunk r0...rX before doing the transfer, then shuffling
6997 registers into the correct places in the cleanup routine. */
6998 unsigned int regmask
= insn
& 0xffff;
6999 unsigned int num_in_list
= bitcount (regmask
), new_regmask
, bit
= 1;
7000 unsigned int to
= 0, from
= 0, i
, new_rn
;
7002 for (i
= 0; i
< num_in_list
; i
++)
7003 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
7005 /* Writeback makes things complicated. We need to avoid clobbering
7006 the base register with one of the registers in our modified
7007 register list, but just using a different register can't work in
7010 ldm r14!, {r0-r13,pc}
7012 which would need to be rewritten as:
7016 but that can't work, because there's no free register for N.
7018 Solve this by turning off the writeback bit, and emulating
7019 writeback manually in the cleanup routine. */
7024 new_regmask
= (1 << num_in_list
) - 1;
7026 if (debug_displaced
)
7027 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM r%d%s, "
7028 "{..., pc}: original reg list %.4x, modified "
7029 "list %.4x\n"), rn
, writeback
? "!" : "",
7030 (int) insn
& 0xffff, new_regmask
);
7032 dsc
->modinsn
[0] = (insn
& ~0xffff) | (new_regmask
& 0xffff);
7034 dsc
->cleanup
= &cleanup_block_load_pc
;
7039 /* STM of a list of registers which includes PC. Run the instruction
7040 as-is, but out of line: this will store the wrong value for the PC,
7041 so we must manually fix up the memory in the cleanup routine.
7042 Doing things this way has the advantage that we can auto-detect
7043 the offset of the PC write (which is architecture-dependent) in
7044 the cleanup routine. */
7045 dsc
->modinsn
[0] = insn
;
7047 dsc
->cleanup
= &cleanup_block_store_pc
;
7054 thumb2_copy_block_xfer (struct gdbarch
*gdbarch
, uint16_t insn1
, uint16_t insn2
,
7055 struct regcache
*regs
,
7056 struct displaced_step_closure
*dsc
)
7058 int rn
= bits (insn1
, 0, 3);
7059 int load
= bit (insn1
, 4);
7060 int writeback
= bit (insn1
, 5);
7062 /* Block transfers which don't mention PC can be run directly
7064 if (rn
!= ARM_PC_REGNUM
&& (insn2
& 0x8000) == 0)
7065 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "ldm/stm", dsc
);
7067 if (rn
== ARM_PC_REGNUM
)
7069 warning (_("displaced: Unpredictable LDM or STM with "
7070 "base register r15"));
7071 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7072 "unpredictable ldm/stm", dsc
);
7075 if (debug_displaced
)
7076 fprintf_unfiltered (gdb_stdlog
, "displaced: copying block transfer insn "
7077 "%.4x%.4x\n", insn1
, insn2
);
7079 /* Clear bit 13, since it should be always zero. */
7080 dsc
->u
.block
.regmask
= (insn2
& 0xdfff);
7081 dsc
->u
.block
.rn
= rn
;
7083 dsc
->u
.block
.load
= load
;
7084 dsc
->u
.block
.user
= 0;
7085 dsc
->u
.block
.increment
= bit (insn1
, 7);
7086 dsc
->u
.block
.before
= bit (insn1
, 8);
7087 dsc
->u
.block
.writeback
= writeback
;
7088 dsc
->u
.block
.cond
= INST_AL
;
7089 dsc
->u
.block
.xfer_addr
= displaced_read_reg (regs
, dsc
, rn
);
7093 if (dsc
->u
.block
.regmask
== 0xffff)
7095 /* This branch is impossible to happen. */
7100 unsigned int regmask
= dsc
->u
.block
.regmask
;
7101 unsigned int num_in_list
= bitcount (regmask
), new_regmask
, bit
= 1;
7102 unsigned int to
= 0, from
= 0, i
, new_rn
;
7104 for (i
= 0; i
< num_in_list
; i
++)
7105 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
7110 new_regmask
= (1 << num_in_list
) - 1;
7112 if (debug_displaced
)
7113 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM r%d%s, "
7114 "{..., pc}: original reg list %.4x, modified "
7115 "list %.4x\n"), rn
, writeback
? "!" : "",
7116 (int) dsc
->u
.block
.regmask
, new_regmask
);
7118 dsc
->modinsn
[0] = insn1
;
7119 dsc
->modinsn
[1] = (new_regmask
& 0xffff);
7122 dsc
->cleanup
= &cleanup_block_load_pc
;
7127 dsc
->modinsn
[0] = insn1
;
7128 dsc
->modinsn
[1] = insn2
;
7130 dsc
->cleanup
= &cleanup_block_store_pc
;
7135 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7136 for Linux, where some SVC instructions must be treated specially. */
7139 cleanup_svc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7140 struct displaced_step_closure
*dsc
)
7142 CORE_ADDR resume_addr
= dsc
->insn_addr
+ dsc
->insn_size
;
7144 if (debug_displaced
)
7145 fprintf_unfiltered (gdb_stdlog
, "displaced: cleanup for svc, resume at "
7146 "%.8lx\n", (unsigned long) resume_addr
);
7148 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, resume_addr
, BRANCH_WRITE_PC
);
7152 /* Common copy routine for svc instruciton. */
/* Common part of copying an SVC instruction for displaced stepping.
   The instruction itself is executed unmodified; cleanup resumes after
   the original instruction.  OS-specific code may override this via
   dsc->u.svc.copy_svc_os.  */

static int
install_svc (struct gdbarch *gdbarch, struct regcache *regs,
             struct displaced_step_closure *dsc)
{
  /* Preparation: none.
     Insn: unmodified svc.
     Cleanup: pc <- insn_addr + insn_size.  */

  /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
     instruction.  */
  dsc->wrote_to_pc = 1;

  /* Allow OS-specific code to override SVC handling.  */
  if (dsc->u.svc.copy_svc_os)
    return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
  else
    {
      dsc->cleanup = &cleanup_svc;
      return 0;
    }
}
/* Copy an ARM-mode SVC instruction for displaced stepping: the insn is
   copied unmodified and install_svc sets up the cleanup.  */

static int
arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
              struct regcache *regs, struct displaced_step_closure *dsc)
{
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
			(unsigned long) insn);

  /* The SVC is executed as-is in the scratch area.  */
  dsc->modinsn[0] = insn;

  return install_svc (gdbarch, regs, dsc);
}
/* Copy a Thumb-mode SVC instruction for displaced stepping; same scheme
   as arm_copy_svc, but for a 16-bit insn.  */

static int
thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
                struct regcache *regs, struct displaced_step_closure *dsc)
{
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
			insn);

  /* The SVC is executed as-is in the scratch area.  */
  dsc->modinsn[0] = insn;

  return install_svc (gdbarch, regs, dsc);
}
7204 /* Copy undefined instructions. */
/* Copy an undefined ARM instruction unmodified: executing it in the
   scratch area will raise the same undefined-instruction exception it
   would have raised in place.  */

static int
arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
                struct displaced_step_closure *dsc)
{
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying undefined insn %.8lx\n",
			(unsigned long) insn);

  dsc->modinsn[0] = insn;

  return 0;
}
/* Copy an undefined 32-bit Thumb-2 instruction (two halfwords)
   unmodified, for the same reason as arm_copy_undef.  */

static int
thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
                        struct displaced_step_closure *dsc)
{
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
			"%.4x %.4x\n", (unsigned short) insn1,
			(unsigned short) insn2);

  dsc->modinsn[0] = insn1;
  dsc->modinsn[1] = insn2;

  return 0;
}
7237 /* Copy unpredictable instructions. */
/* Copy an unpredictable ARM instruction unmodified; whatever it does in
   place it will also do in the scratch area.  */

static int
arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
                 struct displaced_step_closure *dsc)
{
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
			"%.8lx\n", (unsigned long) insn);

  dsc->modinsn[0] = insn;

  return 0;
}
7252 /* The decode_* functions are instruction decoding helpers. They mostly follow
7253 the presentation in the ARM ARM. */
/* Decode the miscellaneous / memory-hint / advanced-SIMD group of
   unconditional ARM instructions (ARM ARM section A5.7.1) and dispatch
   each to the appropriate copy routine.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
                              struct regcache *regs,
                              struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      /* pld/pldw with rn == pc is unpredictable.  */
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);

      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */

      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);

      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);

      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
/* Decode the unconditional (cond == 0b1111) ARM instruction space
   (ARM ARM section A5.7) and dispatch to the appropriate copy routine.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
                          struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7398 /* Decode miscellaneous instructions in dp/misc encoding space. */
/* Decode miscellaneous instructions in the data-processing/misc encoding
   space (ARM ARM section A5.2.12) and dispatch to the copy routines.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
                          struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);
  unsigned int op1 = bits (insn, 16, 19);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				    regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
/* Decode the data-processing / miscellaneous instruction space
   (ARM ARM section A5.2) and dispatch to the copy routines.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
                    struct regcache *regs,
                    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode the load/store word and unsigned byte instruction space
   (ARM ARM section A5.3).  The three trailing int arguments to
   arm_copy_ldr_str_ldrb_strb are (load, size, usermode) respectively.  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
                             struct regcache *regs,
                             struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
/* Decode the media instruction space (ARM ARM section A5.4) and
   dispatch to the copy routines.  None of these can touch the PC in a
   way that needs modification, so everything legal is copied
   unmodified.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
                  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
				  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode branch / branch-with-link / block-transfer instructions
   (ARM ARM section A5.5): bit 25 distinguishes B/BL/BLX from
   LDM/STM.  */

static int
arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
                        struct regcache *regs,
                        struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
  else
    return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
}
/* Decode VFP/Neon extension register load/store instructions
   (ARM ARM section A7.6) and dispatch to the copy routines.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
                          struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7633 /* Decode shifted register instructions. */
/* Decode Thumb-2 data-processing (shifted register) instructions.
   The PC is only allowed as an operand in MOV, so only that case needs
   the modified-ALU copy routine.  */

static int
thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
                            uint16_t insn2, struct regcache *regs,
                            struct displaced_step_closure *dsc)
{
  /* PC is only allowed to be used in instruction MOV.  */

  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn = bits (insn1, 0, 3);

  if (op == 0x2 && rn == 0xf) /* MOV */
    return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"dp (shift reg)", dsc);
}
7653 /* Decode extension register load/store. Exactly the same as
7654 arm_decode_ext_reg_ld_st. */
/* Decode extension register load/store.  Exactly the same encoding
   layout as arm_decode_ext_reg_ld_st, but for Thumb-2 halfword pairs.  */

static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
                             uint16_t insn2, struct regcache *regs,
                             struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c: /* 01x00 */
    case 0x0a: case 0x0e: /* 01x10 */
    case 0x12: case 0x16: /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d: /* 01x01 */
    case 0x0b: case 0x0f: /* 01x11 */
    case 0x13: case 0x17: /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode the SVC / coprocessor instruction space (ARM ARM section A5.6)
   and dispatch to the copy routines.  Coprocessor numbers 0b101x denote
   VFP/Neon.  */

static int
arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
                      struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 25);
  int op = bit (insn, 4);
  unsigned int coproc = bits (insn, 8, 11);
  unsigned int rn = bits (insn, 16, 19);

  if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
    return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* stc/stc2.  */
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* ldc/ldc2 imm/lit.  */
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x3e) == 0x00)
    return arm_copy_undef (gdbarch, insn, dsc);
  else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
    return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
  else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
  else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
  else if ((op1 & 0x30) == 0x20 && !op)
    {
      if ((coproc & 0xe) == 0xa)
	return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
    }
  else if ((op1 & 0x30) == 0x20 && op)
    return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
  else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
  else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
  else if ((op1 & 0x30) == 0x30)
    return arm_copy_svc (gdbarch, insn, regs, dsc);
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
}
/* Decode the Thumb-2 coprocessor / SIMD instruction space and dispatch
   to the copy routines.  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
                         uint16_t insn2, struct regcache *regs,
                         struct displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int op1 = bits (insn1, 4, 9);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);
  unsigned int rn = bits (insn1, 0, 3);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0) /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	  /*coproc is 101x.  SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0) /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
/* Common preparation for PC-relative address generation (ADR): copy the
   (displaced-adjusted) PC value into Rd so the copied ADD/SUB computes
   the same address it would have in place.  */

static void
install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
                     struct displaced_step_closure *dsc, int rd)
{
  /* ADR Rd, #imm

     Rewrite as:

     Preparation: Rd <- PC
     Insn: ADD Rd, #imm
     Cleanup: Null.
  */

  /* Rd <- PC */
  int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
  displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
}
/* Copy a 16-bit Thumb PC-relative address calculation (ADR) by
   rewriting it as an ADDS on a register pre-loaded with the PC.  */

static int
thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
                              struct displaced_step_closure *dsc,
                              int rd, unsigned int imm)
{
  /* Encoding T2: ADDS Rd, #imm */
  dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
/* Extract Rd and the 8-bit immediate from a 16-bit Thumb ADR and hand
   off to thumb_copy_pc_relative_16bit.  */

static int
thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
                                struct regcache *regs,
                                struct displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn, 8, 10);
  unsigned int imm8 = bits (insn, 0, 7);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d insn %.4x\n",
			rd, imm8, insn);

  return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
}
/* Copy a 32-bit Thumb-2 PC-relative address calculation (ADR.W) by
   rewriting it as ADD/SUB Rd, Rd, #imm with Rd pre-loaded with PC.  */

static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
                              uint16_t insn2, struct regcache *regs,
                              struct displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;
  unsigned int imm_i = insn1 & 0x0400; /* Clear all bits except bit 10.  */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
			rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7)) /* Encoding T2 */
    {
      /* Encoding T3: SUB Rd, Rd, #imm */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else /* Encoding T3 */
    {
      /* Encoding T3: ADD Rd, Rd, #imm */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
/* Copy a 16-bit Thumb PC-relative load (LDR Rt, [PC, #imm8]) for
   displaced stepping by rewriting it as a register-offset load through
   scratch registers R2/R3, with the result landing in R0.  */

static int
thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
                              struct regcache *regs,
                              struct displaced_step_closure *dsc)
{
  unsigned int rt = bits (insn1, 8, 10);
  unsigned int pc;
  int imm8 = (bits (insn1, 0, 7) << 2);
  CORE_ADDR from = dsc->insn_addr;

  /* LDR Rd, #imm8

     Rewrite as:

     Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;

     Insn: LDR R0, [R2, R3];
     Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb ldr r%d [pc #%d]\n"
			, rt, imm8);

  /* Save the registers the rewritten sequence clobbers.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
  /* The assembler calculates the required value of the offset from the
     Align(PC,4) value of this instruction to the label.  */
  pc = pc & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);

  dsc->rd = rt;
  dsc->u.ldst.xfersize = 4;
  dsc->u.ldst.rn = 0;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/

  dsc->cleanup = &cleanup_load;

  return 0;
}
7915 /* Copy Thumb cbnz/cbz instruction. */
/* Copy a Thumb CBNZ/CBZ instruction for displaced stepping: evaluate
   the condition now (the register value cannot change during the step),
   replace the insn with a NOP, and let cleanup_branch write the PC.  */

static int
thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
                     struct regcache *regs,
                     struct displaced_step_closure *dsc)
{
  int non_zero = bit (insn1, 11);
  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
  CORE_ADDR from = dsc->insn_addr;
  int rn = bits (insn1, 0, 2);
  int rn_val = displaced_read_reg (regs, dsc, rn);

  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
     condition is false, let it be, cleanup_branch will do nothing.  */
  if (dsc->u.branch.cond)
    {
      dsc->u.branch.cond = INST_AL;
      dsc->u.branch.dest = from + 4 + imm5;
    }
  else
    dsc->u.branch.dest = from + 2;

  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
			rn, rn_val, insn1, dsc->u.branch.dest);

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;
  return 0;
}
7954 /* Copy Table Branch Byte/Halfword */
/* Copy a Thumb-2 table branch (TBB/TBH): read the branch table entry
   from target memory now, compute the destination, and let
   cleanup_branch perform the PC write.  */

static int
thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
                          uint16_t insn2, struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  ULONGEST rn_val, rm_val;
  int is_tbh = bit (insn2, 4);
  CORE_ADDR halfwords = 0;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));

  if (is_tbh)
    {
      /* TBH: table of halfword offsets, indexed by Rm * 2.  */
      gdb_byte buf[2];

      target_read_memory (rn_val + 2 * rm_val, buf, 2);
      halfwords = extract_unsigned_integer (buf, 2, byte_order);
    }
  else
    {
      /* TBB: table of byte offsets, indexed by Rm.  */
      gdb_byte buf[1];

      target_read_memory (rn_val + rm_val, buf, 1);
      halfwords = extract_unsigned_integer (buf, 1, byte_order);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
			" offset 0x%x\n", is_tbh ? "tbh" : "tbb",
			(unsigned int) rn_val, (unsigned int) rm_val,
			(unsigned int) halfwords);

  dsc->u.branch.cond = INST_AL;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  /* Branch offsets are in halfwords, relative to the insn's PC (+4).  */
  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
/* Cleanup for the full-register-list POP {r0-r7, pc} rewrite: the new
   PC value was popped into r7, r7's value was staged in r8, and r8's
   original value was saved in tmp[0].  Unwind that chain.  */

static void
cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  int val;

  /* PC <- r7; BX_WRITE_PC honours the interworking bit.  */
  val = displaced_read_reg (regs, dsc, 7);
  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);

  /* r7 <- r8 */
  val = displaced_read_reg (regs, dsc, 8);
  displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);

  /* r8 <- tmp[0] */
  displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
}
/* Copy a 16-bit Thumb POP that includes the PC in its register list.  */

static int
thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
                         struct regcache *regs,
                         struct displaced_step_closure *dsc)
{
  dsc->u.block.regmask = insn1 & 0x00ff;

  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
     to :

     (1) register list is full, that is, r0-r7 are used.
     Prepare: tmp[0] <- r8

     POP {r0, r1, ...., r6, r7}; remove PC from reglist
     MOV r8, r7; Move value of r7 to r8;
     POP {r7};  Store PC value into r7.

     Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]

     (2) register list is not full, supposing there are N registers in
     register list (except PC, 0 <= N <= 7).
     Prepare: for each i, 0 - N, tmp[i] <- ri.

     POP {r0, r1, ...., rN};

     Cleanup: Set registers in original reglist from r0 - rN.  Restore r0 - rN
     from tmp[] properly.
  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
			dsc->u.block.regmask, insn1);

  if (dsc->u.block.regmask == 0xff)
    {
      /* Case (1): full register list.  */
      dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);

      dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
      dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
      dsc->modinsn[2] = 0xbc80; /* POP {r7} */

      dsc->cleanup = &cleanup_pop_pc_16bit_all;
    }
  else
    {
      /* Case (2): pop into the low registers r0..rN and fix up in
	 cleanup_block_load_pc.  */
      unsigned int num_in_list = bitcount (dsc->u.block.regmask);
      unsigned int new_regmask, bit = 1;
      unsigned int to = 0, from = 0, i, new_rn;

      for (i = 0; i < num_in_list + 1; i++)
	dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

      new_regmask = (1 << (num_in_list + 1)) - 1;

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
					  "{..., pc}: original reg list %.4x,"
					  " modified list %.4x\n"),
			    (int) dsc->u.block.regmask, new_regmask);

      /* Record that the PC is part of the transfer for the cleanup.  */
      dsc->u.block.regmask |= 0x8000;
      dsc->u.block.writeback = 0;
      dsc->u.block.cond = INST_AL;

      dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);

      dsc->cleanup = &cleanup_block_load_pc;
    }

  return 0;
}
/* Top-level decoder for displaced stepping of 16-bit Thumb
   instructions: dispatch on the major opcode fields and fail hard on
   decode errors (a wrongly-decoded insn would corrupt the inferior).  */

static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
                                    struct regcache *regs,
                                    struct displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions.  */
  switch (op_bit_12_15)
    {
      /* Shift (imme), add, subtract, move and compare.  */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3: case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction.  */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
/* Decode 32-bit Thumb-2 load / memory-hint instructions (LDRB/LDRH/LDR,
   PLD/PLI and friends) and dispatch to the copy routines.  Only
   PC-relative (literal) loads need modification.  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
                                 uint16_t insn1, uint16_t insn2,
                                 struct regcache *regs,
                                 struct displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}
      break;

    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf) /* LDRH/LDRSH (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}
      break;

    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf) /* LDR (literal) */
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
	break;
      }

    default: /* Invalid op.  */
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
    }

  return 0;
}
8274 thumb_process_displaced_32bit_insn (struct gdbarch
*gdbarch
, uint16_t insn1
,
8275 uint16_t insn2
, struct regcache
*regs
,
8276 struct displaced_step_closure
*dsc
)
8279 unsigned short op
= bit (insn2
, 15);
8280 unsigned int op1
= bits (insn1
, 11, 12);
8286 switch (bits (insn1
, 9, 10))
8291 /* Load/store {dual, execlusive}, table branch. */
8292 if (bits (insn1
, 7, 8) == 1 && bits (insn1
, 4, 5) == 1
8293 && bits (insn2
, 5, 7) == 0)
8294 err
= thumb2_copy_table_branch (gdbarch
, insn1
, insn2
, regs
,
8297 /* PC is not allowed to use in load/store {dual, exclusive}
8299 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8300 "load/store dual/ex", dsc
);
8302 else /* load/store multiple */
8304 switch (bits (insn1
, 7, 8))
8306 case 0: case 3: /* SRS, RFE */
8307 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8310 case 1: case 2: /* LDM/STM/PUSH/POP */
8311 err
= thumb2_copy_block_xfer (gdbarch
, insn1
, insn2
, regs
, dsc
);
8318 /* Data-processing (shift register). */
8319 err
= thumb2_decode_dp_shift_reg (gdbarch
, insn1
, insn2
, regs
,
8322 default: /* Coprocessor instructions. */
8323 err
= thumb2_decode_svc_copro (gdbarch
, insn1
, insn2
, regs
, dsc
);
8328 case 2: /* op1 = 2 */
8329 if (op
) /* Branch and misc control. */
8331 if (bit (insn2
, 14) /* BLX/BL */
8332 || bit (insn2
, 12) /* Unconditional branch */
8333 || (bits (insn1
, 7, 9) != 0x7)) /* Conditional branch */
8334 err
= thumb2_copy_b_bl_blx (gdbarch
, insn1
, insn2
, regs
, dsc
);
8336 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8341 if (bit (insn1
, 9)) /* Data processing (plain binary imm). */
8343 int op
= bits (insn1
, 4, 8);
8344 int rn
= bits (insn1
, 0, 3);
8345 if ((op
== 0 || op
== 0xa) && rn
== 0xf)
8346 err
= thumb_copy_pc_relative_32bit (gdbarch
, insn1
, insn2
,
8349 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8352 else /* Data processing (modified immeidate) */
8353 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8357 case 3: /* op1 = 3 */
8358 switch (bits (insn1
, 9, 10))
8362 err
= decode_thumb_32bit_ld_mem_hints (gdbarch
, insn1
, insn2
,
8364 else /* NEON Load/Store and Store single data item */
8365 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8366 "neon elt/struct load/store",
8369 case 1: /* op1 = 3, bits (9, 10) == 1 */
8370 switch (bits (insn1
, 7, 8))
8372 case 0: case 1: /* Data processing (register) */
8373 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8376 case 2: /* Multiply and absolute difference */
8377 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8378 "mul/mua/diff", dsc
);
8380 case 3: /* Long multiply and divide */
8381 err
= thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
8386 default: /* Coprocessor instructions */
8387 err
= thumb2_decode_svc_copro (gdbarch
, insn1
, insn2
, regs
, dsc
);
8396 internal_error (__FILE__
, __LINE__
,
8397 _("thumb_process_displaced_32bit_insn: Instruction decode error"));
8402 thumb_process_displaced_insn (struct gdbarch
*gdbarch
, CORE_ADDR from
,
8403 CORE_ADDR to
, struct regcache
*regs
,
8404 struct displaced_step_closure
*dsc
)
8406 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8408 = read_memory_unsigned_integer (from
, 2, byte_order_for_code
);
8410 if (debug_displaced
)
8411 fprintf_unfiltered (gdb_stdlog
, "displaced: process thumb insn %.4x "
8412 "at %.8lx\n", insn1
, (unsigned long) from
);
8415 dsc
->insn_size
= thumb_insn_size (insn1
);
8416 if (thumb_insn_size (insn1
) == 4)
8419 = read_memory_unsigned_integer (from
+ 2, 2, byte_order_for_code
);
8420 thumb_process_displaced_32bit_insn (gdbarch
, insn1
, insn2
, regs
, dsc
);
8423 thumb_process_displaced_16bit_insn (gdbarch
, insn1
, regs
, dsc
);
8427 arm_process_displaced_insn (struct gdbarch
*gdbarch
, CORE_ADDR from
,
8428 CORE_ADDR to
, struct regcache
*regs
,
8429 struct displaced_step_closure
*dsc
)
8432 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8435 /* Most displaced instructions use a 1-instruction scratch space, so set this
8436 here and override below if/when necessary. */
8438 dsc
->insn_addr
= from
;
8439 dsc
->scratch_base
= to
;
8440 dsc
->cleanup
= NULL
;
8441 dsc
->wrote_to_pc
= 0;
8443 if (!displaced_in_arm_mode (regs
))
8444 return thumb_process_displaced_insn (gdbarch
, from
, to
, regs
, dsc
);
8448 insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
8449 if (debug_displaced
)
8450 fprintf_unfiltered (gdb_stdlog
, "displaced: stepping insn %.8lx "
8451 "at %.8lx\n", (unsigned long) insn
,
8452 (unsigned long) from
);
8454 if ((insn
& 0xf0000000) == 0xf0000000)
8455 err
= arm_decode_unconditional (gdbarch
, insn
, regs
, dsc
);
8456 else switch (((insn
& 0x10) >> 4) | ((insn
& 0xe000000) >> 24))
8458 case 0x0: case 0x1: case 0x2: case 0x3:
8459 err
= arm_decode_dp_misc (gdbarch
, insn
, regs
, dsc
);
8462 case 0x4: case 0x5: case 0x6:
8463 err
= arm_decode_ld_st_word_ubyte (gdbarch
, insn
, regs
, dsc
);
8467 err
= arm_decode_media (gdbarch
, insn
, dsc
);
8470 case 0x8: case 0x9: case 0xa: case 0xb:
8471 err
= arm_decode_b_bl_ldmstm (gdbarch
, insn
, regs
, dsc
);
8474 case 0xc: case 0xd: case 0xe: case 0xf:
8475 err
= arm_decode_svc_copro (gdbarch
, insn
, to
, regs
, dsc
);
8480 internal_error (__FILE__
, __LINE__
,
8481 _("arm_process_displaced_insn: Instruction decode error"));
8484 /* Actually set up the scratch space for a displaced instruction. */
8487 arm_displaced_init_closure (struct gdbarch
*gdbarch
, CORE_ADDR from
,
8488 CORE_ADDR to
, struct displaced_step_closure
*dsc
)
8490 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
8491 unsigned int i
, len
, offset
;
8492 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8493 int size
= dsc
->is_thumb
? 2 : 4;
8494 const unsigned char *bkp_insn
;
8497 /* Poke modified instruction(s). */
8498 for (i
= 0; i
< dsc
->numinsns
; i
++)
8500 if (debug_displaced
)
8502 fprintf_unfiltered (gdb_stdlog
, "displaced: writing insn ");
8504 fprintf_unfiltered (gdb_stdlog
, "%.8lx",
8507 fprintf_unfiltered (gdb_stdlog
, "%.4x",
8508 (unsigned short)dsc
->modinsn
[i
]);
8510 fprintf_unfiltered (gdb_stdlog
, " at %.8lx\n",
8511 (unsigned long) to
+ offset
);
8514 write_memory_unsigned_integer (to
+ offset
, size
,
8515 byte_order_for_code
,
8520 /* Choose the correct breakpoint instruction. */
8523 bkp_insn
= tdep
->thumb_breakpoint
;
8524 len
= tdep
->thumb_breakpoint_size
;
8528 bkp_insn
= tdep
->arm_breakpoint
;
8529 len
= tdep
->arm_breakpoint_size
;
8532 /* Put breakpoint afterwards. */
8533 write_memory (to
+ offset
, bkp_insn
, len
);
8535 if (debug_displaced
)
8536 fprintf_unfiltered (gdb_stdlog
, "displaced: copy %s->%s: ",
8537 paddress (gdbarch
, from
), paddress (gdbarch
, to
));
8540 /* Entry point for copying an instruction into scratch space for displaced
8543 struct displaced_step_closure
*
8544 arm_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
8545 CORE_ADDR from
, CORE_ADDR to
,
8546 struct regcache
*regs
)
8548 struct displaced_step_closure
*dsc
8549 = xmalloc (sizeof (struct displaced_step_closure
));
8550 arm_process_displaced_insn (gdbarch
, from
, to
, regs
, dsc
);
8551 arm_displaced_init_closure (gdbarch
, from
, to
, dsc
);
8556 /* Entry point for cleaning things up after a displaced instruction has been
8560 arm_displaced_step_fixup (struct gdbarch
*gdbarch
,
8561 struct displaced_step_closure
*dsc
,
8562 CORE_ADDR from
, CORE_ADDR to
,
8563 struct regcache
*regs
)
8566 dsc
->cleanup (gdbarch
, regs
, dsc
);
8568 if (!dsc
->wrote_to_pc
)
8569 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
8570 dsc
->insn_addr
+ dsc
->insn_size
);
8574 #include "bfd-in2.h"
8575 #include "libcoff.h"
8578 gdb_print_insn_arm (bfd_vma memaddr
, disassemble_info
*info
)
8580 struct gdbarch
*gdbarch
= info
->application_data
;
8582 if (arm_pc_is_thumb (gdbarch
, memaddr
))
8584 static asymbol
*asym
;
8585 static combined_entry_type ce
;
8586 static struct coff_symbol_struct csym
;
8587 static struct bfd fake_bfd
;
8588 static bfd_target fake_target
;
8590 if (csym
.native
== NULL
)
8592 /* Create a fake symbol vector containing a Thumb symbol.
8593 This is solely so that the code in print_insn_little_arm()
8594 and print_insn_big_arm() in opcodes/arm-dis.c will detect
8595 the presence of a Thumb symbol and switch to decoding
8596 Thumb instructions. */
8598 fake_target
.flavour
= bfd_target_coff_flavour
;
8599 fake_bfd
.xvec
= &fake_target
;
8600 ce
.u
.syment
.n_sclass
= C_THUMBEXTFUNC
;
8602 csym
.symbol
.the_bfd
= &fake_bfd
;
8603 csym
.symbol
.name
= "fake";
8604 asym
= (asymbol
*) & csym
;
8607 memaddr
= UNMAKE_THUMB_ADDR (memaddr
);
8608 info
->symbols
= &asym
;
8611 info
->symbols
= NULL
;
8613 if (info
->endian
== BFD_ENDIAN_BIG
)
8614 return print_insn_big_arm (memaddr
, info
);
8616 return print_insn_little_arm (memaddr
, info
);
8619 /* The following define instruction sequences that will cause ARM
8620 cpu's to take an undefined instruction trap. These are used to
8621 signal a breakpoint to GDB.
8623 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8624 modes. A different instruction is required for each mode. The ARM
8625 cpu's can also be big or little endian. Thus four different
8626 instructions are needed to support all cases.
8628 Note: ARMv4 defines several new instructions that will take the
8629 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8630 not in fact add the new instructions. The new undefined
8631 instructions in ARMv4 are all instructions that had no defined
8632 behaviour in earlier chips. There is no guarantee that they will
8633 raise an exception, but may be treated as NOP's. In practice, it
8634 may only be safe to rely on instructions matching:
8636 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8637 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8638 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8640 Even this may only true if the condition predicate is true. The
8641 following use a condition predicate of ALWAYS so it is always TRUE.
8643 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8644 and NetBSD all use a software interrupt rather than an undefined
8645 instruction to force a trap. This can be handled by the
8646 abi-specific code during establishment of the gdbarch vector. */
8648 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
8649 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
8650 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
8651 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
8653 static const char arm_default_arm_le_breakpoint
[] = ARM_LE_BREAKPOINT
;
8654 static const char arm_default_arm_be_breakpoint
[] = ARM_BE_BREAKPOINT
;
8655 static const char arm_default_thumb_le_breakpoint
[] = THUMB_LE_BREAKPOINT
;
8656 static const char arm_default_thumb_be_breakpoint
[] = THUMB_BE_BREAKPOINT
;
8658 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8659 the program counter value to determine whether a 16-bit or 32-bit
8660 breakpoint should be used. It returns a pointer to a string of
8661 bytes that encode a breakpoint instruction, stores the length of
8662 the string to *lenptr, and adjusts the program counter (if
8663 necessary) to point to the actual memory location where the
8664 breakpoint should be inserted. */
8666 static const unsigned char *
8667 arm_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
, int *lenptr
)
8669 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
8670 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8672 if (arm_pc_is_thumb (gdbarch
, *pcptr
))
8674 *pcptr
= UNMAKE_THUMB_ADDR (*pcptr
);
8676 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
8677 check whether we are replacing a 32-bit instruction. */
8678 if (tdep
->thumb2_breakpoint
!= NULL
)
8681 if (target_read_memory (*pcptr
, buf
, 2) == 0)
8683 unsigned short inst1
;
8684 inst1
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
8685 if (thumb_insn_size (inst1
) == 4)
8687 *lenptr
= tdep
->thumb2_breakpoint_size
;
8688 return tdep
->thumb2_breakpoint
;
8693 *lenptr
= tdep
->thumb_breakpoint_size
;
8694 return tdep
->thumb_breakpoint
;
8698 *lenptr
= tdep
->arm_breakpoint_size
;
8699 return tdep
->arm_breakpoint
;
8704 arm_remote_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
8707 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
8709 arm_breakpoint_from_pc (gdbarch
, pcptr
, kindptr
);
8711 if (arm_pc_is_thumb (gdbarch
, *pcptr
) && *kindptr
== 4)
8712 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8713 that this is not confused with a 32-bit ARM breakpoint. */
8717 /* Extract from an array REGBUF containing the (raw) register state a
8718 function return value of type TYPE, and copy that, in virtual
8719 format, into VALBUF. */
8722 arm_extract_return_value (struct type
*type
, struct regcache
*regs
,
8725 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
8726 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
8728 if (TYPE_CODE_FLT
== TYPE_CODE (type
))
8730 switch (gdbarch_tdep (gdbarch
)->fp_model
)
8734 /* The value is in register F0 in internal format. We need to
8735 extract the raw value and then convert it to the desired
8737 bfd_byte tmpbuf
[FP_REGISTER_SIZE
];
8739 regcache_cooked_read (regs
, ARM_F0_REGNUM
, tmpbuf
);
8740 convert_from_extended (floatformat_from_type (type
), tmpbuf
,
8741 valbuf
, gdbarch_byte_order (gdbarch
));
8745 case ARM_FLOAT_SOFT_FPA
:
8746 case ARM_FLOAT_SOFT_VFP
:
8747 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8748 not using the VFP ABI code. */
8750 regcache_cooked_read (regs
, ARM_A1_REGNUM
, valbuf
);
8751 if (TYPE_LENGTH (type
) > 4)
8752 regcache_cooked_read (regs
, ARM_A1_REGNUM
+ 1,
8753 valbuf
+ INT_REGISTER_SIZE
);
8757 internal_error (__FILE__
, __LINE__
,
8758 _("arm_extract_return_value: "
8759 "Floating point model not supported"));
8763 else if (TYPE_CODE (type
) == TYPE_CODE_INT
8764 || TYPE_CODE (type
) == TYPE_CODE_CHAR
8765 || TYPE_CODE (type
) == TYPE_CODE_BOOL
8766 || TYPE_CODE (type
) == TYPE_CODE_PTR
8767 || TYPE_CODE (type
) == TYPE_CODE_REF
8768 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
8770 /* If the type is a plain integer, then the access is
8771 straight-forward. Otherwise we have to play around a bit
8773 int len
= TYPE_LENGTH (type
);
8774 int regno
= ARM_A1_REGNUM
;
8779 /* By using store_unsigned_integer we avoid having to do
8780 anything special for small big-endian values. */
8781 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
8782 store_unsigned_integer (valbuf
,
8783 (len
> INT_REGISTER_SIZE
8784 ? INT_REGISTER_SIZE
: len
),
8786 len
-= INT_REGISTER_SIZE
;
8787 valbuf
+= INT_REGISTER_SIZE
;
8792 /* For a structure or union the behaviour is as if the value had
8793 been stored to word-aligned memory and then loaded into
8794 registers with 32-bit load instruction(s). */
8795 int len
= TYPE_LENGTH (type
);
8796 int regno
= ARM_A1_REGNUM
;
8797 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
8801 regcache_cooked_read (regs
, regno
++, tmpbuf
);
8802 memcpy (valbuf
, tmpbuf
,
8803 len
> INT_REGISTER_SIZE
? INT_REGISTER_SIZE
: len
);
8804 len
-= INT_REGISTER_SIZE
;
8805 valbuf
+= INT_REGISTER_SIZE
;
8811 /* Will a function return an aggregate type in memory or in a
8812 register? Return 0 if an aggregate type can be returned in a
8813 register, 1 if it must be returned in memory. */
8816 arm_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
8819 enum type_code code
;
8821 CHECK_TYPEDEF (type
);
8823 /* In the ARM ABI, "integer" like aggregate types are returned in
8824 registers. For an aggregate type to be integer like, its size
8825 must be less than or equal to INT_REGISTER_SIZE and the
8826 offset of each addressable subfield must be zero. Note that bit
8827 fields are not addressable, and all addressable subfields of
8828 unions always start at offset zero.
8830 This function is based on the behaviour of GCC 2.95.1.
8831 See: gcc/arm.c: arm_return_in_memory() for details.
8833 Note: All versions of GCC before GCC 2.95.2 do not set up the
8834 parameters correctly for a function returning the following
8835 structure: struct { float f;}; This should be returned in memory,
8836 not a register. Richard Earnshaw sent me a patch, but I do not
8837 know of any way to detect if a function like the above has been
8838 compiled with the correct calling convention. */
8840 /* All aggregate types that won't fit in a register must be returned
8842 if (TYPE_LENGTH (type
) > INT_REGISTER_SIZE
)
8847 /* The AAPCS says all aggregates not larger than a word are returned
8849 if (gdbarch_tdep (gdbarch
)->arm_abi
!= ARM_ABI_APCS
)
8852 /* The only aggregate types that can be returned in a register are
8853 structs and unions. Arrays must be returned in memory. */
8854 code
= TYPE_CODE (type
);
8855 if ((TYPE_CODE_STRUCT
!= code
) && (TYPE_CODE_UNION
!= code
))
8860 /* Assume all other aggregate types can be returned in a register.
8861 Run a check for structures, unions and arrays. */
8864 if ((TYPE_CODE_STRUCT
== code
) || (TYPE_CODE_UNION
== code
))
8867 /* Need to check if this struct/union is "integer" like. For
8868 this to be true, its size must be less than or equal to
8869 INT_REGISTER_SIZE and the offset of each addressable
8870 subfield must be zero. Note that bit fields are not
8871 addressable, and unions always start at offset zero. If any
8872 of the subfields is a floating point type, the struct/union
8873 cannot be an integer type. */
8875 /* For each field in the object, check:
8876 1) Is it FP? --> yes, nRc = 1;
8877 2) Is it addressable (bitpos != 0) and
8878 not packed (bitsize == 0)?
8882 for (i
= 0; i
< TYPE_NFIELDS (type
); i
++)
8884 enum type_code field_type_code
;
8885 field_type_code
= TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type
,
8888 /* Is it a floating point type field? */
8889 if (field_type_code
== TYPE_CODE_FLT
)
8895 /* If bitpos != 0, then we have to care about it. */
8896 if (TYPE_FIELD_BITPOS (type
, i
) != 0)
8898 /* Bitfields are not addressable. If the field bitsize is
8899 zero, then the field is not packed. Hence it cannot be
8900 a bitfield or any other packed type. */
8901 if (TYPE_FIELD_BITSIZE (type
, i
) == 0)
8913 /* Write into appropriate registers a function return value of type
8914 TYPE, given in virtual format. */
8917 arm_store_return_value (struct type
*type
, struct regcache
*regs
,
8918 const gdb_byte
*valbuf
)
8920 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
8921 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
8923 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
8925 char buf
[MAX_REGISTER_SIZE
];
8927 switch (gdbarch_tdep (gdbarch
)->fp_model
)
8931 convert_to_extended (floatformat_from_type (type
), buf
, valbuf
,
8932 gdbarch_byte_order (gdbarch
));
8933 regcache_cooked_write (regs
, ARM_F0_REGNUM
, buf
);
8936 case ARM_FLOAT_SOFT_FPA
:
8937 case ARM_FLOAT_SOFT_VFP
:
8938 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8939 not using the VFP ABI code. */
8941 regcache_cooked_write (regs
, ARM_A1_REGNUM
, valbuf
);
8942 if (TYPE_LENGTH (type
) > 4)
8943 regcache_cooked_write (regs
, ARM_A1_REGNUM
+ 1,
8944 valbuf
+ INT_REGISTER_SIZE
);
8948 internal_error (__FILE__
, __LINE__
,
8949 _("arm_store_return_value: Floating "
8950 "point model not supported"));
8954 else if (TYPE_CODE (type
) == TYPE_CODE_INT
8955 || TYPE_CODE (type
) == TYPE_CODE_CHAR
8956 || TYPE_CODE (type
) == TYPE_CODE_BOOL
8957 || TYPE_CODE (type
) == TYPE_CODE_PTR
8958 || TYPE_CODE (type
) == TYPE_CODE_REF
8959 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
8961 if (TYPE_LENGTH (type
) <= 4)
8963 /* Values of one word or less are zero/sign-extended and
8965 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
8966 LONGEST val
= unpack_long (type
, valbuf
);
8968 store_signed_integer (tmpbuf
, INT_REGISTER_SIZE
, byte_order
, val
);
8969 regcache_cooked_write (regs
, ARM_A1_REGNUM
, tmpbuf
);
8973 /* Integral values greater than one word are stored in consecutive
8974 registers starting with r0. This will always be a multiple of
8975 the regiser size. */
8976 int len
= TYPE_LENGTH (type
);
8977 int regno
= ARM_A1_REGNUM
;
8981 regcache_cooked_write (regs
, regno
++, valbuf
);
8982 len
-= INT_REGISTER_SIZE
;
8983 valbuf
+= INT_REGISTER_SIZE
;
8989 /* For a structure or union the behaviour is as if the value had
8990 been stored to word-aligned memory and then loaded into
8991 registers with 32-bit load instruction(s). */
8992 int len
= TYPE_LENGTH (type
);
8993 int regno
= ARM_A1_REGNUM
;
8994 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
8998 memcpy (tmpbuf
, valbuf
,
8999 len
> INT_REGISTER_SIZE
? INT_REGISTER_SIZE
: len
);
9000 regcache_cooked_write (regs
, regno
++, tmpbuf
);
9001 len
-= INT_REGISTER_SIZE
;
9002 valbuf
+= INT_REGISTER_SIZE
;
9008 /* Handle function return values. */
9010 static enum return_value_convention
9011 arm_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
9012 struct type
*valtype
, struct regcache
*regcache
,
9013 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
9015 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
9016 enum arm_vfp_cprc_base_type vfp_base_type
;
9019 if (arm_vfp_abi_for_function (gdbarch
, func_type
)
9020 && arm_vfp_call_candidate (valtype
, &vfp_base_type
, &vfp_base_count
))
9022 int reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
9023 int unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
9025 for (i
= 0; i
< vfp_base_count
; i
++)
9027 if (reg_char
== 'q')
9030 arm_neon_quad_write (gdbarch
, regcache
, i
,
9031 writebuf
+ i
* unit_length
);
9034 arm_neon_quad_read (gdbarch
, regcache
, i
,
9035 readbuf
+ i
* unit_length
);
9042 sprintf (name_buf
, "%c%d", reg_char
, i
);
9043 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
9046 regcache_cooked_write (regcache
, regnum
,
9047 writebuf
+ i
* unit_length
);
9049 regcache_cooked_read (regcache
, regnum
,
9050 readbuf
+ i
* unit_length
);
9053 return RETURN_VALUE_REGISTER_CONVENTION
;
9056 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
9057 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
9058 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
9060 if (tdep
->struct_return
== pcc_struct_return
9061 || arm_return_in_memory (gdbarch
, valtype
))
9062 return RETURN_VALUE_STRUCT_CONVENTION
;
9065 /* AAPCS returns complex types longer than a register in memory. */
9066 if (tdep
->arm_abi
!= ARM_ABI_APCS
9067 && TYPE_CODE (valtype
) == TYPE_CODE_COMPLEX
9068 && TYPE_LENGTH (valtype
) > INT_REGISTER_SIZE
)
9069 return RETURN_VALUE_STRUCT_CONVENTION
;
9072 arm_store_return_value (valtype
, regcache
, writebuf
);
9075 arm_extract_return_value (valtype
, regcache
, readbuf
);
9077 return RETURN_VALUE_REGISTER_CONVENTION
;
9082 arm_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
9084 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
9085 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
9086 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
9088 char buf
[INT_REGISTER_SIZE
];
9090 jb_addr
= get_frame_register_unsigned (frame
, ARM_A1_REGNUM
);
9092 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
9096 *pc
= extract_unsigned_integer (buf
, INT_REGISTER_SIZE
, byte_order
);
9100 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
9101 return the target PC. Otherwise return 0. */
9104 arm_skip_stub (struct frame_info
*frame
, CORE_ADDR pc
)
9108 CORE_ADDR start_addr
;
9110 /* Find the starting address and name of the function containing the PC. */
9111 if (find_pc_partial_function (pc
, &name
, &start_addr
, NULL
) == 0)
9114 /* If PC is in a Thumb call or return stub, return the address of the
9115 target PC, which is in a register. The thunk functions are called
9116 _call_via_xx, where x is the register name. The possible names
9117 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
9118 functions, named __ARM_call_via_r[0-7]. */
9119 if (strncmp (name
, "_call_via_", 10) == 0
9120 || strncmp (name
, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
9122 /* Use the name suffix to determine which register contains the
9124 static char *table
[15] =
9125 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
9126 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
9129 int offset
= strlen (name
) - 2;
9131 for (regno
= 0; regno
<= 14; regno
++)
9132 if (strcmp (&name
[offset
], table
[regno
]) == 0)
9133 return get_frame_register_unsigned (frame
, regno
);
9136 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
9137 non-interworking calls to foo. We could decode the stubs
9138 to find the target but it's easier to use the symbol table. */
9139 namelen
= strlen (name
);
9140 if (name
[0] == '_' && name
[1] == '_'
9141 && ((namelen
> 2 + strlen ("_from_thumb")
9142 && strncmp (name
+ namelen
- strlen ("_from_thumb"), "_from_thumb",
9143 strlen ("_from_thumb")) == 0)
9144 || (namelen
> 2 + strlen ("_from_arm")
9145 && strncmp (name
+ namelen
- strlen ("_from_arm"), "_from_arm",
9146 strlen ("_from_arm")) == 0)))
9149 int target_len
= namelen
- 2;
9150 struct minimal_symbol
*minsym
;
9151 struct objfile
*objfile
;
9152 struct obj_section
*sec
;
9154 if (name
[namelen
- 1] == 'b')
9155 target_len
-= strlen ("_from_thumb");
9157 target_len
-= strlen ("_from_arm");
9159 target_name
= alloca (target_len
+ 1);
9160 memcpy (target_name
, name
+ 2, target_len
);
9161 target_name
[target_len
] = '\0';
9163 sec
= find_pc_section (pc
);
9164 objfile
= (sec
== NULL
) ? NULL
: sec
->objfile
;
9165 minsym
= lookup_minimal_symbol (target_name
, NULL
, objfile
);
9167 return SYMBOL_VALUE_ADDRESS (minsym
);
9172 return 0; /* not a stub */
9176 set_arm_command (char *args
, int from_tty
)
9178 printf_unfiltered (_("\
9179 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9180 help_list (setarmcmdlist
, "set arm ", all_commands
, gdb_stdout
);
9184 show_arm_command (char *args
, int from_tty
)
9186 cmd_show_list (showarmcmdlist
, from_tty
, "");
9190 arm_update_current_architecture (void)
9192 struct gdbarch_info info
;
9194 /* If the current architecture is not ARM, we have nothing to do. */
9195 if (gdbarch_bfd_arch_info (target_gdbarch
)->arch
!= bfd_arch_arm
)
9198 /* Update the architecture. */
9199 gdbarch_info_init (&info
);
9201 if (!gdbarch_update_p (info
))
9202 internal_error (__FILE__
, __LINE__
, _("could not update architecture"));
9206 set_fp_model_sfunc (char *args
, int from_tty
,
9207 struct cmd_list_element
*c
)
9209 enum arm_float_model fp_model
;
9211 for (fp_model
= ARM_FLOAT_AUTO
; fp_model
!= ARM_FLOAT_LAST
; fp_model
++)
9212 if (strcmp (current_fp_model
, fp_model_strings
[fp_model
]) == 0)
9214 arm_fp_model
= fp_model
;
9218 if (fp_model
== ARM_FLOAT_LAST
)
9219 internal_error (__FILE__
, __LINE__
, _("Invalid fp model accepted: %s."),
9222 arm_update_current_architecture ();
9226 show_fp_model (struct ui_file
*file
, int from_tty
,
9227 struct cmd_list_element
*c
, const char *value
)
9229 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9231 if (arm_fp_model
== ARM_FLOAT_AUTO
9232 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
9233 fprintf_filtered (file
, _("\
9234 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
9235 fp_model_strings
[tdep
->fp_model
]);
9237 fprintf_filtered (file
, _("\
9238 The current ARM floating point model is \"%s\".\n"),
9239 fp_model_strings
[arm_fp_model
]);
9243 arm_set_abi (char *args
, int from_tty
,
9244 struct cmd_list_element
*c
)
9246 enum arm_abi_kind arm_abi
;
9248 for (arm_abi
= ARM_ABI_AUTO
; arm_abi
!= ARM_ABI_LAST
; arm_abi
++)
9249 if (strcmp (arm_abi_string
, arm_abi_strings
[arm_abi
]) == 0)
9251 arm_abi_global
= arm_abi
;
9255 if (arm_abi
== ARM_ABI_LAST
)
9256 internal_error (__FILE__
, __LINE__
, _("Invalid ABI accepted: %s."),
9259 arm_update_current_architecture ();
9263 arm_show_abi (struct ui_file
*file
, int from_tty
,
9264 struct cmd_list_element
*c
, const char *value
)
9266 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9268 if (arm_abi_global
== ARM_ABI_AUTO
9269 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
9270 fprintf_filtered (file
, _("\
9271 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
9272 arm_abi_strings
[tdep
->arm_abi
]);
9274 fprintf_filtered (file
, _("The current ARM ABI is \"%s\".\n"),
9279 arm_show_fallback_mode (struct ui_file
*file
, int from_tty
,
9280 struct cmd_list_element
*c
, const char *value
)
9282 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9284 fprintf_filtered (file
,
9285 _("The current execution mode assumed "
9286 "(when symbols are unavailable) is \"%s\".\n"),
9287 arm_fallback_mode_string
);
9291 arm_show_force_mode (struct ui_file
*file
, int from_tty
,
9292 struct cmd_list_element
*c
, const char *value
)
9294 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9296 fprintf_filtered (file
,
9297 _("The current execution mode assumed "
9298 "(even when symbols are available) is \"%s\".\n"),
9299 arm_force_mode_string
);
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  set_disassembly_style ();
}
9314 /* Return the ARM register name corresponding to register I. */
9316 arm_register_name (struct gdbarch
*gdbarch
, int i
)
9318 const int num_regs
= gdbarch_num_regs (gdbarch
);
9320 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
9321 && i
>= num_regs
&& i
< num_regs
+ 32)
9323 static const char *const vfp_pseudo_names
[] = {
9324 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9325 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9326 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9327 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9330 return vfp_pseudo_names
[i
- num_regs
];
9333 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
9334 && i
>= num_regs
+ 32 && i
< num_regs
+ 32 + 16)
9336 static const char *const neon_pseudo_names
[] = {
9337 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9338 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9341 return neon_pseudo_names
[i
- num_regs
- 32];
9344 if (i
>= ARRAY_SIZE (arm_register_names
))
9345 /* These registers are only supported on targets which supply
9346 an XML description. */
9349 return arm_register_names
[i
];
9353 set_disassembly_style (void)
9357 /* Find the style that the user wants. */
9358 for (current
= 0; current
< num_disassembly_options
; current
++)
9359 if (disassembly_style
== valid_disassembly_styles
[current
])
9361 gdb_assert (current
< num_disassembly_options
);
9363 /* Synchronize the disassembler. */
9364 set_arm_regname_option (current
);
9367 /* Test whether the coff symbol specific value corresponds to a Thumb
9371 coff_sym_is_thumb (int val
)
9373 return (val
== C_THUMBEXT
9374 || val
== C_THUMBSTAT
9375 || val
== C_THUMBEXTFUNC
9376 || val
== C_THUMBSTATFUNC
9377 || val
== C_THUMBLABEL
);
9380 /* arm_coff_make_msymbol_special()
9381 arm_elf_make_msymbol_special()
9383 These functions test whether the COFF or ELF symbol corresponds to
9384 an address in thumb code, and set a "special" bit in a minimal
9385 symbol to indicate that it does. */
9388 arm_elf_make_msymbol_special(asymbol
*sym
, struct minimal_symbol
*msym
)
9390 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type
*)sym
)->internal_elf_sym
)
9391 == ST_BRANCH_TO_THUMB
)
9392 MSYMBOL_SET_SPECIAL (msym
);
/* Mark MSYM special when the COFF symbol value VAL denotes Thumb code.  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
9403 arm_objfile_data_free (struct objfile
*objfile
, void *arg
)
9405 struct arm_per_objfile
*data
= arg
;
9408 for (i
= 0; i
< objfile
->obfd
->section_count
; i
++)
9409 VEC_free (arm_mapping_symbol_s
, data
->section_maps
[i
]);
9413 arm_record_special_symbol (struct gdbarch
*gdbarch
, struct objfile
*objfile
,
9416 const char *name
= bfd_asymbol_name (sym
);
9417 struct arm_per_objfile
*data
;
9418 VEC(arm_mapping_symbol_s
) **map_p
;
9419 struct arm_mapping_symbol new_map_sym
;
9421 gdb_assert (name
[0] == '$');
9422 if (name
[1] != 'a' && name
[1] != 't' && name
[1] != 'd')
9425 data
= objfile_data (objfile
, arm_objfile_data_key
);
9428 data
= OBSTACK_ZALLOC (&objfile
->objfile_obstack
,
9429 struct arm_per_objfile
);
9430 set_objfile_data (objfile
, arm_objfile_data_key
, data
);
9431 data
->section_maps
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
9432 objfile
->obfd
->section_count
,
9433 VEC(arm_mapping_symbol_s
) *);
9435 map_p
= &data
->section_maps
[bfd_get_section (sym
)->index
];
9437 new_map_sym
.value
= sym
->value
;
9438 new_map_sym
.type
= name
[1];
9440 /* Assume that most mapping symbols appear in order of increasing
9441 value. If they were randomly distributed, it would be faster to
9442 always push here and then sort at first use. */
9443 if (!VEC_empty (arm_mapping_symbol_s
, *map_p
))
9445 struct arm_mapping_symbol
*prev_map_sym
;
9447 prev_map_sym
= VEC_last (arm_mapping_symbol_s
, *map_p
);
9448 if (prev_map_sym
->value
>= sym
->value
)
9451 idx
= VEC_lower_bound (arm_mapping_symbol_s
, *map_p
, &new_map_sym
,
9452 arm_compare_mapping_symbols
);
9453 VEC_safe_insert (arm_mapping_symbol_s
, *map_p
, idx
, &new_map_sym
);
9458 VEC_safe_push (arm_mapping_symbol_s
, *map_p
, &new_map_sym
);
9462 arm_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
9464 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
9465 regcache_cooked_write_unsigned (regcache
, ARM_PC_REGNUM
, pc
);
9467 /* If necessary, set the T bit. */
9470 ULONGEST val
, t_bit
;
9471 regcache_cooked_read_unsigned (regcache
, ARM_PS_REGNUM
, &val
);
9472 t_bit
= arm_psr_thumb_bit (gdbarch
);
9473 if (arm_pc_is_thumb (gdbarch
, pc
))
9474 regcache_cooked_write_unsigned (regcache
, ARM_PS_REGNUM
,
9477 regcache_cooked_write_unsigned (regcache
, ARM_PS_REGNUM
,
9482 /* Read the contents of a NEON quad register, by reading from two
9483 double registers. This is used to implement the quad pseudo
9484 registers, and for argument passing in case the quad registers are
9485 missing; vectors are passed in quad registers when using the VFP
9486 ABI, even if a NEON unit is not present. REGNUM is the index of
9487 the quad register, in [0, 15]. */
9489 static enum register_status
9490 arm_neon_quad_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
9491 int regnum
, gdb_byte
*buf
)
9494 gdb_byte reg_buf
[8];
9495 int offset
, double_regnum
;
9496 enum register_status status
;
9498 sprintf (name_buf
, "d%d", regnum
<< 1);
9499 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
9502 /* d0 is always the least significant half of q0. */
9503 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
9508 status
= regcache_raw_read (regcache
, double_regnum
, reg_buf
);
9509 if (status
!= REG_VALID
)
9511 memcpy (buf
+ offset
, reg_buf
, 8);
9513 offset
= 8 - offset
;
9514 status
= regcache_raw_read (regcache
, double_regnum
+ 1, reg_buf
);
9515 if (status
!= REG_VALID
)
9517 memcpy (buf
+ offset
, reg_buf
, 8);
9522 static enum register_status
9523 arm_pseudo_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
9524 int regnum
, gdb_byte
*buf
)
9526 const int num_regs
= gdbarch_num_regs (gdbarch
);
9528 gdb_byte reg_buf
[8];
9529 int offset
, double_regnum
;
9531 gdb_assert (regnum
>= num_regs
);
9534 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
&& regnum
>= 32 && regnum
< 48)
9535 /* Quad-precision register. */
9536 return arm_neon_quad_read (gdbarch
, regcache
, regnum
- 32, buf
);
9539 enum register_status status
;
9541 /* Single-precision register. */
9542 gdb_assert (regnum
< 32);
9544 /* s0 is always the least significant half of d0. */
9545 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
9546 offset
= (regnum
& 1) ? 0 : 4;
9548 offset
= (regnum
& 1) ? 4 : 0;
9550 sprintf (name_buf
, "d%d", regnum
>> 1);
9551 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
9554 status
= regcache_raw_read (regcache
, double_regnum
, reg_buf
);
9555 if (status
== REG_VALID
)
9556 memcpy (buf
, reg_buf
+ offset
, 4);
9561 /* Store the contents of BUF to a NEON quad register, by writing to
9562 two double registers. This is used to implement the quad pseudo
9563 registers, and for argument passing in case the quad registers are
9564 missing; vectors are passed in quad registers when using the VFP
9565 ABI, even if a NEON unit is not present. REGNUM is the index
9566 of the quad register, in [0, 15]. */
9569 arm_neon_quad_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
9570 int regnum
, const gdb_byte
*buf
)
9573 gdb_byte reg_buf
[8];
9574 int offset
, double_regnum
;
9576 sprintf (name_buf
, "d%d", regnum
<< 1);
9577 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
9580 /* d0 is always the least significant half of q0. */
9581 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
9586 regcache_raw_write (regcache
, double_regnum
, buf
+ offset
);
9587 offset
= 8 - offset
;
9588 regcache_raw_write (regcache
, double_regnum
+ 1, buf
+ offset
);
9592 arm_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
9593 int regnum
, const gdb_byte
*buf
)
9595 const int num_regs
= gdbarch_num_regs (gdbarch
);
9597 gdb_byte reg_buf
[8];
9598 int offset
, double_regnum
;
9600 gdb_assert (regnum
>= num_regs
);
9603 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
&& regnum
>= 32 && regnum
< 48)
9604 /* Quad-precision register. */
9605 arm_neon_quad_write (gdbarch
, regcache
, regnum
- 32, buf
);
9608 /* Single-precision register. */
9609 gdb_assert (regnum
< 32);
9611 /* s0 is always the least significant half of d0. */
9612 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
9613 offset
= (regnum
& 1) ? 0 : 4;
9615 offset
= (regnum
& 1) ? 4 : 0;
9617 sprintf (name_buf
, "d%d", regnum
>> 1);
9618 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
9621 regcache_raw_read (regcache
, double_regnum
, reg_buf
);
9622 memcpy (reg_buf
+ offset
, buf
, 4);
9623 regcache_raw_write (regcache
, double_regnum
, reg_buf
);
/* user_reg_add callback: BATON points at the register number of one
   of the standard ARM register aliases; return its current value in
   FRAME.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
9634 static enum gdb_osabi
9635 arm_elf_osabi_sniffer (bfd
*abfd
)
9637 unsigned int elfosabi
;
9638 enum gdb_osabi osabi
= GDB_OSABI_UNKNOWN
;
9640 elfosabi
= elf_elfheader (abfd
)->e_ident
[EI_OSABI
];
9642 if (elfosabi
== ELFOSABI_ARM
)
9643 /* GNU tools use this value. Check note sections in this case,
9645 bfd_map_over_sections (abfd
,
9646 generic_elf_osabi_sniff_abi_tag_sections
,
9649 /* Anything else will be handled by the generic ELF sniffer. */
9654 arm_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
9655 struct reggroup
*group
)
9657 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9658 this, FPS register belongs to save_regroup, restore_reggroup, and
9659 all_reggroup, of course. */
9660 if (regnum
== ARM_FPS_REGNUM
)
9661 return (group
== float_reggroup
9662 || group
== save_reggroup
9663 || group
== restore_reggroup
9664 || group
== all_reggroup
);
9666 return default_register_reggroup_p (gdbarch
, regnum
, group
);
9670 /* For backward-compatibility we allow two 'g' packet lengths with
9671 the remote protocol depending on whether FPA registers are
9672 supplied. M-profile targets do not have FPA registers, but some
9673 stubs already exist in the wild which use a 'g' packet which
9674 supplies them albeit with dummy values. The packet format which
9675 includes FPA registers should be considered deprecated for
9676 M-profile targets. */
9679 arm_register_g_packet_guesses (struct gdbarch
*gdbarch
)
9681 if (gdbarch_tdep (gdbarch
)->is_m
)
9683 /* If we know from the executable this is an M-profile target,
9684 cater for remote targets whose register set layout is the
9685 same as the FPA layout. */
9686 register_remote_g_packet_guess (gdbarch
,
9687 /* r0-r12,sp,lr,pc; f0-f7; fps,cpsr */
9688 (16 * INT_REGISTER_SIZE
)
9689 + (8 * FP_REGISTER_SIZE
)
9690 + (2 * INT_REGISTER_SIZE
),
9691 tdesc_arm_with_m_fpa_layout
);
9693 /* The regular M-profile layout. */
9694 register_remote_g_packet_guess (gdbarch
,
9695 /* r0-r12,sp,lr,pc; xpsr */
9696 (16 * INT_REGISTER_SIZE
)
9697 + INT_REGISTER_SIZE
,
9701 /* Otherwise we don't have a useful guess. */
9705 /* Initialize the current architecture based on INFO. If possible,
9706 re-use an architecture from ARCHES, which is a list of
9707 architectures already created during this debugging session.
9709 Called e.g. at program startup, when reading a core file, and when
9710 reading a binary file. */
9712 static struct gdbarch
*
9713 arm_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
9715 struct gdbarch_tdep
*tdep
;
9716 struct gdbarch
*gdbarch
;
9717 struct gdbarch_list
*best_arch
;
9718 enum arm_abi_kind arm_abi
= arm_abi_global
;
9719 enum arm_float_model fp_model
= arm_fp_model
;
9720 struct tdesc_arch_data
*tdesc_data
= NULL
;
9722 int have_vfp_registers
= 0, have_vfp_pseudos
= 0, have_neon_pseudos
= 0;
9724 int have_fpa_registers
= 1;
9725 const struct target_desc
*tdesc
= info
.target_desc
;
9727 /* If we have an object to base this architecture on, try to determine
9730 if (arm_abi
== ARM_ABI_AUTO
&& info
.abfd
!= NULL
)
9732 int ei_osabi
, e_flags
;
9734 switch (bfd_get_flavour (info
.abfd
))
9736 case bfd_target_aout_flavour
:
9737 /* Assume it's an old APCS-style ABI. */
9738 arm_abi
= ARM_ABI_APCS
;
9741 case bfd_target_coff_flavour
:
9742 /* Assume it's an old APCS-style ABI. */
9744 arm_abi
= ARM_ABI_APCS
;
9747 case bfd_target_elf_flavour
:
9748 ei_osabi
= elf_elfheader (info
.abfd
)->e_ident
[EI_OSABI
];
9749 e_flags
= elf_elfheader (info
.abfd
)->e_flags
;
9751 if (ei_osabi
== ELFOSABI_ARM
)
9753 /* GNU tools used to use this value, but do not for EABI
9754 objects. There's nowhere to tag an EABI version
9755 anyway, so assume APCS. */
9756 arm_abi
= ARM_ABI_APCS
;
9758 else if (ei_osabi
== ELFOSABI_NONE
)
9760 int eabi_ver
= EF_ARM_EABI_VERSION (e_flags
);
9761 int attr_arch
, attr_profile
;
9765 case EF_ARM_EABI_UNKNOWN
:
9766 /* Assume GNU tools. */
9767 arm_abi
= ARM_ABI_APCS
;
9770 case EF_ARM_EABI_VER4
:
9771 case EF_ARM_EABI_VER5
:
9772 arm_abi
= ARM_ABI_AAPCS
;
9773 /* EABI binaries default to VFP float ordering.
9774 They may also contain build attributes that can
9775 be used to identify if the VFP argument-passing
9777 if (fp_model
== ARM_FLOAT_AUTO
)
9780 switch (bfd_elf_get_obj_attr_int (info
.abfd
,
9785 /* "The user intended FP parameter/result
9786 passing to conform to AAPCS, base
9788 fp_model
= ARM_FLOAT_SOFT_VFP
;
9791 /* "The user intended FP parameter/result
9792 passing to conform to AAPCS, VFP
9794 fp_model
= ARM_FLOAT_VFP
;
9797 /* "The user intended FP parameter/result
9798 passing to conform to tool chain-specific
9799 conventions" - we don't know any such
9800 conventions, so leave it as "auto". */
9803 /* Attribute value not mentioned in the
9804 October 2008 ABI, so leave it as
9809 fp_model
= ARM_FLOAT_SOFT_VFP
;
9815 /* Leave it as "auto". */
9816 warning (_("unknown ARM EABI version 0x%x"), eabi_ver
);
9821 /* Detect M-profile programs. This only works if the
9822 executable file includes build attributes; GCC does
9823 copy them to the executable, but e.g. RealView does
9825 attr_arch
= bfd_elf_get_obj_attr_int (info
.abfd
, OBJ_ATTR_PROC
,
9827 attr_profile
= bfd_elf_get_obj_attr_int (info
.abfd
,
9829 Tag_CPU_arch_profile
);
9830 /* GCC specifies the profile for v6-M; RealView only
9831 specifies the profile for architectures starting with
9832 V7 (as opposed to architectures with a tag
9833 numerically greater than TAG_CPU_ARCH_V7). */
9834 if (!tdesc_has_registers (tdesc
)
9835 && (attr_arch
== TAG_CPU_ARCH_V6_M
9836 || attr_arch
== TAG_CPU_ARCH_V6S_M
9837 || attr_profile
== 'M'))
9842 if (fp_model
== ARM_FLOAT_AUTO
)
9844 int e_flags
= elf_elfheader (info
.abfd
)->e_flags
;
9846 switch (e_flags
& (EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
))
9849 /* Leave it as "auto". Strictly speaking this case
9850 means FPA, but almost nobody uses that now, and
9851 many toolchains fail to set the appropriate bits
9852 for the floating-point model they use. */
9854 case EF_ARM_SOFT_FLOAT
:
9855 fp_model
= ARM_FLOAT_SOFT_FPA
;
9857 case EF_ARM_VFP_FLOAT
:
9858 fp_model
= ARM_FLOAT_VFP
;
9860 case EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
:
9861 fp_model
= ARM_FLOAT_SOFT_VFP
;
9866 if (e_flags
& EF_ARM_BE8
)
9867 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
9872 /* Leave it as "auto". */
9877 /* Check any target description for validity. */
9878 if (tdesc_has_registers (tdesc
))
9880 /* For most registers we require GDB's default names; but also allow
9881 the numeric names for sp / lr / pc, as a convenience. */
9882 static const char *const arm_sp_names
[] = { "r13", "sp", NULL
};
9883 static const char *const arm_lr_names
[] = { "r14", "lr", NULL
};
9884 static const char *const arm_pc_names
[] = { "r15", "pc", NULL
};
9886 const struct tdesc_feature
*feature
;
9889 feature
= tdesc_find_feature (tdesc
,
9890 "org.gnu.gdb.arm.core");
9891 if (feature
== NULL
)
9893 feature
= tdesc_find_feature (tdesc
,
9894 "org.gnu.gdb.arm.m-profile");
9895 if (feature
== NULL
)
9901 tdesc_data
= tdesc_data_alloc ();
9904 for (i
= 0; i
< ARM_SP_REGNUM
; i
++)
9905 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
, i
,
9906 arm_register_names
[i
]);
9907 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
9910 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
9913 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
9917 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
9918 ARM_PS_REGNUM
, "xpsr");
9920 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
9921 ARM_PS_REGNUM
, "cpsr");
9925 tdesc_data_cleanup (tdesc_data
);
9929 feature
= tdesc_find_feature (tdesc
,
9930 "org.gnu.gdb.arm.fpa");
9931 if (feature
!= NULL
)
9934 for (i
= ARM_F0_REGNUM
; i
<= ARM_FPS_REGNUM
; i
++)
9935 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
, i
,
9936 arm_register_names
[i
]);
9939 tdesc_data_cleanup (tdesc_data
);
9944 have_fpa_registers
= 0;
9946 feature
= tdesc_find_feature (tdesc
,
9947 "org.gnu.gdb.xscale.iwmmxt");
9948 if (feature
!= NULL
)
9950 static const char *const iwmmxt_names
[] = {
9951 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
9952 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
9953 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
9954 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
9958 for (i
= ARM_WR0_REGNUM
; i
<= ARM_WR15_REGNUM
; i
++)
9960 &= tdesc_numbered_register (feature
, tdesc_data
, i
,
9961 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
9963 /* Check for the control registers, but do not fail if they
9965 for (i
= ARM_WC0_REGNUM
; i
<= ARM_WCASF_REGNUM
; i
++)
9966 tdesc_numbered_register (feature
, tdesc_data
, i
,
9967 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
9969 for (i
= ARM_WCGR0_REGNUM
; i
<= ARM_WCGR3_REGNUM
; i
++)
9971 &= tdesc_numbered_register (feature
, tdesc_data
, i
,
9972 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
9976 tdesc_data_cleanup (tdesc_data
);
9981 /* If we have a VFP unit, check whether the single precision registers
9982 are present. If not, then we will synthesize them as pseudo
9984 feature
= tdesc_find_feature (tdesc
,
9985 "org.gnu.gdb.arm.vfp");
9986 if (feature
!= NULL
)
9988 static const char *const vfp_double_names
[] = {
9989 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
9990 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
9991 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
9992 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
9995 /* Require the double precision registers. There must be either
9998 for (i
= 0; i
< 32; i
++)
10000 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
10002 vfp_double_names
[i
]);
10006 if (!valid_p
&& i
== 16)
10009 /* Also require FPSCR. */
10010 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
10011 ARM_FPSCR_REGNUM
, "fpscr");
10014 tdesc_data_cleanup (tdesc_data
);
10018 if (tdesc_unnumbered_register (feature
, "s0") == 0)
10019 have_vfp_pseudos
= 1;
10021 have_vfp_registers
= 1;
10023 /* If we have VFP, also check for NEON. The architecture allows
10024 NEON without VFP (integer vector operations only), but GDB
10025 does not support that. */
10026 feature
= tdesc_find_feature (tdesc
,
10027 "org.gnu.gdb.arm.neon");
10028 if (feature
!= NULL
)
10030 /* NEON requires 32 double-precision registers. */
10033 tdesc_data_cleanup (tdesc_data
);
10037 /* If there are quad registers defined by the stub, use
10038 their type; otherwise (normally) provide them with
10039 the default type. */
10040 if (tdesc_unnumbered_register (feature
, "q0") == 0)
10041 have_neon_pseudos
= 1;
10048 /* If there is already a candidate, use it. */
10049 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
10051 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
10053 if (arm_abi
!= ARM_ABI_AUTO
10054 && arm_abi
!= gdbarch_tdep (best_arch
->gdbarch
)->arm_abi
)
10057 if (fp_model
!= ARM_FLOAT_AUTO
10058 && fp_model
!= gdbarch_tdep (best_arch
->gdbarch
)->fp_model
)
10061 /* There are various other properties in tdep that we do not
10062 need to check here: those derived from a target description,
10063 since gdbarches with a different target description are
10064 automatically disqualified. */
10066 /* Do check is_m, though, since it might come from the binary. */
10067 if (is_m
!= gdbarch_tdep (best_arch
->gdbarch
)->is_m
)
10070 /* Found a match. */
10074 if (best_arch
!= NULL
)
10076 if (tdesc_data
!= NULL
)
10077 tdesc_data_cleanup (tdesc_data
);
10078 return best_arch
->gdbarch
;
10081 tdep
= xcalloc (1, sizeof (struct gdbarch_tdep
));
10082 gdbarch
= gdbarch_alloc (&info
, tdep
);
10084 /* Record additional information about the architecture we are defining.
10085 These are gdbarch discriminators, like the OSABI. */
10086 tdep
->arm_abi
= arm_abi
;
10087 tdep
->fp_model
= fp_model
;
10089 tdep
->have_fpa_registers
= have_fpa_registers
;
10090 tdep
->have_vfp_registers
= have_vfp_registers
;
10091 tdep
->have_vfp_pseudos
= have_vfp_pseudos
;
10092 tdep
->have_neon_pseudos
= have_neon_pseudos
;
10093 tdep
->have_neon
= have_neon
;
10095 arm_register_g_packet_guesses (gdbarch
);
10098 switch (info
.byte_order_for_code
)
10100 case BFD_ENDIAN_BIG
:
10101 tdep
->arm_breakpoint
= arm_default_arm_be_breakpoint
;
10102 tdep
->arm_breakpoint_size
= sizeof (arm_default_arm_be_breakpoint
);
10103 tdep
->thumb_breakpoint
= arm_default_thumb_be_breakpoint
;
10104 tdep
->thumb_breakpoint_size
= sizeof (arm_default_thumb_be_breakpoint
);
10108 case BFD_ENDIAN_LITTLE
:
10109 tdep
->arm_breakpoint
= arm_default_arm_le_breakpoint
;
10110 tdep
->arm_breakpoint_size
= sizeof (arm_default_arm_le_breakpoint
);
10111 tdep
->thumb_breakpoint
= arm_default_thumb_le_breakpoint
;
10112 tdep
->thumb_breakpoint_size
= sizeof (arm_default_thumb_le_breakpoint
);
10117 internal_error (__FILE__
, __LINE__
,
10118 _("arm_gdbarch_init: bad byte order for float format"));
10121 /* On ARM targets char defaults to unsigned. */
10122 set_gdbarch_char_signed (gdbarch
, 0);
10124 /* Note: for displaced stepping, this includes the breakpoint, and one word
10125 of additional scratch space. This setting isn't used for anything beside
10126 displaced stepping at present. */
10127 set_gdbarch_max_insn_length (gdbarch
, 4 * DISPLACED_MODIFIED_INSNS
);
10129 /* This should be low enough for everything. */
10130 tdep
->lowest_pc
= 0x20;
10131 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
10133 /* The default, for both APCS and AAPCS, is to return small
10134 structures in registers. */
10135 tdep
->struct_return
= reg_struct_return
;
10137 set_gdbarch_push_dummy_call (gdbarch
, arm_push_dummy_call
);
10138 set_gdbarch_frame_align (gdbarch
, arm_frame_align
);
10140 set_gdbarch_write_pc (gdbarch
, arm_write_pc
);
10142 /* Frame handling. */
10143 set_gdbarch_dummy_id (gdbarch
, arm_dummy_id
);
10144 set_gdbarch_unwind_pc (gdbarch
, arm_unwind_pc
);
10145 set_gdbarch_unwind_sp (gdbarch
, arm_unwind_sp
);
10147 frame_base_set_default (gdbarch
, &arm_normal_base
);
10149 /* Address manipulation. */
10150 set_gdbarch_smash_text_address (gdbarch
, arm_smash_text_address
);
10151 set_gdbarch_addr_bits_remove (gdbarch
, arm_addr_bits_remove
);
10153 /* Advance PC across function entry code. */
10154 set_gdbarch_skip_prologue (gdbarch
, arm_skip_prologue
);
10156 /* Detect whether PC is in function epilogue. */
10157 set_gdbarch_in_function_epilogue_p (gdbarch
, arm_in_function_epilogue_p
);
10159 /* Skip trampolines. */
10160 set_gdbarch_skip_trampoline_code (gdbarch
, arm_skip_stub
);
10162 /* The stack grows downward. */
10163 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
10165 /* Breakpoint manipulation. */
10166 set_gdbarch_breakpoint_from_pc (gdbarch
, arm_breakpoint_from_pc
);
10167 set_gdbarch_remote_breakpoint_from_pc (gdbarch
,
10168 arm_remote_breakpoint_from_pc
);
10170 /* Information about registers, etc. */
10171 set_gdbarch_sp_regnum (gdbarch
, ARM_SP_REGNUM
);
10172 set_gdbarch_pc_regnum (gdbarch
, ARM_PC_REGNUM
);
10173 set_gdbarch_num_regs (gdbarch
, ARM_NUM_REGS
);
10174 set_gdbarch_register_type (gdbarch
, arm_register_type
);
10175 set_gdbarch_register_reggroup_p (gdbarch
, arm_register_reggroup_p
);
10177 /* This "info float" is FPA-specific. Use the generic version if we
10178 do not have FPA. */
10179 if (gdbarch_tdep (gdbarch
)->have_fpa_registers
)
10180 set_gdbarch_print_float_info (gdbarch
, arm_print_float_info
);
10182 /* Internal <-> external register number maps. */
10183 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, arm_dwarf_reg_to_regnum
);
10184 set_gdbarch_register_sim_regno (gdbarch
, arm_register_sim_regno
);
10186 set_gdbarch_register_name (gdbarch
, arm_register_name
);
10188 /* Returning results. */
10189 set_gdbarch_return_value (gdbarch
, arm_return_value
);
10192 set_gdbarch_print_insn (gdbarch
, gdb_print_insn_arm
);
10194 /* Minsymbol frobbing. */
10195 set_gdbarch_elf_make_msymbol_special (gdbarch
, arm_elf_make_msymbol_special
);
10196 set_gdbarch_coff_make_msymbol_special (gdbarch
,
10197 arm_coff_make_msymbol_special
);
10198 set_gdbarch_record_special_symbol (gdbarch
, arm_record_special_symbol
);
10200 /* Thumb-2 IT block support. */
10201 set_gdbarch_adjust_breakpoint_address (gdbarch
,
10202 arm_adjust_breakpoint_address
);
10204 /* Virtual tables. */
10205 set_gdbarch_vbit_in_delta (gdbarch
, 1);
10207 /* Hook in the ABI-specific overrides, if they have been registered. */
10208 gdbarch_init_osabi (info
, gdbarch
);
10210 dwarf2_frame_set_init_reg (gdbarch
, arm_dwarf2_frame_init_reg
);
10212 /* Add some default predicates. */
10213 frame_unwind_append_unwinder (gdbarch
, &arm_stub_unwind
);
10214 dwarf2_append_unwinders (gdbarch
);
10215 frame_unwind_append_unwinder (gdbarch
, &arm_exidx_unwind
);
10216 frame_unwind_append_unwinder (gdbarch
, &arm_prologue_unwind
);
10218 /* Now we have tuned the configuration, set a few final things,
10219 based on what the OS ABI has told us. */
10221 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
10222 binaries are always marked. */
10223 if (tdep
->arm_abi
== ARM_ABI_AUTO
)
10224 tdep
->arm_abi
= ARM_ABI_APCS
;
10226 /* Watchpoints are not steppable. */
10227 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
10229 /* We used to default to FPA for generic ARM, but almost nobody
10230 uses that now, and we now provide a way for the user to force
10231 the model. So default to the most useful variant. */
10232 if (tdep
->fp_model
== ARM_FLOAT_AUTO
)
10233 tdep
->fp_model
= ARM_FLOAT_SOFT_FPA
;
10235 if (tdep
->jb_pc
>= 0)
10236 set_gdbarch_get_longjmp_target (gdbarch
, arm_get_longjmp_target
);
10238 /* Floating point sizes and format. */
10239 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
10240 if (tdep
->fp_model
== ARM_FLOAT_SOFT_FPA
|| tdep
->fp_model
== ARM_FLOAT_FPA
)
10242 set_gdbarch_double_format
10243 (gdbarch
, floatformats_ieee_double_littlebyte_bigword
);
10244 set_gdbarch_long_double_format
10245 (gdbarch
, floatformats_ieee_double_littlebyte_bigword
);
10249 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
10250 set_gdbarch_long_double_format (gdbarch
, floatformats_ieee_double
);
10253 if (have_vfp_pseudos
)
10255 /* NOTE: These are the only pseudo registers used by
10256 the ARM target at the moment. If more are added, a
10257 little more care in numbering will be needed. */
10259 int num_pseudos
= 32;
10260 if (have_neon_pseudos
)
10262 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudos
);
10263 set_gdbarch_pseudo_register_read (gdbarch
, arm_pseudo_read
);
10264 set_gdbarch_pseudo_register_write (gdbarch
, arm_pseudo_write
);
10269 set_tdesc_pseudo_register_name (gdbarch
, arm_register_name
);
10271 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
10273 /* Override tdesc_register_type to adjust the types of VFP
10274 registers for NEON. */
10275 set_gdbarch_register_type (gdbarch
, arm_register_type
);
10278 /* Add standard register aliases. We add aliases even for those
10279 nanes which are used by the current architecture - it's simpler,
10280 and does no harm, since nothing ever lists user registers. */
10281 for (i
= 0; i
< ARRAY_SIZE (arm_register_aliases
); i
++)
10282 user_reg_add (gdbarch
, arm_register_aliases
[i
].name
,
10283 value_of_arm_user_reg
, &arm_register_aliases
[i
].regnum
);
10289 arm_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
10291 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
10296 fprintf_unfiltered (file
, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10297 (unsigned long) tdep
->lowest_pc
);
10300 extern initialize_file_ftype _initialize_arm_tdep
; /* -Wmissing-prototypes */
10303 _initialize_arm_tdep (void)
10305 struct ui_file
*stb
;
10307 struct cmd_list_element
*new_set
, *new_show
;
10308 const char *setname
;
10309 const char *setdesc
;
10310 const char *const *regnames
;
10312 static char *helptext
;
10313 char regdesc
[1024], *rdptr
= regdesc
;
10314 size_t rest
= sizeof (regdesc
);
10316 gdbarch_register (bfd_arch_arm
, arm_gdbarch_init
, arm_dump_tdep
);
10318 arm_objfile_data_key
10319 = register_objfile_data_with_cleanup (NULL
, arm_objfile_data_free
);
10321 /* Add ourselves to objfile event chain. */
10322 observer_attach_new_objfile (arm_exidx_new_objfile
);
10324 = register_objfile_data_with_cleanup (NULL
, arm_exidx_data_free
);
10326 /* Register an ELF OS ABI sniffer for ARM binaries. */
10327 gdbarch_register_osabi_sniffer (bfd_arch_arm
,
10328 bfd_target_elf_flavour
,
10329 arm_elf_osabi_sniffer
);
10331 /* Initialize the standard target descriptions. */
10332 initialize_tdesc_arm_with_m ();
10333 initialize_tdesc_arm_with_m_fpa_layout ();
10334 initialize_tdesc_arm_with_iwmmxt ();
10335 initialize_tdesc_arm_with_vfpv2 ();
10336 initialize_tdesc_arm_with_vfpv3 ();
10337 initialize_tdesc_arm_with_neon ();
10339 /* Get the number of possible sets of register names defined in opcodes. */
10340 num_disassembly_options
= get_arm_regname_num_options ();
10342 /* Add root prefix command for all "set arm"/"show arm" commands. */
10343 add_prefix_cmd ("arm", no_class
, set_arm_command
,
10344 _("Various ARM-specific commands."),
10345 &setarmcmdlist
, "set arm ", 0, &setlist
);
10347 add_prefix_cmd ("arm", no_class
, show_arm_command
,
10348 _("Various ARM-specific commands."),
10349 &showarmcmdlist
, "show arm ", 0, &showlist
);
10351 /* Sync the opcode insn printer with our register viewer. */
10352 parse_arm_disassembler_option ("reg-names-std");
10354 /* Initialize the array that will be passed to
10355 add_setshow_enum_cmd(). */
10356 valid_disassembly_styles
10357 = xmalloc ((num_disassembly_options
+ 1) * sizeof (char *));
10358 for (i
= 0; i
< num_disassembly_options
; i
++)
10360 numregs
= get_arm_regnames (i
, &setname
, &setdesc
, ®names
);
10361 valid_disassembly_styles
[i
] = setname
;
10362 length
= snprintf (rdptr
, rest
, "%s - %s\n", setname
, setdesc
);
10365 /* When we find the default names, tell the disassembler to use
10367 if (!strcmp (setname
, "std"))
10369 disassembly_style
= setname
;
10370 set_arm_regname_option (i
);
10373 /* Mark the end of valid options. */
10374 valid_disassembly_styles
[num_disassembly_options
] = NULL
;
10376 /* Create the help text. */
10377 stb
= mem_fileopen ();
10378 fprintf_unfiltered (stb
, "%s%s%s",
10379 _("The valid values are:\n"),
10381 _("The default is \"std\"."));
10382 helptext
= ui_file_xstrdup (stb
, NULL
);
10383 ui_file_delete (stb
);
10385 add_setshow_enum_cmd("disassembler", no_class
,
10386 valid_disassembly_styles
, &disassembly_style
,
10387 _("Set the disassembly style."),
10388 _("Show the disassembly style."),
10390 set_disassembly_style_sfunc
,
10391 NULL
, /* FIXME: i18n: The disassembly style is
10393 &setarmcmdlist
, &showarmcmdlist
);
10395 add_setshow_boolean_cmd ("apcs32", no_class
, &arm_apcs_32
,
10396 _("Set usage of ARM 32-bit mode."),
10397 _("Show usage of ARM 32-bit mode."),
10398 _("When off, a 26-bit PC will be used."),
10400 NULL
, /* FIXME: i18n: Usage of ARM 32-bit
10402 &setarmcmdlist
, &showarmcmdlist
);
10404 /* Add a command to allow the user to force the FPU model. */
10405 add_setshow_enum_cmd ("fpu", no_class
, fp_model_strings
, ¤t_fp_model
,
10406 _("Set the floating point type."),
10407 _("Show the floating point type."),
10408 _("auto - Determine the FP typefrom the OS-ABI.\n\
10409 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10410 fpa - FPA co-processor (GCC compiled).\n\
10411 softvfp - Software FP with pure-endian doubles.\n\
10412 vfp - VFP co-processor."),
10413 set_fp_model_sfunc
, show_fp_model
,
10414 &setarmcmdlist
, &showarmcmdlist
);
10416 /* Add a command to allow the user to force the ABI. */
10417 add_setshow_enum_cmd ("abi", class_support
, arm_abi_strings
, &arm_abi_string
,
10419 _("Show the ABI."),
10420 NULL
, arm_set_abi
, arm_show_abi
,
10421 &setarmcmdlist
, &showarmcmdlist
);
10423 /* Add two commands to allow the user to force the assumed
10425 add_setshow_enum_cmd ("fallback-mode", class_support
,
10426 arm_mode_strings
, &arm_fallback_mode_string
,
10427 _("Set the mode assumed when symbols are unavailable."),
10428 _("Show the mode assumed when symbols are unavailable."),
10429 NULL
, NULL
, arm_show_fallback_mode
,
10430 &setarmcmdlist
, &showarmcmdlist
);
10431 add_setshow_enum_cmd ("force-mode", class_support
,
10432 arm_mode_strings
, &arm_force_mode_string
,
10433 _("Set the mode assumed even when symbols are available."),
10434 _("Show the mode assumed even when symbols are available."),
10435 NULL
, NULL
, arm_show_force_mode
,
10436 &setarmcmdlist
, &showarmcmdlist
);
10438 /* Debugging flag. */
10439 add_setshow_boolean_cmd ("arm", class_maintenance
, &arm_debug
,
10440 _("Set ARM debugging."),
10441 _("Show ARM debugging."),
10442 _("When on, arm-specific debugging is enabled."),
10444 NULL
, /* FIXME: i18n: "ARM debugging is %s. */
10445 &setdebuglist
, &showdebuglist
);