+ cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
+ cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
+
+ cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
+
+ return cache;
+}
+
+/* Our frame ID for a stub frame is the current SP and LR. */
+
+static void
+arm_stub_this_id (struct frame_info *this_frame,
+ void **this_cache,
+ struct frame_id *this_id)
+{
+ struct arm_prologue_cache *cache;
+
+ if (*this_cache == NULL)
+ *this_cache = arm_make_stub_cache (this_frame);
+ cache = *this_cache;
+
+ *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
+}
+
+static int
+arm_stub_unwind_sniffer (const struct frame_unwind *self,
+ struct frame_info *this_frame,
+ void **this_prologue_cache)
+{
+ CORE_ADDR addr_in_block;
+ char dummy[4];
+
+ addr_in_block = get_frame_address_in_block (this_frame);
+ if (in_plt_section (addr_in_block, NULL)
+ /* We also use the stub winder if the target memory is unreadable
+ to avoid having the prologue unwinder trying to read it. */
+ || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
+ return 1;
+
+ return 0;
+}
+
/* The stub unwinder: uses the stub frame ID above, borrows the
   prologue unwinder's register recovery, and is selected by
   arm_stub_unwind_sniffer.  */

struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
+
+static CORE_ADDR
+arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
+{
+ struct arm_prologue_cache *cache;
+
+ if (*this_cache == NULL)
+ *this_cache = arm_make_prologue_cache (this_frame);
+ cache = *this_cache;
+
+ return cache->prev_sp - cache->framesize;
+}
+
/* Frame base table for normal frames; the same address serves as the
   frame base, locals base, and args base.  */

struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
+
+/* Assuming THIS_FRAME is a dummy, return the frame ID of that
+ dummy frame. The frame ID's base needs to match the TOS value
+ saved by save_dummy_frame_tos() and returned from
+ arm_push_dummy_call, and the PC needs to match the dummy frame's
+ breakpoint. */
+
+static struct frame_id
+arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
+{
+ return frame_id_build (get_frame_register_unsigned (this_frame,
+ ARM_SP_REGNUM),
+ get_frame_pc (this_frame));
+}
+
+/* Given THIS_FRAME, find the previous frame's resume PC (which will
+ be used to construct the previous frame's ID, after looking up the
+ containing function). */
+
+static CORE_ADDR
+arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
+{
+ CORE_ADDR pc;
+ pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
+ return arm_addr_bits_remove (gdbarch, pc);
+}
+
+static CORE_ADDR
+arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
+{
+ return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
+}
+
/* DWARF unwinder callback for the ARM PC and PS registers (installed
   by arm_dwarf2_frame_init_reg below).  Both need post-processing of
   the raw unwound value: the Thumb bit must be stripped from the PC
   and folded into the T bit of CPSR.  */

static struct value *
arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
			  int regnum)
{
  struct gdbarch * gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr, cpsr;
  ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);

  switch (regnum)
    {
    case ARM_PC_REGNUM:
      /* The PC is normally copied from the return column, which
	 describes saves of LR.  However, that version may have an
	 extra bit set to indicate Thumb state.  The bit is not
	 part of the PC.  */
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum,
					arm_addr_bits_remove (gdbarch, lr));

    case ARM_PS_REGNUM:
      /* Reconstruct the T bit; see arm_prologue_prev_register for details.
	 Note the base CPSR value is read from THIS frame, while the T bit
	 comes from the unwound LR.  */
      cpsr = get_frame_register_unsigned (this_frame, regnum);
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      if (IS_THUMB_ADDR (lr))
	cpsr |= t_bit;
      else
	cpsr &= ~t_bit;
      return frame_unwind_got_constant (this_frame, regnum, cpsr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}
+
+static void
+arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
+ struct dwarf2_frame_state_reg *reg,
+ struct frame_info *this_frame)
+{
+ switch (regnum)
+ {
+ case ARM_PC_REGNUM:
+ case ARM_PS_REGNUM:
+ reg->how = DWARF2_FRAME_REG_FN;
+ reg->loc.fn = arm_dwarf2_prev_register;
+ break;
+ case ARM_SP_REGNUM:
+ reg->how = DWARF2_FRAME_REG_CFA;
+ break;
+ }
+}
+
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  Thumb
   (16-bit and 32-bit Thumb-2) version; the ARM-mode counterpart is
   arm_in_function_epilogue_p.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (insn == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn & 0xfe00) == 0xbc00)  /* pop <registers> */
	{
	  found_stack_adjust = 1;
	  if (insn & 0x0100)  /* <registers> include PC.  */
	    found_return = 1;
	}
      else if ((insn & 0xe000) == 0xe000)  /* 32-bit Thumb-2 instruction */
	{
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* If the preceding instruction is 16-bit, it is the second
	 halfword (INSN2) that immediately precedes PC; if it is
	 32-bit, INSN is its first halfword.  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (insn2 == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xbc00)  /* pop <registers> without PC */
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
+
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  ARM-mode
   version; Thumb code is handled by thumb_in_function_epilogue_p.  */

static int
arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return, found_stack_adjust;
  CORE_ADDR func_start, func_end;

  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_in_function_epilogue_p (gdbarch, pc);

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional (NV-field) encodings; the patterns below only
     match the conditional encoding space.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	       && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  found_stack_adjust = 0;
  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0df0f000) == 0x0080d000)
	/* ADD SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0df0f000) == 0x0040d000)
	/* SUB SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0d000)
	/* MOV SP.  */
	found_stack_adjust = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000)
	/* POP (LDMIA).  */
	found_stack_adjust = 1;
    }

  if (found_stack_adjust)
    return 1;

  return 0;
}
+
+
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.
   Each node owns a heap-allocated copy of LEN bytes of argument data;
   PREV points at the previously pushed item (NULL at the bottom).  */

struct stack_item
{
  int len;
  struct stack_item *prev;
  void *data;
};
+
+static struct stack_item *
+push_stack_item (struct stack_item *prev, const void *contents, int len)
+{
+ struct stack_item *si;
+ si = xmalloc (sizeof (struct stack_item));
+ si->data = xmalloc (len);
+ si->len = len;
+ si->prev = prev;
+ memcpy (si->data, contents, len);
+ return si;
+}
+
+static struct stack_item *
+pop_stack_item (struct stack_item *si)
+{
+ struct stack_item *dead = si;
+ si = si->prev;
+ xfree (dead->data);
+ xfree (dead);
+ return si;
+}
+
+
+/* Return the alignment (in bytes) of the given type. */
+
+static int
+arm_type_align (struct type *t)
+{
+ int n;
+ int align;
+ int falign;
+
+ t = check_typedef (t);
+ switch (TYPE_CODE (t))
+ {
+ default:
+ /* Should never happen. */
+ internal_error (__FILE__, __LINE__, _("unknown type alignment"));
+ return 4;
+
+ case TYPE_CODE_PTR:
+ case TYPE_CODE_ENUM:
+ case TYPE_CODE_INT:
+ case TYPE_CODE_FLT:
+ case TYPE_CODE_SET:
+ case TYPE_CODE_RANGE:
+ case TYPE_CODE_BITSTRING:
+ case TYPE_CODE_REF:
+ case TYPE_CODE_CHAR:
+ case TYPE_CODE_BOOL:
+ return TYPE_LENGTH (t);
+
+ case TYPE_CODE_ARRAY:
+ case TYPE_CODE_COMPLEX:
+ /* TODO: What about vector types? */
+ return arm_type_align (TYPE_TARGET_TYPE (t));
+
+ case TYPE_CODE_STRUCT:
+ case TYPE_CODE_UNION:
+ align = 1;
+ for (n = 0; n < TYPE_NFIELDS (t); n++)
+ {
+ falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
+ if (falign > align)
+ align = falign;
+ }
+ return align;
+ }
+}
+
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,		/* Not yet classified.  */
  VFP_CPRC_SINGLE,		/* 32-bit float, passed in an S register.  */
  VFP_CPRC_DOUBLE,		/* 64-bit float, passed in a D register.  */
  VFP_CPRC_VEC64,		/* 64-bit vector, passed in a D register.  */
  VFP_CPRC_VEC128		/* 128-bit vector, passed in a Q register.  */
};
+
+/* The length of one element of base type B. */
+
+static unsigned
+arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
+{
+ switch (b)
+ {
+ case VFP_CPRC_SINGLE:
+ return 4;
+ case VFP_CPRC_DOUBLE:
+ return 8;
+ case VFP_CPRC_VEC64:
+ return 8;
+ case VFP_CPRC_VEC128:
+ return 16;
+ default:
+ internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
+ (int) b);
+ }
+}
+
+/* The character ('s', 'd' or 'q') for the type of VFP register used
+ for passing base type B. */
+
+static int
+arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
+{
+ switch (b)
+ {
+ case VFP_CPRC_SINGLE:
+ return 's';
+ case VFP_CPRC_DOUBLE:
+ return 'd';
+ case VFP_CPRC_VEC64:
+ return 'd';
+ case VFP_CPRC_VEC128:
+ return 'q';
+ default:
+ internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
+ (int) b);
+ }
+}
+
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vectors and complex types are not currently supported,
   matching the generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A 4-byte float is one SINGLE element; an 8-byte float is one
	 DOUBLE element.  Any other float size disqualifies T.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	/* An array contributes (total length / element length)
	   elements of its element's base type.  */
	int count;
	unsigned unitlen;
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	/* A struct contributes the sum of its members' elements;
	   padding (length != unitlen * count) disqualifies it.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	/* A union contributes the maximum of its members' elements.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
+
+/* Determine whether T is a VFP co-processor register candidate (CPRC)
+ if passed to or returned from a non-variadic function with the VFP
+ ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
+ *BASE_TYPE to the base type for T and *COUNT to the number of
+ elements of that base type before returning. */
+
+static int
+arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
+ int *count)
+{
+ enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
+ int c = arm_vfp_cprc_sub_candidate (t, &b);
+ if (c <= 0 || c > 4)
+ return 0;
+ *base_type = b;
+ *count = c;
+ return 1;
+}
+
+/* Return 1 if the VFP ABI should be used for passing arguments to and
+ returning values from a function of type FUNC_TYPE, 0
+ otherwise. */
+
+static int
+arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ /* Variadic functions always use the base ABI. Assume that functions
+ without debug info are not variadic. */
+ if (func_type && TYPE_VARARGS (check_typedef (func_type)))
+ return 0;
+ /* The VFP ABI is only supported as a variant of AAPCS. */
+ if (tdep->arm_abi != ARM_ABI_AAPCS)
+ return 0;
+ return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
+}
+
/* We currently only support passing parameters in integer registers, which
   conforms with GCC's default model, and VFP argument passing following
   the VFP variant of AAPCS.  Several other variants exist and
   we should probably support some of them based on the selected ABI.

   Writes argument values into r0-r3 / VFP registers / target stack,
   sets LR to BP_ADDR, updates SP, and returns the final SP (used as
   the dummy frame's TOS).  */

static CORE_ADDR
arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int argnum;
  int argreg;
  int nstack;
  struct stack_item *si = NULL;
  int use_vfp_abi;
  struct type *ftype;
  /* Bitmask of still-free single-precision VFP registers s0-s15.  */
  unsigned vfp_regs_free = (1 << 16) - 1;

  /* Determine the type of this function and whether the VFP ABI
     applies.  */
  ftype = check_typedef (value_type (function));
  if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
    ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
  use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);

  /* Set the return address.  For the ARM, the return breakpoint is
     always at BP_ADDR.  */
  if (arm_pc_is_thumb (gdbarch, bp_addr))
    bp_addr |= 1;
  regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);

  /* Walk through the list of args and determine how large a temporary
     stack is required.  Need to take care here as structs may be
     passed on the stack, and we have to push them.  */
  nstack = 0;

  argreg = ARM_A1_REGNUM;
  nstack = 0;

  /* The struct_return pointer occupies the first parameter
     passing register.  */
  if (struct_return)
    {
      if (arm_debug)
	fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
			    gdbarch_register_name (gdbarch, argreg),
			    paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
      argreg++;
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      int len;
      struct type *arg_type;
      struct type *target_type;
      enum type_code typecode;
      const bfd_byte *val;
      int align;
      enum arm_vfp_cprc_base_type vfp_base_type;
      int vfp_base_count;
      int may_use_core_reg = 1;

      arg_type = check_typedef (value_type (args[argnum]));
      len = TYPE_LENGTH (arg_type);
      target_type = TYPE_TARGET_TYPE (arg_type);
      typecode = TYPE_CODE (arg_type);
      val = value_contents (args[argnum]);

      align = arm_type_align (arg_type);
      /* Round alignment up to a whole number of words.  */
      align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
      /* Different ABIs have different maximum alignments.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
	{
	  /* The APCS ABI only requires word alignment.  */
	  align = INT_REGISTER_SIZE;
	}
      else
	{
	  /* The AAPCS requires at most doubleword alignment.  */
	  if (align > INT_REGISTER_SIZE * 2)
	    align = INT_REGISTER_SIZE * 2;
	}

      if (use_vfp_abi
	  && arm_vfp_call_candidate (arg_type, &vfp_base_type,
				     &vfp_base_count))
	{
	  int regno;
	  int unit_length;
	  int shift;
	  unsigned mask;

	  /* Because this is a CPRC it cannot go in a core register or
	     cause a core register to be skipped for alignment.
	     Either it goes in VFP registers and the rest of this loop
	     iteration is skipped for this argument, or it goes on the
	     stack (and the stack alignment code is correct for this
	     case).  */
	  may_use_core_reg = 0;

	  unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
	  shift = unit_length / 4;
	  mask = (1 << (shift * vfp_base_count)) - 1;
	  /* Find a naturally aligned run of free S registers wide
	     enough for the whole candidate.  */
	  for (regno = 0; regno < 16; regno += shift)
	    if (((vfp_regs_free >> regno) & mask) == mask)
	      break;

	  if (regno < 16)
	    {
	      int reg_char;
	      int reg_scaled;
	      int i;

	      vfp_regs_free &= ~(mask << regno);
	      reg_scaled = regno / shift;
	      reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
	      for (i = 0; i < vfp_base_count; i++)
		{
		  char name_buf[4];
		  int regnum;
		  if (reg_char == 'q')
		    arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
					 val + i * unit_length);
		  else
		    {
		      sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
		      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
							    strlen (name_buf));
		      regcache_cooked_write (regcache, regnum,
					     val + i * unit_length);
		    }
		}
	      continue;
	    }
	  else
	    {
	      /* This CPRC could not go in VFP registers, so all VFP
		 registers are now marked as used.  */
	      vfp_regs_free = 0;
	    }
	}

      /* Push stack padding for doubleword alignment.  */
      if (nstack & (align - 1))
	{
	  si = push_stack_item (si, val, INT_REGISTER_SIZE);
	  nstack += INT_REGISTER_SIZE;
	}

      /* Doubleword aligned quantities must go in even register pairs.  */
      if (may_use_core_reg
	  && argreg <= ARM_LAST_ARG_REGNUM
	  && align > INT_REGISTER_SIZE
	  && argreg & 1)
	argreg++;

      /* If the argument is a pointer to a function, and it is a
	 Thumb function, create a LOCAL copy of the value and set
	 the THUMB bit in it.  */
      if (TYPE_CODE_PTR == typecode
	  && target_type != NULL
	  && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
	{
	  CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
	  if (arm_pc_is_thumb (gdbarch, regval))
	    {
	      bfd_byte *copy = alloca (len);
	      store_unsigned_integer (copy, len, byte_order,
				      MAKE_THUMB_ADDR (regval));
	      val = copy;
	    }
	}

      /* Copy the argument to general registers or the stack in
	 register-sized pieces.  Large arguments are split between
	 registers and stack.  */
      while (len > 0)
	{
	  int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;

	  if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
	    {
	      /* The argument is being passed in a general purpose
		 register.  */
	      CORE_ADDR regval
		= extract_unsigned_integer (val, partial_len, byte_order);
	      if (byte_order == BFD_ENDIAN_BIG)
		regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
				    argnum,
				    gdbarch_register_name
				      (gdbarch, argreg),
				    phex (regval, INT_REGISTER_SIZE));
	      regcache_cooked_write_unsigned (regcache, argreg, regval);
	      argreg++;
	    }
	  else
	    {
	      /* Push the arguments onto the stack.  */
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
				    argnum, nstack);
	      si = push_stack_item (si, val, INT_REGISTER_SIZE);
	      nstack += INT_REGISTER_SIZE;
	    }

	  len -= partial_len;
	  val += partial_len;
	}
    }
  /* If we have an odd number of words to push, then decrement the stack
     by one word now, so first stack argument will be dword aligned.  */
  if (nstack & 4)
    sp -= 4;

  /* Drain the FILO: items were pushed in argument order, so writing
     while decrementing SP lays them out correctly in memory.  */
  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);

  return sp;
}
+
+
+/* Always align the frame to an 8-byte boundary. This is required on
+ some platforms and harmless on the rest. */
+
+static CORE_ADDR
+arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
+{
+ /* Align the stack to eight bytes. */
+ return sp & ~ (CORE_ADDR) 7;
+}
+
/* Print the names of the FPA status flags set in the low five bits of
   FLAGS, followed by a newline.  */

static void
print_fpu_flags (int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs (flag_names[i], stdout);
  putchar ('\n');
}
+
/* Print interesting information about the floating point processor
   (if present) or emulator.  Decodes the FPS status register: bit 31
   selects hardware vs. software FPU, bits 24-30 give the type, bits
   16-20 the exception mask and bits 0-4 the exception flags.

   NOTE(review): output goes to stdout via printf/fputs and
   print_fpu_flags; the FILE parameter is currently unused — confirm
   whether output redirection should honor FILE.  */
static void
arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
		      struct frame_info *frame, const char *args)
{
  unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
  int type;

  type = (status >> 24) & 127;
  if (status & (1 << 31))
    printf (_("Hardware FPU type %d\n"), type);
  else
    printf (_("Software FPU type %d\n"), type);
  /* i18n: [floating point unit] mask */
  fputs (_("mask: "), stdout);
  print_fpu_flags (status >> 16);
  /* i18n: [floating point unit] flags */
  fputs (_("flags: "), stdout);
  print_fpu_flags (status);
}
+
+/* Construct the ARM extended floating point type. */
+static struct type *
+arm_ext_type (struct gdbarch *gdbarch)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+ if (!tdep->arm_ext_type)
+ tdep->arm_ext_type
+ = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
+ floatformats_arm_ext);
+
+ return tdep->arm_ext_type;
+}
+
/* Return the 64-bit NEON "D" register type: a union of the possible
   views of a D register (vectors of u8/u16/u32/f32, or scalar
   u64/f64).  Built once and cached on the tdep.  */

static struct type *
arm_neon_double_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_double_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", elem);
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", elem);

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_d";
      tdep->neon_double_type = t;
    }

  return tdep->neon_double_type;
}
+
/* FIXME: The vector types are not correctly ordered on big-endian
   targets.  Just as s0 is the low bits of d0, d0[0] is also the low
   bits of d0 - regardless of what unit size is being held in d0.  So
   the offset of the first uint8 in d0 is 7, but the offset of the
   first float is 4.  This code works as-is for little-endian
   targets.  */

/* Return the 128-bit NEON "Q" register type: a union of the possible
   vector views of a Q register.  Built once and cached on the tdep.  */

static struct type *
arm_neon_quad_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_quad_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 16));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", init_vector_type (elem, 2));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_q";
      tdep->neon_quad_type = t;
    }

  return tdep->neon_quad_type;
}
+
/* Return the GDB type object for the "standard" data type of data in
   register N.  Pseudo registers (VFP singles, NEON quads) come first;
   then target-description types (overridden for NEON doubles); then
   fixed defaults for FPA, SP and PC.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* The first 32 pseudo registers are the single-precision VFP views.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* The next 16 pseudo registers are the NEON quad views.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
+
/* Map a DWARF register REGNUM onto the appropriate GDB register
   number.  Returns -1 for DWARF numbers with no GDB equivalent.  */

static int
arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  /* Core integer regs.  */
  if (reg >= 0 && reg <= 15)
    return reg;

  /* Legacy FPA encoding.  These were once used in a way which
     overlapped with VFP register numbering, so their use is
     discouraged, but GDB doesn't support the ARM toolchain
     which used them for VFP.  */
  if (reg >= 16 && reg <= 23)
    return ARM_F0_REGNUM + reg - 16;

  /* New assignments for the FPA registers.  */
  if (reg >= 96 && reg <= 103)
    return ARM_F0_REGNUM + reg - 96;

  /* WMMX register assignments.  */
  if (reg >= 104 && reg <= 111)
    return ARM_WCGR0_REGNUM + reg - 104;

  if (reg >= 112 && reg <= 127)
    return ARM_WR0_REGNUM + reg - 112;

  if (reg >= 192 && reg <= 199)
    return ARM_WC0_REGNUM + reg - 192;

  /* VFP v2 registers.  A double precision value is actually
     in d1 rather than s2, but the ABI only defines numbering
     for the single precision registers.  This will "just work"
     in GDB for little endian targets (we'll read eight bytes,
     starting in s0 and then progressing to s1), but will be
     reversed on big endian targets with VFP.  This won't
     be a problem for the new Neon quad registers; you're supposed
     to use DW_OP_piece for those.  */
  if (reg >= 64 && reg <= 95)
    {
      /* Register names are at most "s31" plus NUL, so 4 bytes suffice.  */
      char name_buf[4];

      sprintf (name_buf, "s%d", reg - 64);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  /* VFP v3 / Neon registers.  This range is also used for VFP v2
     registers, except that it now describes d0 instead of s0.  */
  if (reg >= 256 && reg <= 287)
    {
      /* Register names are at most "d31" plus NUL, so 4 bytes suffice.  */
      char name_buf[4];

      sprintf (name_buf, "d%d", reg - 256);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  return -1;
}
+
/* Map GDB internal REGNUM onto the Arm simulator register numbers.
   iWMMXt registers are handled first; the remaining registers are
   translated band by band (general, FPA, then status).  */
static int
arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
{
  int reg = regnum;
  gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));

  if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
    return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;

  if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
    return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;

  if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
    return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;

  /* REG is reduced band by band; whatever remains indexes into the
     next simulator register block.  */
  if (reg < NUM_GREGS)
    return SIM_ARM_R0_REGNUM + reg;
  reg -= NUM_GREGS;

  if (reg < NUM_FREGS)
    return SIM_ARM_FP0_REGNUM + reg;
  reg -= NUM_FREGS;

  if (reg < NUM_SREGS)
    return SIM_ARM_FPS_REGNUM + reg;
  reg -= NUM_SREGS;

  internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
}
+
/* NOTE: cagney/2001-08-20: Both convert_from_extended() and
   convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
   It is thought that this is the floating-point register format on
   little-endian systems.  */
+
+static void
+convert_from_extended (const struct floatformat *fmt, const void *ptr,
+ void *dbl, int endianess)
+{
+ DOUBLEST d;
+
+ if (endianess == BFD_ENDIAN_BIG)
+ floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
+ else
+ floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
+ ptr, &d);
+ floatformat_from_doublest (fmt, &d, dbl);
+}
+
+static void
+convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
+ int endianess)
+{
+ DOUBLEST d;
+
+ floatformat_to_doublest (fmt, ptr, &d);
+ if (endianess == BFD_ENDIAN_BIG)
+ floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
+ else
+ floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
+ &d, dbl);
+}
+
+static int
+condition_true (unsigned long cond, unsigned long status_reg)
+{
+ if (cond == INST_AL || cond == INST_NV)
+ return 1;
+
+ switch (cond)
+ {
+ case INST_EQ:
+ return ((status_reg & FLAG_Z) != 0);
+ case INST_NE:
+ return ((status_reg & FLAG_Z) == 0);
+ case INST_CS:
+ return ((status_reg & FLAG_C) != 0);
+ case INST_CC:
+ return ((status_reg & FLAG_C) == 0);
+ case INST_MI:
+ return ((status_reg & FLAG_N) != 0);
+ case INST_PL:
+ return ((status_reg & FLAG_N) == 0);
+ case INST_VS:
+ return ((status_reg & FLAG_V) != 0);
+ case INST_VC:
+ return ((status_reg & FLAG_V) == 0);
+ case INST_HI:
+ return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
+ case INST_LS:
+ return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
+ case INST_GE:
+ return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
+ case INST_LT:
+ return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
+ case INST_GT:
+ return (((status_reg & FLAG_Z) == 0)
+ && (((status_reg & FLAG_N) == 0)
+ == ((status_reg & FLAG_V) == 0)));
+ case INST_LE:
+ return (((status_reg & FLAG_Z) != 0)
+ || (((status_reg & FLAG_N) == 0)
+ != ((status_reg & FLAG_V) == 0)));
+ }
+ return 1;
+}
+
/* Evaluate the shifter operand of ARM data-processing instruction
   INST in FRAME and return its 32-bit value.  CARRY is the current C
   flag (consumed by RRX), PC_VAL is the address of the instruction,
   and STATUS_REG is the CPSR.  Only the operand value is computed;
   the shifter carry-out is not.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: the amount is the low byte of Rs;
	 reading the PC here yields the instruction address plus 8.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount from bits 7-11.  */
    shift = bits (inst, 7, 11);

  /* When Rm is the PC, the operand is the instruction address plus 12
     (register shift) or 8 (immediate shift).  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR: shifts of 32 or more replicate the sign bit.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX: a zero rotate encodes RRX (rotate through carry).  */
      shift &= 31;
      if (shift == 0)
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  /* Mask down in case unsigned long is wider than 32 bits.  */
  return res & 0xffffffff;
}
+
/* Return number of 1-bits in VAL. */

static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Kernighan's trick: each `val &= val - 1' clears the lowest set
     bit, so the loop runs once per 1-bit.  */
  while (val != 0)
    {
      val &= val - 1;
      count++;
    }

  return count;
}
+
/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.  */

static int
thumb_insn_size (unsigned short inst1)
{
  /* Halfwords whose top five bits are 0b11101, 0b11110 or 0b11111
     start a 32-bit Thumb-2 instruction; every other encoding is a
     16-bit instruction.  */
  return ((inst1 & 0xf800) >= 0xe800) ? 4 : 2;
}
+
/* Advance the Thumb-2 IT-block state ITSTATE past one instruction and
   return the new state.  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  /* IT[7:5] hold the first three bits of the base condition and are
     preserved; the remaining condition/length bits shift left.  */
  unsigned int base = itstate & 0xe0;
  unsigned int mask = (itstate << 1) & 0x1f;

  /* An empty low nibble means the IT block has finished.  */
  if ((mask & 0x0f) == 0)
    return 0;

  return base | mask;
}
+
/* Find the next PC after the current instruction executes.  In some
   cases we can not statically determine the answer (see the IT state
   handling in this function); in that case, a breakpoint may be
   inserted in addition to the returned PC, which will be used to set
   another breakpoint by our caller.

   FRAME is the frame the instruction executes in and PC is the
   address of the (Thumb) instruction.  If INSERT_BKPT is non-zero we
   may plant the extra single-step breakpoint ourselves.  The return
   value carries the Thumb bit (see MAKE_THUMB_ADDR).  */

static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;	/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  /* Skip over any instructions disabled by the IT block.  */
	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      if (insert_bkpt)
	        insert_single_step_breakpoint (gdbarch, aspace, pc);
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	{
	  /* Advance to the next instruction.  All the 32-bit
	     instructions share a common prefix.  */
	  if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
	    return MAKE_THUMB_ADDR (pc + 4);
	  else
	    return MAKE_THUMB_ADDR (pc + 2);
	}

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
         all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* Let the OS ABI hook compute the PC after the system
	     call, if one is registered.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if ((inst1 & 0xe000) == 0xe000)	/* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      /* J1/J2 are XORed with the sign bit to recover the top
		 two offset bits (Thumb-2 branch encoding).  */
	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  /* NOTE: this OFFSET shadows the function-level variable of
	     the same name; both are local scratch.  */
	  int rn, offset, load_pc = 1;

	  /* Bits 7 and 8 of INST1 distinguish the four addressing
	     variants.  */
	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    /* Unreachable: the four cases above cover all bit
	       combinations.  */
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* PC-relative load: the base is the word-aligned PC,
		 plus or minus a 12-bit immediate.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      /* Pre- or post-indexed with 8-bit immediate.  */
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset, optionally shifted left 0-3.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;	/* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  /* Byte-sized table entries, scaled by 2.  */
	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;	/* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  /* Halfword-sized table entries, scaled by 2.  */
	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      /* Bit 11 distinguishes CBNZ (taken on non-zero) from CBZ.  */
      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
+
+/* Get the raw next address. PC is the current program counter, in
+ FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
+ the alternative next instruction if there are two options.
+
+ The value returned has the execution state of the next instruction
+ encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
+ in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
+ address. */
+
+static CORE_ADDR
+arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
+{
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+ unsigned long pc_val;
+ unsigned long this_instr;
+ unsigned long status;
+ CORE_ADDR nextpc;
+
+ if (arm_frame_is_thumb (frame))
+ return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
+
+ pc_val = (unsigned long) pc;
+ this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
+
+ status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
+ nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
+
+ if (bits (this_instr, 28, 31) == INST_NV)
+ switch (bits (this_instr, 24, 27))
+ {
+ case 0xa:
+ case 0xb:
+ {
+ /* Branch with Link and change to Thumb. */
+ nextpc = BranchDest (pc, this_instr);
+ nextpc |= bit (this_instr, 24) << 1;
+ nextpc = MAKE_THUMB_ADDR (nextpc);
+ break;
+ }
+ case 0xc:
+ case 0xd:
+ case 0xe:
+ /* Coprocessor register transfer. */
+ if (bits (this_instr, 12, 15) == 15)
+ error (_("Invalid update to pc in instruction"));
+ break;
+ }
+ else if (condition_true (bits (this_instr, 28, 31), status))
+ {
+ switch (bits (this_instr, 24, 27))
+ {
+ case 0x0:
+ case 0x1: /* data processing */
+ case 0x2:
+ case 0x3:
+ {
+ unsigned long operand1, operand2, result = 0;
+ unsigned long rn;
+ int c;
+
+ if (bits (this_instr, 12, 15) != 15)
+ break;
+
+ if (bits (this_instr, 22, 25) == 0
+ && bits (this_instr, 4, 7) == 9) /* multiply */
+ error (_("Invalid update to pc in instruction"));
+
+ /* BX <reg>, BLX <reg> */
+ if (bits (this_instr, 4, 27) == 0x12fff1
+ || bits (this_instr, 4, 27) == 0x12fff3)
+ {
+ rn = bits (this_instr, 0, 3);
+ nextpc = ((rn == ARM_PC_REGNUM)
+ ? (pc_val + 8)
+ : get_frame_register_unsigned (frame, rn));
+
+ return nextpc;
+ }
+
+ /* Multiply into PC. */
+ c = (status & FLAG_C) ? 1 : 0;
+ rn = bits (this_instr, 16, 19);
+ operand1 = ((rn == ARM_PC_REGNUM)
+ ? (pc_val + 8)
+ : get_frame_register_unsigned (frame, rn));
+
+ if (bit (this_instr, 25))
+ {
+ unsigned long immval = bits (this_instr, 0, 7);
+ unsigned long rotate = 2 * bits (this_instr, 8, 11);
+ operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
+ & 0xffffffff;
+ }
+ else /* operand 2 is a shifted register. */
+ operand2 = shifted_reg_val (frame, this_instr, c,
+ pc_val, status);
+
+ switch (bits (this_instr, 21, 24))
+ {
+ case 0x0: /*and */
+ result = operand1 & operand2;
+ break;
+
+ case 0x1: /*eor */
+ result = operand1 ^ operand2;
+ break;
+
+ case 0x2: /*sub */
+ result = operand1 - operand2;
+ break;
+
+ case 0x3: /*rsb */
+ result = operand2 - operand1;
+ break;
+
+ case 0x4: /*add */
+ result = operand1 + operand2;
+ break;
+
+ case 0x5: /*adc */
+ result = operand1 + operand2 + c;
+ break;