X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=gdb%2Farm-tdep.c;h=bcee29cca44652256d4b54adc9a9bc720b2fcb11;hb=791bb1f4a6310cd7f894e370607dfc05c9cb0727;hp=4df28464be8f76bf9976f6dbaba067b2ffcb0a6e;hpb=3b7344d5ab495cd82b6c72ec5e00d018549837fb;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c index 4df28464be..bcee29cca4 100644 --- a/gdb/arm-tdep.c +++ b/gdb/arm-tdep.c @@ -1,6 +1,6 @@ /* Common target dependent code for GDB on ARM systems. - Copyright (C) 1988-2014 Free Software Foundation, Inc. + Copyright (C) 1988-2015 Free Software Foundation, Inc. This file is part of GDB. @@ -23,9 +23,9 @@ #include "frame.h" #include "inferior.h" +#include "infrun.h" #include "gdbcmd.h" #include "gdbcore.h" -#include #include "dis-asm.h" /* For register styles. */ #include "regcache.h" #include "reggroups.h" @@ -52,7 +52,6 @@ #include "coff/internal.h" #include "elf/arm.h" -#include "gdb_assert.h" #include "vec.h" #include "record.h" @@ -472,7 +471,7 @@ skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb) msym = lookup_minimal_symbol_by_pc (pc); if (msym.minsym != NULL - && MSYMBOL_VALUE_ADDRESS (msym.minsym) == pc + && BMSYMBOL_VALUE_ADDRESS (msym) == pc && MSYMBOL_LINKAGE_NAME (msym.minsym) != NULL) { const char *name = MSYMBOL_LINKAGE_NAME (msym.minsym); @@ -485,15 +484,15 @@ skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb) /* On soft-float targets, __truncdfsf2 is called to convert promoted arguments to their argument types in non-prototyped functions. */ - if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0) + if (startswith (name, "__truncdfsf2")) return 1; - if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0) + if (startswith (name, "__aeabi_d2f")) return 1; /* Internal functions related to thread-local storage. */ - if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0) + if (startswith (name, "__tls_get_addr")) return 1; - if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0) + if (startswith (name, "__aeabi_read_tp")) return 1; } else @@ -684,6 +683,17 @@ thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2) return 0; } +/* Return 1 if the 16-bit Thumb instruction INSN restores SP in + epilogue, 0 otherwise. */ + +static int +thumb_instruction_restores_sp (unsigned short insn) +{ + return (insn == 0x46bd /* mov sp, r7 */ + || (insn & 0xff80) == 0xb000 /* add sp, imm */ + || (insn & 0xfe00) == 0xbc00); /* pop */ +} + /* Analyze a Thumb prologue, looking for a recognizable stack frame and frame pointer. Scan until we encounter a store that could clobber the stack frame unexpectedly, or an unknown instruction. @@ -736,16 +746,16 @@ thumb_analyze_prologue (struct gdbarch *gdbarch, pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]); } } - else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR - sub sp, #simm */ + else if ((insn & 0xff80) == 0xb080) /* sub sp, #imm */ { offset = (insn & 0x7f) << 2; /* get scaled offset */ - if (insn & 0x80) /* Check for SUB. */ - regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], - -offset); - else - regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], - offset); + regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], + -offset); + } + else if (thumb_instruction_restores_sp (insn)) + { + /* Don't scan past the epilogue. 
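+	     An instruction recognized by thumb_instruction_restores_sp
+	     above (mov sp, r7; add sp, #imm; pop) marks the start of
+	     the epilogue, so stop the prologue scan here.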
*/ + break; } else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */ regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM], @@ -1071,7 +1081,7 @@ thumb_analyze_prologue (struct gdbarch *gdbarch, unsigned int constant; CORE_ADDR loc; - offset = bits (insn, 0, 11); + offset = bits (inst2, 0, 11); if (insn & 0x0080) loc = start + 4 + offset; else @@ -1087,7 +1097,7 @@ thumb_analyze_prologue (struct gdbarch *gdbarch, unsigned int constant; CORE_ADDR loc; - offset = bits (insn, 0, 7) << 2; + offset = bits (inst2, 0, 7) << 2; if (insn & 0x0080) loc = start + 4 + offset; else @@ -1194,7 +1204,9 @@ arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch, { *destreg = bits (insn1, 8, 10); *offset = 2; - address = bits (insn1, 0, 7); + address = (pc & 0xfffffffc) + 4 + (bits (insn1, 0, 7) << 2); + address = read_memory_unsigned_integer (address, 4, + byte_order_for_code); } else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */ { @@ -1223,9 +1235,12 @@ arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch, unsigned int insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code); - if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */ + if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, [PC, #immed] */ { - address = bits (insn, 0, 11); + address = bits (insn, 0, 11) + pc + 8; + address = read_memory_unsigned_integer (address, 4, + byte_order_for_code); + *destreg = bits (insn, 12, 15); *offset = 4; } @@ -1296,13 +1311,10 @@ arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch) return pc; stack_chk_guard = lookup_minimal_symbol_by_pc (addr); - /* If name of symbol doesn't start with '__stack_chk_guard', this - instruction sequence is not for stack protector. If symbol is - removed, we conservatively think this sequence is for stack protector. */ - if (stack_chk_guard.minsym - && strncmp (MSYMBOL_LINKAGE_NAME (stack_chk_guard.minsym), - "__stack_chk_guard", - strlen ("__stack_chk_guard")) != 0) + /* ADDR must correspond to a symbol whose name is __stack_chk_guard. + Otherwise, this sequence cannot be for stack protector. */ + if (stack_chk_guard.minsym == NULL + || !startswith (MSYMBOL_LINKAGE_NAME (stack_chk_guard.minsym), "__stack_chk_guard")) return pc; if (is_thumb) @@ -1374,7 +1386,6 @@ arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc) { enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); unsigned long inst; - CORE_ADDR skip_pc; CORE_ADDR func_addr, limit_pc; /* See if we can determine the end of the prologue via the symbol table. @@ -1384,7 +1395,7 @@ arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc) { CORE_ADDR post_prologue_pc = skip_prologue_using_sal (gdbarch, func_addr); - struct symtab *s = find_pc_symtab (func_addr); + struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr); if (post_prologue_pc) post_prologue_pc @@ -1398,10 +1409,10 @@ arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc) will have producer information for most binaries; if it is missing (e.g. for -gstabs), assuming the GNU tools. 
*/ if (post_prologue_pc - && (s == NULL - || s->producer == NULL - || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0 - || strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0)) + && (cust == NULL + || COMPUNIT_PRODUCER (cust) == NULL + || startswith (COMPUNIT_PRODUCER (cust), "GNU ") + || startswith (COMPUNIT_PRODUCER (cust), "clang "))) return post_prologue_pc; if (post_prologue_pc != 0) @@ -1446,65 +1457,8 @@ arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc) /* Check if this is Thumb code. */ if (arm_pc_is_thumb (gdbarch, pc)) return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL); - - for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4) - { - inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code); - - /* "mov ip, sp" is no longer a required part of the prologue. */ - if (inst == 0xe1a0c00d) /* mov ip, sp */ - continue; - - if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */ - continue; - - if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */ - continue; - - /* Some prologues begin with "str lr, [sp, #-4]!". */ - if (inst == 0xe52de004) /* str lr, [sp, #-4]! */ - continue; - - if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */ - continue; - - if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */ - continue; - - /* Any insns after this point may float into the code, if it makes - for better instruction scheduling, so we skip them only if we - find them, but still consider the function to be frame-ful. */ - - /* We may have either one sfmfd instruction here, or several stfe - insns, depending on the version of floating point code we - support. */ - if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, , [sp]! */ - continue; - - if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */ - continue; - - if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */ - continue; - - if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */ - continue; - - if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */ - || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */ - || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */ - continue; - - if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */ - || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */ - || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */ - continue; - - /* Un-recognized instruction; stop scanning. */ - break; - } - - return skip_pc; /* End of prologue. */ + else + return arm_analyze_prologue (gdbarch, pc, limit_pc, NULL); } /* *INDENT-OFF* */ @@ -1648,6 +1602,30 @@ arm_instruction_changes_pc (uint32_t this_instr) } } +/* Return 1 if the ARM instruction INSN restores SP in epilogue, 0 + otherwise. */ + +static int +arm_instruction_restores_sp (unsigned int insn) +{ + if (bits (insn, 28, 31) != INST_NV) + { + if ((insn & 0x0df0f000) == 0x0080d000 + /* ADD SP (register or immediate). */ + || (insn & 0x0df0f000) == 0x0040d000 + /* SUB SP (register or immediate). */ + || (insn & 0x0ffffff0) == 0x01a0d000 + /* MOV SP. */ + || (insn & 0x0fff0000) == 0x08bd0000 + /* POP (LDMIA). */ + || (insn & 0x0fff0000) == 0x049d0000) + /* POP of a single register. */ + return 1; + } + + return 0; +} + /* Analyze an ARM mode prologue starting at PROLOGUE_START and continuing no further than PROLOGUE_END. If CACHE is non-NULL, fill it in. 
Return the first address not recognized as a prologue @@ -1670,7 +1648,6 @@ arm_analyze_prologue (struct gdbarch *gdbarch, pv_t regs[ARM_FPS_REGNUM]; struct pv_area *stack; struct cleanup *back_to; - int framereg, framesize; CORE_ADDR unrecognized_pc = 0; /* Search the prologue looking for instructions that set up the @@ -1846,6 +1823,11 @@ arm_analyze_prologue (struct gdbarch *gdbarch, else if (arm_instruction_changes_pc (insn)) /* Don't scan past anything that might change control flow. */ break; + else if (arm_instruction_restores_sp (insn)) + { + /* Don't scan past the epilogue. */ + break; + } else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */ && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM)) /* Ignore block loads from the stack, potentially copying @@ -1861,33 +1843,42 @@ arm_analyze_prologue (struct gdbarch *gdbarch, continue; else { - /* The optimizer might shove anything into the prologue, - so we just skip what we don't recognize. */ + /* The optimizer might shove anything into the prologue, if + we build up cache (cache != NULL) from scanning prologue, + we just skip what we don't recognize and scan further to + make cache as complete as possible. However, if we skip + prologue, we'll stop immediately on unrecognized + instruction. */ unrecognized_pc = current_pc; - continue; + if (cache != NULL) + continue; + else + break; } } if (unrecognized_pc == 0) unrecognized_pc = current_pc; - /* The frame size is just the distance from the frame register - to the original stack pointer. */ - if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM)) - { - /* Frame pointer is fp. */ - framereg = ARM_FP_REGNUM; - framesize = -regs[ARM_FP_REGNUM].k; - } - else - { - /* Try the stack pointer... this is a bit desperate. */ - framereg = ARM_SP_REGNUM; - framesize = -regs[ARM_SP_REGNUM].k; - } - if (cache) { + int framereg, framesize; + + /* The frame size is just the distance from the frame register + to the original stack pointer. */ + if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM)) + { + /* Frame pointer is fp. */ + framereg = ARM_FP_REGNUM; + framesize = -regs[ARM_FP_REGNUM].k; + } + else + { + /* Try the stack pointer... this is a bit desperate. */ + framereg = ARM_SP_REGNUM; + framesize = -regs[ARM_SP_REGNUM].k; + } + cache->framereg = framereg; cache->framesize = framesize; @@ -2026,6 +2017,31 @@ arm_make_prologue_cache (struct frame_info *this_frame) return cache; } +/* Implementation of the stop_reason hook for arm_prologue frames. */ + +static enum unwind_stop_reason +arm_prologue_unwind_stop_reason (struct frame_info *this_frame, + void **this_cache) +{ + struct arm_prologue_cache *cache; + CORE_ADDR pc; + + if (*this_cache == NULL) + *this_cache = arm_make_prologue_cache (this_frame); + cache = *this_cache; + + /* This is meant to halt the backtrace at "_start". */ + pc = get_frame_pc (this_frame); + if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc) + return UNWIND_OUTERMOST; + + /* If we've hit a wall, stop. */ + if (cache->prev_sp == 0) + return UNWIND_OUTERMOST; + + return UNWIND_NO_REASON; +} + /* Our frame ID for a normal frame is the current function's starting PC and the caller's SP when we were called. */ @@ -2042,18 +2058,10 @@ arm_prologue_this_id (struct frame_info *this_frame, *this_cache = arm_make_prologue_cache (this_frame); cache = *this_cache; - /* This is meant to halt the backtrace at "_start". 
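+     A PC at or below tdep->lowest_pc cannot belong to a normal
+     caller, so the frame is reported as outermost.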
*/ - pc = get_frame_pc (this_frame); - if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc) - return; - - /* If we've hit a wall, stop. */ - if (cache->prev_sp == 0) - return; - /* Use function start address as part of the frame ID. If we cannot identify the start address (due to missing symbol information), fall back to just using the current PC. */ + pc = get_frame_pc (this_frame); func = get_frame_func (this_frame); if (!func) func = pc; @@ -2122,7 +2130,7 @@ arm_prologue_prev_register (struct frame_info *this_frame, struct frame_unwind arm_prologue_unwind = { NORMAL_FRAME, - default_frame_unwind_stop_reason, + arm_prologue_unwind_stop_reason, arm_prologue_this_id, arm_prologue_prev_register, NULL, @@ -2869,6 +2877,64 @@ struct frame_unwind arm_exidx_unwind = { arm_exidx_unwind_sniffer }; +/* Recognize GCC's trampoline for thumb call-indirect. If we are in a + trampoline, return the target PC. Otherwise return 0. + + void call0a (char c, short s, int i, long l) {} + + int main (void) + { + (*pointer_to_call0a) (c, s, i, l); + } + + Instead of calling a stub library function _call_via_xx (xx is + the register name), GCC may inline the trampoline in the object + file as below (register r2 has the address of call0a). + + .global main + .type main, %function + ... + bl .L1 + ... + .size main, .-main + + .L1: + bx r2 + + The trampoline 'bx r2' doesn't belong to main. */ + +static CORE_ADDR +arm_skip_bx_reg (struct frame_info *frame, CORE_ADDR pc) +{ + /* The heuristics of recognizing such trampoline is that FRAME is + executing in Thumb mode and the instruction on PC is 'bx Rm'. */ + if (arm_frame_is_thumb (frame)) + { + gdb_byte buf[2]; + + if (target_read_memory (pc, buf, 2) == 0) + { + struct gdbarch *gdbarch = get_frame_arch (frame); + enum bfd_endian byte_order_for_code + = gdbarch_byte_order_for_code (gdbarch); + uint16_t insn + = extract_unsigned_integer (buf, 2, byte_order_for_code); + + if ((insn & 0xff80) == 0x4700) /* bx */ + { + CORE_ADDR dest + = get_frame_register_unsigned (frame, bits (insn, 3, 6)); + + /* Clear the LSB so that gdb core sets step-resume + breakpoint at the right address. */ + return UNMAKE_THUMB_ADDR (dest); + } + } + } + + return 0; +} + static struct arm_prologue_cache * arm_make_stub_cache (struct frame_info *this_frame) { @@ -2905,12 +2971,19 @@ arm_stub_unwind_sniffer (const struct frame_unwind *self, { CORE_ADDR addr_in_block; gdb_byte dummy[4]; + CORE_ADDR pc, start_addr; + const char *name; addr_in_block = get_frame_address_in_block (this_frame); + pc = get_frame_pc (this_frame); if (in_plt_section (addr_in_block) /* We also use the stub winder if the target memory is unreadable to avoid having the prologue unwinder trying to read it. */ - || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0) + || target_read_memory (pc, dummy, 4) != 0) + return 1; + + if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0 + && arm_skip_bx_reg (this_frame, pc) != 0) return 1; return 0; @@ -3150,11 +3223,10 @@ arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum, } } -/* Return true if we are in the function's epilogue, i.e. after the - instruction that destroyed the function's stack frame. */ +/* Implement the stack_frame_destroyed_p gdbarch method. 
*/ static int -thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc) +thumb_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc) { enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); unsigned int insn, insn2; @@ -3196,14 +3268,9 @@ thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc) found_return = 1; else if (insn == 0x46f7) /* mov pc, lr */ found_return = 1; - else if (insn == 0x46bd) /* mov sp, r7 */ - found_stack_adjust = 1; - else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */ - found_stack_adjust = 1; - else if ((insn & 0xfe00) == 0xbc00) /* pop */ - { - found_stack_adjust = 1; - if (insn & 0x0100) /* include PC. */ + else if (thumb_instruction_restores_sp (insn)) + { + if ((insn & 0xff00) == 0xbd00) /* pop */ found_return = 1; } else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instruction */ @@ -3216,20 +3283,18 @@ thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc) if (insn == 0xe8bd) /* ldm.w sp!, */ { - found_stack_adjust = 1; if (insn2 & 0x8000) /* include PC. */ found_return = 1; } else if (insn == 0xf85d /* ldr.w , [sp], #4 */ && (insn2 & 0x0fff) == 0x0b04) { - found_stack_adjust = 1; if ((insn2 & 0xf000) == 0xf000) /* is PC. */ found_return = 1; } else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, */ && (insn2 & 0x0e00) == 0x0a00) - found_stack_adjust = 1; + ; else break; } @@ -3246,48 +3311,40 @@ thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc) a 32-bit instruction. This is just a heuristic, so we do not worry too much about false positives. */ - if (!found_stack_adjust) - { - if (pc - 4 < func_start) - return 0; - if (target_read_memory (pc - 4, buf, 4)) - return 0; + if (pc - 4 < func_start) + return 0; + if (target_read_memory (pc - 4, buf, 4)) + return 0; - insn = extract_unsigned_integer (buf, 2, byte_order_for_code); - insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code); - - if (insn2 == 0x46bd) /* mov sp, r7 */ - found_stack_adjust = 1; - else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */ - found_stack_adjust = 1; - else if ((insn2 & 0xff00) == 0xbc00) /* pop without PC */ - found_stack_adjust = 1; - else if (insn == 0xe8bd) /* ldm.w sp!, */ - found_stack_adjust = 1; - else if (insn == 0xf85d /* ldr.w , [sp], #4 */ - && (insn2 & 0x0fff) == 0x0b04) - found_stack_adjust = 1; - else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, */ - && (insn2 & 0x0e00) == 0x0a00) - found_stack_adjust = 1; - } + insn = extract_unsigned_integer (buf, 2, byte_order_for_code); + insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code); + + if (thumb_instruction_restores_sp (insn2)) + found_stack_adjust = 1; + else if (insn == 0xe8bd) /* ldm.w sp!, */ + found_stack_adjust = 1; + else if (insn == 0xf85d /* ldr.w , [sp], #4 */ + && (insn2 & 0x0fff) == 0x0b04) + found_stack_adjust = 1; + else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, */ + && (insn2 & 0x0e00) == 0x0a00) + found_stack_adjust = 1; return found_stack_adjust; } -/* Return true if we are in the function's epilogue, i.e. after the - instruction that destroyed the function's stack frame. */ +/* Implement the stack_frame_destroyed_p gdbarch method. 
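+   For ARM mode the key test is whether the instruction immediately
+   before PC restored SP, as recognized by arm_instruction_restores_sp.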
*/ static int -arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc) +arm_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc) { enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); unsigned int insn; - int found_return, found_stack_adjust; + int found_return; CORE_ADDR func_start, func_end; if (arm_pc_is_thumb (gdbarch, pc)) - return thumb_in_function_epilogue_p (gdbarch, pc); + return thumb_stack_frame_destroyed_p (gdbarch, pc); if (!find_pc_partial_function (pc, NULL, &func_start, &func_end)) return 0; @@ -3323,28 +3380,8 @@ arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc) if (pc < func_start + 4) return 0; - found_stack_adjust = 0; insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code); - if (bits (insn, 28, 31) != INST_NV) - { - if ((insn & 0x0df0f000) == 0x0080d000) - /* ADD SP (register or immediate). */ - found_stack_adjust = 1; - else if ((insn & 0x0df0f000) == 0x0040d000) - /* SUB SP (register or immediate). */ - found_stack_adjust = 1; - else if ((insn & 0x0ffffff0) == 0x01a0d000) - /* MOV SP. */ - found_stack_adjust = 1; - else if ((insn & 0x0fff0000) == 0x08bd0000) - /* POP (LDMIA). */ - found_stack_adjust = 1; - else if ((insn & 0x0fff0000) == 0x049d0000) - /* POP of a single register. */ - found_stack_adjust = 1; - } - - if (found_stack_adjust) + if (arm_instruction_restores_sp (insn)) return 1; return 0; @@ -3365,7 +3402,7 @@ static struct stack_item * push_stack_item (struct stack_item *prev, const void *contents, int len) { struct stack_item *si; - si = xmalloc (sizeof (struct stack_item)); + si = XNEW (struct stack_item); si->data = xmalloc (len); si->len = len; si->prev = prev; @@ -3495,8 +3532,8 @@ arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b) classified from *BASE_TYPE, or two types differently classified from each other, return -1, otherwise return the total number of base-type elements found (possibly 0 in an empty structure or - array). Vectors and complex types are not currently supported, - matching the generic AAPCS support. */ + array). Vector types are not currently supported, matching the + generic AAPCS support. */ static int arm_vfp_cprc_sub_candidate (struct type *t, @@ -3527,6 +3564,38 @@ arm_vfp_cprc_sub_candidate (struct type *t, } break; + case TYPE_CODE_COMPLEX: + /* Arguments of complex T where T is one of the types float or + double get treated as if they are implemented as: + + struct complexT + { + T real; + T imag; + }; + + */ + switch (TYPE_LENGTH (t)) + { + case 8: + if (*base_type == VFP_CPRC_UNKNOWN) + *base_type = VFP_CPRC_SINGLE; + else if (*base_type != VFP_CPRC_SINGLE) + return -1; + return 2; + + case 16: + if (*base_type == VFP_CPRC_UNKNOWN) + *base_type = VFP_CPRC_DOUBLE; + else if (*base_type != VFP_CPRC_DOUBLE) + return -1; + return 2; + + default: + return -1; + } + break; + case TYPE_CODE_ARRAY: { int count; @@ -4858,6 +4927,13 @@ arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc) case 0x5: /* data transfer */ case 0x6: case 0x7: + if (bits (this_instr, 25, 27) == 0x3 && bit (this_instr, 4) == 1) + { + /* Media instructions and architecturally undefined + instructions. 
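+	     Neither form is a single data transfer, so the load
+	     handling below does not apply.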
*/ + break; + } + if (bit (this_instr, 20)) { /* load */ @@ -6336,7 +6412,7 @@ install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs, Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2; r0, r1, r2 <- rd, rn, rm - Insn: r0, r1, r2 [, ] + Insn: r0, [r1,] r2 [, ] Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3 */ @@ -6383,22 +6459,21 @@ thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn, struct regcache *regs, struct displaced_step_closure *dsc) { - unsigned rn, rm, rd; + unsigned rm, rd; - rd = bits (insn, 3, 6); - rn = (bit (insn, 7) << 3) | bits (insn, 0, 2); - rm = 2; + rm = bits (insn, 3, 6); + rd = (bit (insn, 7) << 3) | bits (insn, 0, 2); - if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM) + if (rd != ARM_PC_REGNUM && rm != ARM_PC_REGNUM) return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc); if (debug_displaced) - fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n", - "ALU", (unsigned short) insn); + fprintf_unfiltered (gdb_stdlog, "displaced: copying ALU reg insn %.4x\n", + (unsigned short) insn); - dsc->modinsn[0] = ((insn & 0xff00) | 0x08); + dsc->modinsn[0] = ((insn & 0xff00) | 0x10); - install_alu_reg (gdbarch, regs, dsc, rd, rn, rm); + install_alu_reg (gdbarch, regs, dsc, rd, rd, rm); return 0; } @@ -8662,8 +8737,8 @@ arm_displaced_step_copy_insn (struct gdbarch *gdbarch, CORE_ADDR from, CORE_ADDR to, struct regcache *regs) { - struct displaced_step_closure *dsc - = xmalloc (sizeof (struct displaced_step_closure)); + struct displaced_step_closure *dsc = XNEW (struct displaced_step_closure); + arm_process_displaced_insn (gdbarch, from, to, regs, dsc); arm_displaced_init_closure (gdbarch, from, to, dsc); @@ -8933,7 +9008,7 @@ arm_return_in_memory (struct gdbarch *gdbarch, struct type *type) int nRc; enum type_code code; - CHECK_TYPEDEF (type); + type = check_typedef (type); /* In the ARM ABI, "integer" like aggregate types are returned in registers. For an aggregate type to be integer like, its size @@ -9225,15 +9300,23 @@ arm_skip_stub (struct frame_info *frame, CORE_ADDR pc) /* Find the starting address and name of the function containing the PC. */ if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0) - return 0; + { + /* Trampoline 'bx reg' doesn't belong to any functions. Do the + check here. */ + start_addr = arm_skip_bx_reg (frame, pc); + if (start_addr != 0) + return start_addr; + + return 0; + } /* If PC is in a Thumb call or return stub, return the address of the target PC, which is in a register. The thunk functions are called _call_via_xx, where x is the register name. The possible names are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar functions, named __ARM_call_via_r[0-7]. */ - if (strncmp (name, "_call_via_", 10) == 0 - || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0) + if (startswith (name, "_call_via_") + || startswith (name, "__ARM_call_via_")) { /* Use the name suffix to determine which register contains the target PC. 
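         For example, the stub "_call_via_r3" branches to the address
         held in r3, so the value of r3 in FRAME is returned as the
         target PC.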
*/ @@ -9255,11 +9338,9 @@ arm_skip_stub (struct frame_info *frame, CORE_ADDR pc) namelen = strlen (name); if (name[0] == '_' && name[1] == '_' && ((namelen > 2 + strlen ("_from_thumb") - && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb", - strlen ("_from_thumb")) == 0) + && startswith (name + namelen - strlen ("_from_thumb"), "_from_thumb")) || (namelen > 2 + strlen ("_from_arm") - && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm", - strlen ("_from_arm")) == 0))) + && startswith (name + namelen - strlen ("_from_arm"), "_from_arm")))) { char *target_name; int target_len = namelen - 2; @@ -9280,7 +9361,7 @@ arm_skip_stub (struct frame_info *frame, CORE_ADDR pc) objfile = (sec == NULL) ? NULL : sec->objfile; minsym = lookup_minimal_symbol (target_name, NULL, objfile); if (minsym.minsym != NULL) - return MSYMBOL_VALUE_ADDRESS (minsym.minsym); + return BMSYMBOL_VALUE_ADDRESS (minsym); else return 0; } @@ -9322,12 +9403,12 @@ static void set_fp_model_sfunc (char *args, int from_tty, struct cmd_list_element *c) { - enum arm_float_model fp_model; + int fp_model; for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++) if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0) { - arm_fp_model = fp_model; + arm_fp_model = (enum arm_float_model) fp_model; break; } @@ -9359,12 +9440,12 @@ static void arm_set_abi (char *args, int from_tty, struct cmd_list_element *c) { - enum arm_abi_kind arm_abi; + int arm_abi; for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++) if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0) { - arm_abi_global = arm_abi; + arm_abi_global = (enum arm_abi_kind) arm_abi; break; } @@ -9840,7 +9921,8 @@ arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) enum arm_float_model fp_model = arm_fp_model; struct tdesc_arch_data *tdesc_data = NULL; int i, is_m = 0; - int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0; + int vfp_register_count = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0; + int have_wmmx_registers = 0; int have_neon = 0; int have_fpa_registers = 1; const struct target_desc *tdesc = info.target_desc; @@ -9876,7 +9958,7 @@ arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) anyway, so assume APCS. */ arm_abi = ARM_ABI_APCS; } - else if (ei_osabi == ELFOSABI_NONE) + else if (ei_osabi == ELFOSABI_NONE || ei_osabi == ELFOSABI_GNU) { int eabi_ver = EF_ARM_EABI_VERSION (e_flags); int attr_arch, attr_profile; @@ -9902,27 +9984,34 @@ arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) OBJ_ATTR_PROC, Tag_ABI_VFP_args)) { - case 0: + case AEABI_VFP_args_base: /* "The user intended FP parameter/result passing to conform to AAPCS, base variant". */ fp_model = ARM_FLOAT_SOFT_VFP; break; - case 1: + case AEABI_VFP_args_vfp: /* "The user intended FP parameter/result passing to conform to AAPCS, VFP variant". */ fp_model = ARM_FLOAT_VFP; break; - case 2: + case AEABI_VFP_args_toolchain: /* "The user intended FP parameter/result passing to conform to tool chain-specific conventions" - we don't know any such conventions, so leave it as "auto". */ break; + case AEABI_VFP_args_compatible: + /* "Code is compatible with both the base + and VFP variants; the user did not permit + non-variadic functions to pass FP + parameters/results" - leave it as + "auto". */ + break; default: /* Attribute value not mentioned in the - October 2008 ABI, so leave it as + November 2012 ABI, so leave it as "auto". 
*/ break; } @@ -10097,6 +10186,8 @@ arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) tdesc_data_cleanup (tdesc_data); return NULL; } + + have_wmmx_registers = 1; } /* If we have a VFP unit, check whether the single precision registers @@ -10139,7 +10230,7 @@ arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) if (tdesc_unnumbered_register (feature, "s0") == 0) have_vfp_pseudos = 1; - have_vfp_registers = 1; + vfp_register_count = i; /* If we have VFP, also check for NEON. The architecture allows NEON without VFP (integer vector operations only), but GDB @@ -10199,7 +10290,7 @@ arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) return best_arch->gdbarch; } - tdep = xcalloc (1, sizeof (struct gdbarch_tdep)); + tdep = XCNEW (struct gdbarch_tdep); gdbarch = gdbarch_alloc (&info, tdep); /* Record additional information about the architecture we are defining. @@ -10208,7 +10299,11 @@ arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) tdep->fp_model = fp_model; tdep->is_m = is_m; tdep->have_fpa_registers = have_fpa_registers; - tdep->have_vfp_registers = have_vfp_registers; + tdep->have_wmmx_registers = have_wmmx_registers; + gdb_assert (vfp_register_count == 0 + || vfp_register_count == 16 + || vfp_register_count == 32); + tdep->vfp_register_count = vfp_register_count; tdep->have_vfp_pseudos = have_vfp_pseudos; tdep->have_neon_pseudos = have_neon_pseudos; tdep->have_neon = have_neon; @@ -10273,8 +10368,8 @@ arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) /* Advance PC across function entry code. */ set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue); - /* Detect whether PC is in function epilogue. */ - set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p); + /* Detect whether PC is at a point where the stack has been destroyed. */ + set_gdbarch_stack_frame_destroyed_p (gdbarch, arm_stack_frame_destroyed_p); /* Skip trampolines. */ set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub); @@ -10476,8 +10571,8 @@ _initialize_arm_tdep (void) /* Initialize the array that will be passed to add_setshow_enum_cmd(). */ - valid_disassembly_styles - = xmalloc ((num_disassembly_options + 1) * sizeof (char *)); + valid_disassembly_styles = XNEWVEC (const char *, + num_disassembly_options + 1); for (i = 0; i < num_disassembly_options; i++) { numregs = get_arm_regnames (i, &setname, &setdesc, ®names); @@ -10575,6 +10670,8 @@ vfp - VFP co-processor."), #define THUMB2_INSN_SIZE_BYTES 4 +/* Position of the bit within a 32-bit ARM instruction + that defines whether the instruction is a load or store. */ #define INSN_S_L_BIT_NUM 20 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \ @@ -11372,110 +11469,90 @@ arm_record_data_proc_imm (insn_decode_record *arm_insn_r) return 0; } -/* Handling opcode 010 insns. */ +/* Handle ARM mode instructions with opcode 010. */ static int arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r) { struct regcache *reg_cache = arm_insn_r->regcache; - uint32_t reg_src1 = 0 , reg_dest = 0; - uint32_t offset_12 = 0, tgt_mem_addr = 0; + uint32_t reg_base , reg_dest; + uint32_t offset_12, tgt_mem_addr; uint32_t record_buf[8], record_buf_mem[8]; + unsigned char wback; + ULONGEST u_regval; - ULONGEST u_regval = 0; + /* Calculate wback. 
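+     For these encodings writeback happens for post-indexed addressing
+     (P, bit 24, clear) and for pre-indexed addressing with writeback
+     (W, bit 21, set); plain offset addressing (P set, W clear) leaves
+     the base register unchanged.  The expression below encodes exactly
+     that.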
*/ + wback = (bit (arm_insn_r->arm_insn, 24) == 0) + || (bit (arm_insn_r->arm_insn, 21) == 1); - arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24); - arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7); + arm_insn_r->reg_rec_count = 0; + reg_base = bits (arm_insn_r->arm_insn, 16, 19); if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)) { + /* LDR (immediate), LDR (literal), LDRB (immediate), LDRB (literal), LDRBT + and LDRT. */ + reg_dest = bits (arm_insn_r->arm_insn, 12, 15); - /* LDR insn has a capability to do branching, if - MOV LR, PC is precedded by LDR insn having Rn as R15 - in that case, it emulates branch and link insn, and hence we - need to save CSPR and PC as well. */ - if (ARM_PC_REGNUM != reg_dest) - { - record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15); - arm_insn_r->reg_rec_count = 1; - } - else - { - record_buf[0] = reg_dest; - record_buf[1] = ARM_PS_REGNUM; - arm_insn_r->reg_rec_count = 2; - } + record_buf[arm_insn_r->reg_rec_count++] = reg_dest; + + /* The LDR instruction is capable of doing branching. If MOV LR, PC + preceeds a LDR instruction having R15 as reg_base, it + emulates a branch and link instruction, and hence we need to save + CPSR and PC as well. */ + if (ARM_PC_REGNUM == reg_dest) + record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM; + + /* If wback is true, also save the base register, which is going to be + written to. */ + if (wback) + record_buf[arm_insn_r->reg_rec_count++] = reg_base; } else { - /* Store, immediate offset, immediate pre-indexed, - immediate post-indexed. */ - reg_src1 = bits (arm_insn_r->arm_insn, 16, 19); + /* STR (immediate), STRB (immediate), STRBT and STRT. */ + offset_12 = bits (arm_insn_r->arm_insn, 0, 11); - regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval); - /* U == 1 */ + regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval); + + /* Handle bit U. */ if (bit (arm_insn_r->arm_insn, 23)) - { - tgt_mem_addr = u_regval + offset_12; - } + { + /* U == 1: Add the offset. */ + tgt_mem_addr = (uint32_t) u_regval + offset_12; + } else - { - tgt_mem_addr = u_regval - offset_12; - } + { + /* U == 0: subtract the offset. */ + tgt_mem_addr = (uint32_t) u_regval - offset_12; + } + + /* Bit 22 tells us whether the store instruction writes 1 byte or 4 + bytes. */ + if (bit (arm_insn_r->arm_insn, 22)) + { + /* STRB and STRBT: 1 byte. */ + record_buf_mem[0] = 1; + } + else + { + /* STR and STRT: 4 bytes. */ + record_buf_mem[0] = 4; + } + + /* Handle bit P. */ + if (bit (arm_insn_r->arm_insn, 24)) + record_buf_mem[1] = tgt_mem_addr; + else + record_buf_mem[1] = (uint32_t) u_regval; - switch (arm_insn_r->opcode) - { - /* STR. */ - case 8: - case 12: - /* STR. */ - case 9: - case 13: - /* STRT. */ - case 1: - case 5: - /* STR. */ - case 4: - case 0: - record_buf_mem[0] = 4; - break; - - /* STRB. */ - case 10: - case 14: - /* STRB. */ - case 11: - case 15: - /* STRBT. */ - case 3: - case 7: - /* STRB. 
*/ - case 2: - case 6: - record_buf_mem[0] = 1; - break; - - default: - gdb_assert_not_reached ("no decoding pattern found"); - break; - } - record_buf_mem[1] = tgt_mem_addr; arm_insn_r->mem_rec_count = 1; - if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode - || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode - || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode - || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode - || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode - || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode - ) - { - /* We are handling pre-indexed mode; post-indexed mode; - where Rn is going to be changed. */ - record_buf[0] = reg_src1; - arm_insn_r->reg_rec_count = 1; - } + /* If wback is true, also save the base register, which is going to be + written to. */ + if (wback) + record_buf[arm_insn_r->reg_rec_count++] = reg_base; } REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf); @@ -11746,134 +11823,99 @@ arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r) return 0; } -/* Handling opcode 100 insns. */ +/* Handle ARM mode instructions with opcode 100. */ static int arm_record_ld_st_multiple (insn_decode_record *arm_insn_r) { struct regcache *reg_cache = arm_insn_r->regcache; - - uint32_t register_list[16] = {0}, register_count = 0, register_bits = 0; - uint32_t reg_src1 = 0, addr_mode = 0, no_of_regs = 0; - uint32_t start_address = 0, index = 0; + uint32_t register_count = 0, register_bits; + uint32_t reg_base, addr_mode; uint32_t record_buf[24], record_buf_mem[48]; + uint32_t wback; + ULONGEST u_regval; - ULONGEST u_regval[2] = {0}; + /* Fetch the list of registers. */ + register_bits = bits (arm_insn_r->arm_insn, 0, 15); + arm_insn_r->reg_rec_count = 0; + + /* Fetch the base register that contains the address we are loading data + to. */ + reg_base = bits (arm_insn_r->arm_insn, 16, 19); - /* This mode is exclusively for load and store multiple. */ - /* Handle incremenrt after/before and decrment after.before mode; - Rn is changing depending on W bit, but as of now we store Rn too - without optimization. */ + /* Calculate wback. */ + wback = (bit (arm_insn_r->arm_insn, 21) == 1); if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)) { - /* LDM (1,2,3) where LDM (3) changes CPSR too. */ + /* LDM/LDMIA/LDMFD, LDMDA/LDMFA, LDMDB and LDMIB. */ - if (bit (arm_insn_r->arm_insn, 20) && !bit (arm_insn_r->arm_insn, 22)) - { - register_bits = bits (arm_insn_r->arm_insn, 0, 15); - no_of_regs = 15; - } - else - { - register_bits = bits (arm_insn_r->arm_insn, 0, 14); - no_of_regs = 14; - } - /* Get Rn. */ - reg_src1 = bits (arm_insn_r->arm_insn, 16, 19); + /* Find out which registers are going to be loaded from memory. */ while (register_bits) - { - if (register_bits & 0x00000001) - record_buf[index++] = register_count; - register_bits = register_bits >> 1; - register_count++; - } + { + if (register_bits & 0x00000001) + record_buf[arm_insn_r->reg_rec_count++] = register_count; + register_bits = register_bits >> 1; + register_count++; + } + + + /* If wback is true, also save the base register, which is going to be + written to. */ + if (wback) + record_buf[arm_insn_r->reg_rec_count++] = reg_base; - /* Extra space for Base Register and CPSR; wihtout optimization. */ - record_buf[index++] = reg_src1; - record_buf[index++] = ARM_PS_REGNUM; - arm_insn_r->reg_rec_count = index; + /* Save the CPSR register. */ + record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM; } else { - /* It handles both STM(1) and STM(2). 
*/ - addr_mode = bits (arm_insn_r->arm_insn, 23, 24); + /* STM (STMIA, STMEA), STMDA (STMED), STMDB (STMFD) and STMIB (STMFA). */ - register_bits = bits (arm_insn_r->arm_insn, 0, 15); - /* Get Rn. */ - reg_src1 = bits (arm_insn_r->arm_insn, 16, 19); - regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]); + addr_mode = bits (arm_insn_r->arm_insn, 23, 24); + + regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval); + + /* Find out how many registers are going to be stored to memory. */ while (register_bits) - { - if (register_bits & 0x00000001) - register_count++; - register_bits = register_bits >> 1; - } + { + if (register_bits & 0x00000001) + register_count++; + register_bits = register_bits >> 1; + } switch (addr_mode) - { - /* Decrement after. */ - case 0: - start_address = (u_regval[0]) - (register_count * 4) + 4; - arm_insn_r->mem_rec_count = register_count; - while (register_count) - { - record_buf_mem[(register_count * 2) - 1] = start_address; - record_buf_mem[(register_count * 2) - 2] = 4; - start_address = start_address + 4; - register_count--; - } - break; - - /* Increment after. */ - case 1: - start_address = u_regval[0]; - arm_insn_r->mem_rec_count = register_count; - while (register_count) - { - record_buf_mem[(register_count * 2) - 1] = start_address; - record_buf_mem[(register_count * 2) - 2] = 4; - start_address = start_address + 4; - register_count--; - } - break; - - /* Decrement before. */ - case 2: - - start_address = (u_regval[0]) - (register_count * 4); - arm_insn_r->mem_rec_count = register_count; - while (register_count) - { - record_buf_mem[(register_count * 2) - 1] = start_address; - record_buf_mem[(register_count * 2) - 2] = 4; - start_address = start_address + 4; - register_count--; - } - break; - - /* Increment before. */ - case 3: - start_address = u_regval[0] + 4; - arm_insn_r->mem_rec_count = register_count; - while (register_count) - { - record_buf_mem[(register_count * 2) - 1] = start_address; - record_buf_mem[(register_count * 2) - 2] = 4; - start_address = start_address + 4; - register_count--; - } - break; - - default: - gdb_assert_not_reached ("no decoding pattern found"); - break; - } + { + /* STMDA (STMED): Decrement after. */ + case 0: + record_buf_mem[1] = (uint32_t) u_regval + - register_count * INT_REGISTER_SIZE + 4; + break; + /* STM (STMIA, STMEA): Increment after. */ + case 1: + record_buf_mem[1] = (uint32_t) u_regval; + break; + /* STMDB (STMFD): Decrement before. */ + case 2: + record_buf_mem[1] = (uint32_t) u_regval + - register_count * INT_REGISTER_SIZE; + break; + /* STMIB (STMFA): Increment before. */ + case 3: + record_buf_mem[1] = (uint32_t) u_regval + INT_REGISTER_SIZE; + break; + default: + gdb_assert_not_reached ("no decoding pattern found"); + break; + } - /* Base register also changes; based on condition and W bit. */ - /* We save it anyway without optimization. */ - record_buf[0] = reg_src1; - arm_insn_r->reg_rec_count = 1; + record_buf_mem[0] = register_count * INT_REGISTER_SIZE; + arm_insn_r->mem_rec_count = 1; + + /* If wback is true, also save the base register, which is going to be + written to. */ + if (wback) + record_buf[arm_insn_r->reg_rec_count++] = reg_base; } REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf); @@ -11915,20 +11957,559 @@ arm_record_unsupported_insn (insn_decode_record *arm_insn_r) return -1; } +/* Record handler for vector data transfer instructions. 
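+   These are the VMOV, VMRS, VMSR and VDUP forms that move data between
+   ARM core registers and the VFP/NEON register file; the handler below
+   records whichever destination registers each form can modify.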
*/ + +static int +arm_record_vdata_transfer_insn (insn_decode_record *arm_insn_r) +{ + uint32_t bits_a, bit_c, bit_l, reg_t, reg_v; + uint32_t record_buf[4]; + + const int num_regs = gdbarch_num_regs (arm_insn_r->gdbarch); + reg_t = bits (arm_insn_r->arm_insn, 12, 15); + reg_v = bits (arm_insn_r->arm_insn, 21, 23); + bits_a = bits (arm_insn_r->arm_insn, 21, 23); + bit_l = bit (arm_insn_r->arm_insn, 20); + bit_c = bit (arm_insn_r->arm_insn, 8); + + /* Handle VMOV instruction. */ + if (bit_l && bit_c) + { + record_buf[0] = reg_t; + arm_insn_r->reg_rec_count = 1; + } + else if (bit_l && !bit_c) + { + /* Handle VMOV instruction. */ + if (bits_a == 0x00) + { + if (bit (arm_insn_r->arm_insn, 20)) + record_buf[0] = reg_t; + else + record_buf[0] = num_regs + (bit (arm_insn_r->arm_insn, 7) | + (reg_v << 1)); + + arm_insn_r->reg_rec_count = 1; + } + /* Handle VMRS instruction. */ + else if (bits_a == 0x07) + { + if (reg_t == 15) + reg_t = ARM_PS_REGNUM; + + record_buf[0] = reg_t; + arm_insn_r->reg_rec_count = 1; + } + } + else if (!bit_l && !bit_c) + { + /* Handle VMOV instruction. */ + if (bits_a == 0x00) + { + if (bit (arm_insn_r->arm_insn, 20)) + record_buf[0] = reg_t; + else + record_buf[0] = num_regs + (bit (arm_insn_r->arm_insn, 7) | + (reg_v << 1)); + + arm_insn_r->reg_rec_count = 1; + } + /* Handle VMSR instruction. */ + else if (bits_a == 0x07) + { + record_buf[0] = ARM_FPSCR_REGNUM; + arm_insn_r->reg_rec_count = 1; + } + } + else if (!bit_l && bit_c) + { + /* Handle VMOV instruction. */ + if (!(bits_a & 0x04)) + { + record_buf[0] = (reg_v | (bit (arm_insn_r->arm_insn, 7) << 4)) + + ARM_D0_REGNUM; + arm_insn_r->reg_rec_count = 1; + } + /* Handle VDUP instruction. */ + else + { + if (bit (arm_insn_r->arm_insn, 21)) + { + reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4); + record_buf[0] = reg_v + ARM_D0_REGNUM; + record_buf[1] = reg_v + ARM_D0_REGNUM + 1; + arm_insn_r->reg_rec_count = 2; + } + else + { + reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4); + record_buf[0] = reg_v + ARM_D0_REGNUM; + arm_insn_r->reg_rec_count = 1; + } + } + } + + REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf); + return 0; +} + +/* Record handler for extension register load/store instructions. */ + +static int +arm_record_exreg_ld_st_insn (insn_decode_record *arm_insn_r) +{ + uint32_t opcode, single_reg; + uint8_t op_vldm_vstm; + uint32_t record_buf[8], record_buf_mem[128]; + ULONGEST u_regval = 0; + + struct regcache *reg_cache = arm_insn_r->regcache; + const int num_regs = gdbarch_num_regs (arm_insn_r->gdbarch); + + opcode = bits (arm_insn_r->arm_insn, 20, 24); + single_reg = bit (arm_insn_r->arm_insn, 8); + op_vldm_vstm = opcode & 0x1b; + + /* Handle VMOV instructions. */ + if ((opcode & 0x1e) == 0x04) + { + if (bit (arm_insn_r->arm_insn, 4)) + { + record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15); + record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19); + arm_insn_r->reg_rec_count = 2; + } + else + { + uint8_t reg_m = (bits (arm_insn_r->arm_insn, 0, 3) << 1) + | bit (arm_insn_r->arm_insn, 5); + + if (!single_reg) + { + record_buf[0] = num_regs + reg_m; + record_buf[1] = num_regs + reg_m + 1; + arm_insn_r->reg_rec_count = 2; + } + else + { + record_buf[0] = reg_m + ARM_D0_REGNUM; + arm_insn_r->reg_rec_count = 1; + } + } + } + /* Handle VSTM and VPUSH instructions. 
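+     Both store a block of extension registers to memory starting at an
+     address derived from Rn, so only the affected memory locations are
+     recorded, plus Rn itself when writeback (bit 21) is set.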
*/ + else if (op_vldm_vstm == 0x08 || op_vldm_vstm == 0x0a + || op_vldm_vstm == 0x12) + { + uint32_t start_address, reg_rn, imm_off32, imm_off8, memory_count; + uint32_t memory_index = 0; + + reg_rn = bits (arm_insn_r->arm_insn, 16, 19); + regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval); + imm_off8 = bits (arm_insn_r->arm_insn, 0, 7); + imm_off32 = imm_off8 << 24; + memory_count = imm_off8; + + if (bit (arm_insn_r->arm_insn, 23)) + start_address = u_regval; + else + start_address = u_regval - imm_off32; + + if (bit (arm_insn_r->arm_insn, 21)) + { + record_buf[0] = reg_rn; + arm_insn_r->reg_rec_count = 1; + } + + while (memory_count > 0) + { + if (!single_reg) + { + record_buf_mem[memory_index] = start_address; + record_buf_mem[memory_index + 1] = 4; + start_address = start_address + 4; + memory_index = memory_index + 2; + } + else + { + record_buf_mem[memory_index] = start_address; + record_buf_mem[memory_index + 1] = 4; + record_buf_mem[memory_index + 2] = start_address + 4; + record_buf_mem[memory_index + 3] = 4; + start_address = start_address + 8; + memory_index = memory_index + 4; + } + memory_count--; + } + arm_insn_r->mem_rec_count = (memory_index >> 1); + } + /* Handle VLDM instructions. */ + else if (op_vldm_vstm == 0x09 || op_vldm_vstm == 0x0b + || op_vldm_vstm == 0x13) + { + uint32_t reg_count, reg_vd; + uint32_t reg_index = 0; + + reg_vd = bits (arm_insn_r->arm_insn, 12, 15); + reg_count = bits (arm_insn_r->arm_insn, 0, 7); + + if (single_reg) + reg_vd = reg_vd | (bit (arm_insn_r->arm_insn, 22) << 4); + else + reg_vd = (reg_vd << 1) | bit (arm_insn_r->arm_insn, 22); + + if (bit (arm_insn_r->arm_insn, 21)) + record_buf[reg_index++] = bits (arm_insn_r->arm_insn, 16, 19); + + while (reg_count > 0) + { + if (single_reg) + record_buf[reg_index++] = num_regs + reg_vd + reg_count - 1; + else + record_buf[reg_index++] = ARM_D0_REGNUM + reg_vd + reg_count - 1; + + reg_count--; + } + arm_insn_r->reg_rec_count = reg_index; + } + /* VSTR Vector store register. */ + else if ((opcode & 0x13) == 0x10) + { + uint32_t start_address, reg_rn, imm_off32, imm_off8, memory_count; + uint32_t memory_index = 0; + + reg_rn = bits (arm_insn_r->arm_insn, 16, 19); + regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval); + imm_off8 = bits (arm_insn_r->arm_insn, 0, 7); + imm_off32 = imm_off8 << 24; + memory_count = imm_off8; + + if (bit (arm_insn_r->arm_insn, 23)) + start_address = u_regval + imm_off32; + else + start_address = u_regval - imm_off32; + + if (single_reg) + { + record_buf_mem[memory_index] = start_address; + record_buf_mem[memory_index + 1] = 4; + arm_insn_r->mem_rec_count = 1; + } + else + { + record_buf_mem[memory_index] = start_address; + record_buf_mem[memory_index + 1] = 4; + record_buf_mem[memory_index + 2] = start_address + 4; + record_buf_mem[memory_index + 3] = 4; + arm_insn_r->mem_rec_count = 2; + } + } + /* VLDR Vector load register. */ + else if ((opcode & 0x13) == 0x11) + { + uint32_t reg_vd = bits (arm_insn_r->arm_insn, 12, 15); + + if (!single_reg) + { + reg_vd = reg_vd | (bit (arm_insn_r->arm_insn, 22) << 4); + record_buf[0] = ARM_D0_REGNUM + reg_vd; + } + else + { + reg_vd = (reg_vd << 1) | bit (arm_insn_r->arm_insn, 22); + record_buf[0] = num_regs + reg_vd; + } + arm_insn_r->reg_rec_count = 1; + } + + REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf); + MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem); + return 0; +} + +/* Record handler for arm/thumb mode VFP data processing instructions. 
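+   The decode below classifies each instruction into one of the
+   INSN_T0..INSN_T3 templates, which select the destination registers
+   (or, for the compare forms, the FPSCR) that must be recorded.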
*/ + +static int +arm_record_vfp_data_proc_insn (insn_decode_record *arm_insn_r) +{ + uint32_t opc1, opc2, opc3, dp_op_sz, bit_d, reg_vd; + uint32_t record_buf[4]; + enum insn_types {INSN_T0, INSN_T1, INSN_T2, INSN_T3, INSN_INV}; + enum insn_types curr_insn_type = INSN_INV; + + reg_vd = bits (arm_insn_r->arm_insn, 12, 15); + opc1 = bits (arm_insn_r->arm_insn, 20, 23); + opc2 = bits (arm_insn_r->arm_insn, 16, 19); + opc3 = bits (arm_insn_r->arm_insn, 6, 7); + dp_op_sz = bit (arm_insn_r->arm_insn, 8); + bit_d = bit (arm_insn_r->arm_insn, 22); + opc1 = opc1 & 0x04; + + /* Handle VMLA, VMLS. */ + if (opc1 == 0x00) + { + if (bit (arm_insn_r->arm_insn, 10)) + { + if (bit (arm_insn_r->arm_insn, 6)) + curr_insn_type = INSN_T0; + else + curr_insn_type = INSN_T1; + } + else + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + } + /* Handle VNMLA, VNMLS, VNMUL. */ + else if (opc1 == 0x01) + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + /* Handle VMUL. */ + else if (opc1 == 0x02 && !(opc3 & 0x01)) + { + if (bit (arm_insn_r->arm_insn, 10)) + { + if (bit (arm_insn_r->arm_insn, 6)) + curr_insn_type = INSN_T0; + else + curr_insn_type = INSN_T1; + } + else + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + } + /* Handle VADD, VSUB. */ + else if (opc1 == 0x03) + { + if (!bit (arm_insn_r->arm_insn, 9)) + { + if (bit (arm_insn_r->arm_insn, 6)) + curr_insn_type = INSN_T0; + else + curr_insn_type = INSN_T1; + } + else + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + } + /* Handle VDIV. */ + else if (opc1 == 0x0b) + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + /* Handle all other vfp data processing instructions. */ + else if (opc1 == 0x0b) + { + /* Handle VMOV. */ + if (!(opc3 & 0x01) || (opc2 == 0x00 && opc3 == 0x01)) + { + if (bit (arm_insn_r->arm_insn, 4)) + { + if (bit (arm_insn_r->arm_insn, 6)) + curr_insn_type = INSN_T0; + else + curr_insn_type = INSN_T1; + } + else + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + } + /* Handle VNEG and VABS. */ + else if ((opc2 == 0x01 && opc3 == 0x01) + || (opc2 == 0x00 && opc3 == 0x03)) + { + if (!bit (arm_insn_r->arm_insn, 11)) + { + if (bit (arm_insn_r->arm_insn, 6)) + curr_insn_type = INSN_T0; + else + curr_insn_type = INSN_T1; + } + else + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + } + /* Handle VSQRT. */ + else if (opc2 == 0x01 && opc3 == 0x03) + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + /* Handle VCVT. */ + else if (opc2 == 0x07 && opc3 == 0x03) + { + if (!dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + else if (opc3 & 0x01) + { + /* Handle VCVT. */ + if ((opc2 == 0x08) || (opc2 & 0x0e) == 0x0c) + { + if (!bit (arm_insn_r->arm_insn, 18)) + curr_insn_type = INSN_T2; + else + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + } + /* Handle VCVT. */ + else if ((opc2 & 0x0e) == 0x0a || (opc2 & 0x0e) == 0x0e) + { + if (dp_op_sz) + curr_insn_type = INSN_T1; + else + curr_insn_type = INSN_T2; + } + /* Handle VCVTB, VCVTT. */ + else if ((opc2 & 0x0e) == 0x02) + curr_insn_type = INSN_T2; + /* Handle VCMP, VCMPE. 
*/ + else if ((opc2 & 0x0e) == 0x04) + curr_insn_type = INSN_T3; + } + } + + switch (curr_insn_type) + { + case INSN_T0: + reg_vd = reg_vd | (bit_d << 4); + record_buf[0] = reg_vd + ARM_D0_REGNUM; + record_buf[1] = reg_vd + ARM_D0_REGNUM + 1; + arm_insn_r->reg_rec_count = 2; + break; + + case INSN_T1: + reg_vd = reg_vd | (bit_d << 4); + record_buf[0] = reg_vd + ARM_D0_REGNUM; + arm_insn_r->reg_rec_count = 1; + break; + + case INSN_T2: + reg_vd = (reg_vd << 1) | bit_d; + record_buf[0] = reg_vd + ARM_D0_REGNUM; + arm_insn_r->reg_rec_count = 1; + break; + + case INSN_T3: + record_buf[0] = ARM_FPSCR_REGNUM; + arm_insn_r->reg_rec_count = 1; + break; + + default: + gdb_assert_not_reached ("no decoding pattern found"); + break; + } + + REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf); + return 0; +} + +/* Handling opcode 110 insns. */ + +static int +arm_record_asimd_vfp_coproc (insn_decode_record *arm_insn_r) +{ + uint32_t op, op1, op1_sbit, op1_ebit, coproc; + + coproc = bits (arm_insn_r->arm_insn, 8, 11); + op1 = bits (arm_insn_r->arm_insn, 20, 25); + op1_ebit = bit (arm_insn_r->arm_insn, 20); + + if ((coproc & 0x0e) == 0x0a) + { + /* Handle extension register ld/st instructions. */ + if (!(op1 & 0x20)) + return arm_record_exreg_ld_st_insn (arm_insn_r); + + /* 64-bit transfers between arm core and extension registers. */ + if ((op1 & 0x3e) == 0x04) + return arm_record_exreg_ld_st_insn (arm_insn_r); + } + else + { + /* Handle coprocessor ld/st instructions. */ + if (!(op1 & 0x3a)) + { + /* Store. */ + if (!op1_ebit) + return arm_record_unsupported_insn (arm_insn_r); + else + /* Load. */ + return arm_record_unsupported_insn (arm_insn_r); + } + + /* Move to coprocessor from two arm core registers. */ + if (op1 == 0x4) + return arm_record_unsupported_insn (arm_insn_r); + + /* Move to two arm core registers from coprocessor. */ + if (op1 == 0x5) + { + uint32_t reg_t[2]; + + reg_t[0] = bits (arm_insn_r->arm_insn, 12, 15); + reg_t[1] = bits (arm_insn_r->arm_insn, 16, 19); + arm_insn_r->reg_rec_count = 2; + + REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, reg_t); + return 0; + } + } + return arm_record_unsupported_insn (arm_insn_r); +} + /* Handling opcode 111 insns. */ static int arm_record_coproc_data_proc (insn_decode_record *arm_insn_r) { + uint32_t op, op1_sbit, op1_ebit, coproc; struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch); struct regcache *reg_cache = arm_insn_r->regcache; - uint32_t ret = 0; /* function return value: -1:record failure ; 0:success */ ULONGEST u_regval = 0; arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27); + coproc = bits (arm_insn_r->arm_insn, 8, 11); + op1_sbit = bit (arm_insn_r->arm_insn, 24); + op1_ebit = bit (arm_insn_r->arm_insn, 20); + op = bit (arm_insn_r->arm_insn, 4); /* Handle arm SWI/SVC system call instructions. */ - if (15 == arm_insn_r->opcode) + if (op1_sbit) { if (tdep->arm_syscall_record != NULL) { @@ -11941,21 +12522,52 @@ arm_record_coproc_data_proc (insn_decode_record *arm_insn_r) else /* EABI. */ regcache_raw_read_unsigned (reg_cache, 7, &svc_number); - ret = tdep->arm_syscall_record (reg_cache, svc_number); + return tdep->arm_syscall_record (reg_cache, svc_number); } else { printf_unfiltered (_("no syscall record support\n")); - ret = -1; + return -1; } } + + if ((coproc & 0x0e) == 0x0a) + { + /* VFP data-processing instructions. */ + if (!op1_sbit && !op) + return arm_record_vfp_data_proc_insn (arm_insn_r); + + /* Advanced SIMD, VFP instructions. 
*/ + if (!op1_sbit && op) + return arm_record_vdata_transfer_insn (arm_insn_r); + } else { - arm_record_unsupported_insn (arm_insn_r); - ret = -1; + /* Coprocessor data operations. */ + if (!op1_sbit && !op) + return arm_record_unsupported_insn (arm_insn_r); + + /* Move to Coprocessor from ARM core register. */ + if (!op1_sbit && !op1_ebit && op) + return arm_record_unsupported_insn (arm_insn_r); + + /* Move to arm core register from coprocessor. */ + if (!op1_sbit && op1_ebit && op) + { + uint32_t record_buf[1]; + + record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15); + if (record_buf[0] == 15) + record_buf[0] = ARM_PS_REGNUM; + + arm_insn_r->reg_rec_count = 1; + REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, + record_buf); + return 0; + } } - return ret; + return arm_record_unsupported_insn (arm_insn_r); } /* Handling opcode 000 insns. */ @@ -12871,6 +13483,206 @@ thumb2_record_lmul_lmla_div (insn_decode_record *thumb2_insn_r) return ARM_RECORD_SUCCESS; } +/* Record handler for thumb32 coprocessor instructions. */ + +static int +thumb2_record_coproc_insn (insn_decode_record *thumb2_insn_r) +{ + if (bit (thumb2_insn_r->arm_insn, 25)) + return arm_record_coproc_data_proc (thumb2_insn_r); + else + return arm_record_asimd_vfp_coproc (thumb2_insn_r); +} + +/* Record handler for advance SIMD structure load/store instructions. */ + +static int +thumb2_record_asimd_struct_ld_st (insn_decode_record *thumb2_insn_r) +{ + struct regcache *reg_cache = thumb2_insn_r->regcache; + uint32_t l_bit, a_bit, b_bits; + uint32_t record_buf[128], record_buf_mem[128]; + uint32_t reg_rn, reg_vd, address, f_esize, f_elem; + uint32_t index_r = 0, index_e = 0, bf_regs = 0, index_m = 0, loop_t = 0; + uint8_t f_ebytes; + + l_bit = bit (thumb2_insn_r->arm_insn, 21); + a_bit = bit (thumb2_insn_r->arm_insn, 23); + b_bits = bits (thumb2_insn_r->arm_insn, 8, 11); + reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19); + reg_vd = bits (thumb2_insn_r->arm_insn, 12, 15); + reg_vd = (bit (thumb2_insn_r->arm_insn, 22) << 4) | reg_vd; + f_ebytes = (1 << bits (thumb2_insn_r->arm_insn, 6, 7)); + f_esize = 8 * f_ebytes; + f_elem = 8 / f_ebytes; + + if (!l_bit) + { + ULONGEST u_regval = 0; + regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval); + address = u_regval; + + if (!a_bit) + { + /* Handle VST1. */ + if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06) + { + if (b_bits == 0x07) + bf_regs = 1; + else if (b_bits == 0x0a) + bf_regs = 2; + else if (b_bits == 0x06) + bf_regs = 3; + else if (b_bits == 0x02) + bf_regs = 4; + else + bf_regs = 0; + + for (index_r = 0; index_r < bf_regs; index_r++) + { + for (index_e = 0; index_e < f_elem; index_e++) + { + record_buf_mem[index_m++] = f_ebytes; + record_buf_mem[index_m++] = address; + address = address + f_ebytes; + thumb2_insn_r->mem_rec_count += 1; + } + } + } + /* Handle VST2. */ + else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08) + { + if (b_bits == 0x09 || b_bits == 0x08) + bf_regs = 1; + else if (b_bits == 0x03) + bf_regs = 2; + else + bf_regs = 0; + + for (index_r = 0; index_r < bf_regs; index_r++) + for (index_e = 0; index_e < f_elem; index_e++) + { + for (loop_t = 0; loop_t < 2; loop_t++) + { + record_buf_mem[index_m++] = f_ebytes; + record_buf_mem[index_m++] = address + (loop_t * f_ebytes); + thumb2_insn_r->mem_rec_count += 1; + } + address = address + (2 * f_ebytes); + } + } + /* Handle VST3. 
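+	     Three-element structures: each iteration records three
+	     consecutive element-sized stores before advancing the
+	     address by 3 * f_ebytes.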
*/ + else if ((b_bits & 0x0e) == 0x04) + { + for (index_e = 0; index_e < f_elem; index_e++) + { + for (loop_t = 0; loop_t < 3; loop_t++) + { + record_buf_mem[index_m++] = f_ebytes; + record_buf_mem[index_m++] = address + (loop_t * f_ebytes); + thumb2_insn_r->mem_rec_count += 1; + } + address = address + (3 * f_ebytes); + } + } + /* Handle VST4. */ + else if (!(b_bits & 0x0e)) + { + for (index_e = 0; index_e < f_elem; index_e++) + { + for (loop_t = 0; loop_t < 4; loop_t++) + { + record_buf_mem[index_m++] = f_ebytes; + record_buf_mem[index_m++] = address + (loop_t * f_ebytes); + thumb2_insn_r->mem_rec_count += 1; + } + address = address + (4 * f_ebytes); + } + } + } + else + { + uint8_t bft_size = bits (thumb2_insn_r->arm_insn, 10, 11); + + if (bft_size == 0x00) + f_ebytes = 1; + else if (bft_size == 0x01) + f_ebytes = 2; + else if (bft_size == 0x02) + f_ebytes = 4; + else + f_ebytes = 0; + + /* Handle VST1. */ + if (!(b_bits & 0x0b) || b_bits == 0x08) + thumb2_insn_r->mem_rec_count = 1; + /* Handle VST2. */ + else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09) + thumb2_insn_r->mem_rec_count = 2; + /* Handle VST3. */ + else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a) + thumb2_insn_r->mem_rec_count = 3; + /* Handle VST4. */ + else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b) + thumb2_insn_r->mem_rec_count = 4; + + for (index_m = 0; index_m < thumb2_insn_r->mem_rec_count; index_m++) + { + record_buf_mem[index_m] = f_ebytes; + record_buf_mem[index_m] = address + (index_m * f_ebytes); + } + } + } + else + { + if (!a_bit) + { + /* Handle VLD1. */ + if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06) + thumb2_insn_r->reg_rec_count = 1; + /* Handle VLD2. */ + else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08) + thumb2_insn_r->reg_rec_count = 2; + /* Handle VLD3. */ + else if ((b_bits & 0x0e) == 0x04) + thumb2_insn_r->reg_rec_count = 3; + /* Handle VLD4. */ + else if (!(b_bits & 0x0e)) + thumb2_insn_r->reg_rec_count = 4; + } + else + { + /* Handle VLD1. */ + if (!(b_bits & 0x0b) || b_bits == 0x08 || b_bits == 0x0c) + thumb2_insn_r->reg_rec_count = 1; + /* Handle VLD2. */ + else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09 || b_bits == 0x0d) + thumb2_insn_r->reg_rec_count = 2; + /* Handle VLD3. */ + else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a || b_bits == 0x0e) + thumb2_insn_r->reg_rec_count = 3; + /* Handle VLD4. */ + else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b || b_bits == 0x0f) + thumb2_insn_r->reg_rec_count = 4; + + for (index_r = 0; index_r < thumb2_insn_r->reg_rec_count; index_r++) + record_buf[index_r] = reg_vd + ARM_D0_REGNUM + index_r; + } + } + + if (bits (thumb2_insn_r->arm_insn, 0, 3) != 15) + { + record_buf[index_r] = reg_rn; + thumb2_insn_r->reg_rec_count += 1; + } + + REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count, + record_buf); + MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count, + record_buf_mem); + return 0; +} + /* Decodes thumb2 instruction type and invokes its record handler. */ static unsigned int @@ -12902,7 +13714,7 @@ thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r) else if (op2 & 0x40) { /* Co-processor instructions. */ - arm_record_unsupported_insn (thumb2_insn_r); + return thumb2_record_coproc_insn (thumb2_insn_r); } } else if (op1 == 0x02) @@ -12933,7 +13745,7 @@ thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r) else if (!((op2 & 0x71) ^ 0x10)) { /* Advanced SIMD or structure load/store instructions. 
*/ - return arm_record_unsupported_insn (thumb2_insn_r); + return thumb2_record_asimd_struct_ld_st (thumb2_insn_r); } else if (!((op2 & 0x67) ^ 0x01)) { @@ -12968,7 +13780,7 @@ thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r) else if (op2 & 0x40) { /* Co-processor instructions. */ - return arm_record_unsupported_insn (thumb2_insn_r); + return thumb2_record_coproc_insn (thumb2_insn_r); } } @@ -12989,7 +13801,7 @@ extract_arm_insn (insn_decode_record *insn_record, uint32_t insn_size) return 1; insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0], insn_size, - gdbarch_byte_order (insn_record->gdbarch)); + gdbarch_byte_order_for_code (insn_record->gdbarch)); return 0; } @@ -13004,7 +13816,7 @@ decode_insn (insn_decode_record *arm_record, record_type_t record_type, { /* (Starting from numerical 0); bits 25, 26, 27 decodes type of arm instruction. */ - static const sti_arm_hdl_fp_t const arm_handle_insn[8] = + static const sti_arm_hdl_fp_t arm_handle_insn[8] = { arm_record_data_proc_misc_ld_str, /* 000. */ arm_record_data_proc_imm, /* 001. */ @@ -13012,12 +13824,12 @@ decode_insn (insn_decode_record *arm_record, record_type_t record_type, arm_record_ld_st_reg_offset, /* 011. */ arm_record_ld_st_multiple, /* 100. */ arm_record_b_bl, /* 101. */ - arm_record_unsupported_insn, /* 110. */ + arm_record_asimd_vfp_coproc, /* 110. */ arm_record_coproc_data_proc /* 111. */ }; /* (Starting from numerical 0); bits 13,14,15 decodes type of thumb instruction. */ - static const sti_arm_hdl_fp_t const thumb_handle_insn[8] = + static const sti_arm_hdl_fp_t thumb_handle_insn[8] = { \ thumb_record_shift_add_sub, /* 000. */ thumb_record_add_sub_cmp_mov, /* 001. */