X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=gas%2Fconfig%2Ftc-arm.c;h=f325dcf1f1161d105b098b1463f09dfe0833a99b;hb=886e1c739b5441aca92a9725c932d0d446097a32;hp=14d114adbeec62b11fa3bc7d4f7cb3cdd030bf8f;hpb=e2b0ab597857bfe9d7c8742ff50bbb77c70936c4;p=deliverable%2Fbinutils-gdb.git diff --git a/gas/config/tc-arm.c b/gas/config/tc-arm.c index 14d114adbe..f325dcf1f1 100644 --- a/gas/config/tc-arm.c +++ b/gas/config/tc-arm.c @@ -302,6 +302,10 @@ static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE_COPROC (FPU_NEON_EXT_V1); static const arm_feature_set fpu_vfp_v3_or_neon_ext = ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3); +static const arm_feature_set mve_ext = + ARM_FEATURE_COPROC (FPU_MVE); +static const arm_feature_set mve_fp_ext = + ARM_FEATURE_COPROC (FPU_MVE_FP); #ifdef OBJ_ELF static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16); @@ -449,16 +453,20 @@ struct neon_type unsigned elems; }; -enum it_instruction_type +enum pred_instruction_type { - OUTSIDE_IT_INSN, + OUTSIDE_PRED_INSN, + INSIDE_VPT_INSN, INSIDE_IT_INSN, INSIDE_IT_LAST_INSN, IF_INSIDE_IT_LAST_INSN, /* Either outside or inside; if inside, should be the last one. */ NEUTRAL_IT_INSN, /* This could be either inside or outside, i.e. BKPT and NOP. */ - IT_INSN /* The IT insn has been parsed. */ + IT_INSN, /* The IT insn has been parsed. */ + VPT_INSN, /* The VPT/VPST insn has been parsed. */ + MVE_OUTSIDE_PRED_INSN /* Instruction to indicate a MVE instruction without + a predication code. */ }; /* The maximum number of operands we need. */ @@ -490,7 +498,7 @@ struct arm_it int pc_rel; } relocs[ARM_IT_MAX_RELOCS]; - enum it_instruction_type it_insn_type; + enum pred_instruction_type pred_insn_type; struct { @@ -507,7 +515,7 @@ struct arm_it instructions. This allows us to disambiguate ARM <-> vector insns. */ unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */ unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. 
*/ - unsigned isquad : 1; /* Operand is Neon quad-precision register. */ + unsigned isquad : 1; /* Operand is SIMD quad register. */ unsigned issingle : 1; /* Operand is VFP single-precision register. */ unsigned hasreloc : 1; /* Operand has relocation suffix. */ unsigned writeback : 1; /* Operand has trailing ! */ @@ -528,9 +536,6 @@ const char * fp_const[] = "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0 }; -/* Number of littlenums required to hold an extended precision number. */ -#define MAX_LITTLENUMS 6 - LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS]; #define FAIL (-1) @@ -629,12 +634,13 @@ enum arm_reg_type REG_TYPE_MVFX, REG_TYPE_MVDX, REG_TYPE_MVAX, + REG_TYPE_MQ, REG_TYPE_DSPSC, REG_TYPE_MMXWR, REG_TYPE_MMXWC, REG_TYPE_MMXWCG, REG_TYPE_XSCALE, - REG_TYPE_RNB + REG_TYPE_RNB, }; /* Structure for a hash table entry for a register. @@ -676,6 +682,7 @@ const char * const reg_expected_msgs[] = [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"), [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"), [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"), + [REG_TYPE_MQ] = N_("MVE vector register expected"), [REG_TYPE_RNB] = N_("") }; @@ -701,7 +708,7 @@ struct asm_opcode unsigned int tag : 4; /* Basic instruction code. */ - unsigned int avalue : 28; + unsigned int avalue; /* Thumb-format instruction code. */ unsigned int tvalue; @@ -715,6 +722,9 @@ struct asm_opcode /* Function to call to encode instruction in Thumb format. */ void (* tencode) (void); + + /* Indicates whether this instruction may be vector predicated. */ + unsigned int mayBeVecPred : 1; }; /* Defines for various bits that we will want to toggle. 
*/ @@ -837,9 +847,12 @@ struct asm_opcode #define THUMB_LOAD_BIT 0x0800 #define THUMB2_LOAD_BIT 0x00100000 +#define BAD_SYNTAX _("syntax error") #define BAD_ARGS _("bad arguments to instruction") #define BAD_SP _("r13 not allowed here") #define BAD_PC _("r15 not allowed here") +#define BAD_ODD _("Odd register not allowed here") +#define BAD_EVEN _("Even register not allowed here") #define BAD_COND _("instruction cannot be conditional") #define BAD_OVERLAP _("registers may not be the same") #define BAD_HIREG _("lo register required") @@ -848,9 +861,13 @@ struct asm_opcode #define BAD_BRANCH _("branch must be last instruction in IT block") #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2") #define BAD_NOT_IT _("instruction not allowed in IT block") +#define BAD_NOT_VPT _("instruction missing MVE vector predication code") #define BAD_FPU _("selected FPU does not support instruction") #define BAD_OUT_IT _("thumb conditional instruction should be in IT block") +#define BAD_OUT_VPT \ + _("vector predicated instruction should be in VPT/VPST block") #define BAD_IT_COND _("incorrect condition in IT block") +#define BAD_VPT_COND _("incorrect condition in VPT/VPST block") #define BAD_IT_IT _("IT falling in the range of a previous IT block") #define MISSING_FNSTART _("missing .fnstart before unwinding directive") #define BAD_PC_ADDRESSING \ @@ -861,9 +878,24 @@ struct asm_opcode #define BAD_FP16 _("selected processor does not support fp16 instruction") #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour") #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only") +#define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \ + "block") +#define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \ + "block") +#define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \ + " operand") +#define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \ + " operand") +#define BAD_SIMD_TYPE _("bad 
type in SIMD instruction") +#define BAD_MVE_AUTO \ + _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \ + " use a valid -march or -mcpu option.") +#define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\ + "and source operands makes instruction UNPREDICTABLE") static struct hash_control * arm_ops_hsh; static struct hash_control * arm_cond_hsh; +static struct hash_control * arm_vcond_hsh; static struct hash_control * arm_shift_hsh; static struct hash_control * arm_psr_hsh; static struct hash_control * arm_v7m_psr_hsh; @@ -915,15 +947,15 @@ typedef enum asmfunc_states static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC; #ifdef OBJ_ELF -# define now_it seg_info (now_seg)->tc_segment_info_data.current_it +# define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred #else -static struct current_it now_it; +static struct current_pred now_pred; #endif static inline int -now_it_compatible (int cond) +now_pred_compatible (int cond) { - return (cond & ~1) == (now_it.cc & ~1); + return (cond & ~1) == (now_pred.cc & ~1); } static inline int @@ -932,39 +964,39 @@ conditional_insn (void) return inst.cond != COND_ALWAYS; } -static int in_it_block (void); +static int in_pred_block (void); -static int handle_it_state (void); +static int handle_pred_state (void); static void force_automatic_it_block_close (void); static void it_fsm_post_encode (void); -#define set_it_insn_type(type) \ +#define set_pred_insn_type(type) \ do \ { \ - inst.it_insn_type = type; \ - if (handle_it_state () == FAIL) \ + inst.pred_insn_type = type; \ + if (handle_pred_state () == FAIL) \ return; \ } \ while (0) -#define set_it_insn_type_nonvoid(type, failret) \ +#define set_pred_insn_type_nonvoid(type, failret) \ do \ { \ - inst.it_insn_type = type; \ - if (handle_it_state () == FAIL) \ + inst.pred_insn_type = type; \ + if (handle_pred_state () == FAIL) \ return failret; \ } \ while(0) -#define set_it_insn_type_last() \ +#define 
set_pred_insn_type_last() \ do \ { \ if (inst.cond == COND_ALWAYS) \ - set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \ + set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \ else \ - set_it_insn_type (INSIDE_IT_LAST_INSN); \ + set_pred_insn_type (INSIDE_IT_LAST_INSN); \ } \ while (0) @@ -1496,6 +1528,41 @@ parse_neon_operand_type (struct neon_type_el *vectype, char **ccp) #define NEON_ALL_LANES 15 #define NEON_INTERLEAVE_LANES 14 +/* Record a use of the given feature. */ +static void +record_feature_use (const arm_feature_set *feature) +{ + if (thumb_mode) + ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature); + else + ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature); +} + +/* If the given feature available in the selected CPU, mark it as used. + Returns TRUE iff feature is available. */ +static bfd_boolean +mark_feature_used (const arm_feature_set *feature) +{ + + /* Do not support the use of MVE only instructions when in auto-detection or + -march=all. */ + if (((feature == &mve_ext) || (feature == &mve_fp_ext)) + && ARM_CPU_IS_ANY (cpu_variant)) + { + first_error (BAD_MVE_AUTO); + return FALSE; + } + /* Ensure the option is valid on the current architecture. */ + if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature)) + return FALSE; + + /* Add the appropriate architecture feature for the barrier option used. + */ + record_feature_use (feature); + + return TRUE; +} + /* Parse either a register or a scalar, with an optional type. 
Return the register number, and optionally fill in the actual type of the register when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and @@ -1542,6 +1609,26 @@ parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type, && (reg->type == REG_TYPE_MMXWCG))) type = (enum arm_reg_type) reg->type; + if (type == REG_TYPE_MQ) + { + if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) + return FAIL; + + if (!reg || reg->type != REG_TYPE_NQ) + return FAIL; + + if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32)) + { + first_error (_("expected MVE register [q0..q7]")); + return FAIL; + } + type = REG_TYPE_NQ; + } + else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) + && (type == REG_TYPE_NQ)) + return FAIL; + + if (type != reg->type) return FAIL; @@ -1609,7 +1696,7 @@ parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type, return reg->number; } -/* Like arm_reg_parse, but allow allow the following extra features: +/* Like arm_reg_parse, but also allow the following extra features: - If RTYPE is non-zero, return the (possibly restricted) type of the register (e.g. Neon double or quad reg when either has been requested). - If this is a Neon vector type with additional type information, fill @@ -1688,14 +1775,29 @@ parse_scalar (char **ccp, int elsize, struct neon_type_el *type) return reg * 16 + atype.index; } +/* Types of registers in a list. */ + +enum reg_list_els +{ + REGLIST_RN, + REGLIST_CLRM, + REGLIST_VFP_S, + REGLIST_VFP_S_VPR, + REGLIST_VFP_D, + REGLIST_VFP_D_VPR, + REGLIST_NEON_D +}; + /* Parse an ARM register list. Returns the bitmask, or FAIL. */ static long -parse_reg_list (char ** strp) +parse_reg_list (char ** strp, enum reg_list_els etype) { - char * str = * strp; - long range = 0; - int another_range; + char *str = *strp; + long range = 0; + int another_range; + + gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM); /* We come back here if we get ranges concatenated by '+' or '|'. 
*/
   do
@@ -1713,11 +1815,35 @@ parse_reg_list (char ** strp)
       do
 	{
 	  int reg;
+	  const char apsr_str[] = "apsr";
+	  int apsr_str_len = strlen (apsr_str);
 
-	  if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
+	  reg = arm_reg_parse (&str, REG_TYPE_RN);
+	  if (etype == REGLIST_CLRM)
 	    {
-	      first_error (_(reg_expected_msgs[REG_TYPE_RN]));
-	      return FAIL;
+	      if (reg == REG_SP || reg == REG_PC)
+		reg = FAIL;
+	      else if (reg == FAIL
+		       && !strncasecmp (str, apsr_str, apsr_str_len)
+		       && !ISALPHA (*(str + apsr_str_len)))
+		{
+		  reg = 15;
+		  str += apsr_str_len;
+		}
+
+	      if (reg == FAIL)
+		{
+		  first_error (_("r0-r12, lr or APSR expected"));
+		  return FAIL;
+		}
+	    }
+	  else /* etype == REGLIST_RN.  */
+	    {
+	      if (reg == FAIL)
+		{
+		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
+		  return FAIL;
+		}
 	    }
 
 	  if (in_range)
@@ -1761,7 +1887,7 @@ parse_reg_list (char ** strp)
 	      return FAIL;
 	    }
 	}
-      else
+      else if (etype == REGLIST_RN)
 	{
 	  expressionS exp;
 
@@ -1816,15 +1942,6 @@ parse_reg_list (char ** strp)
   return range;
 }
 
-/* Types of registers in a list.  */
-
-enum reg_list_els
-{
-  REGLIST_VFP_S,
-  REGLIST_VFP_D,
-  REGLIST_NEON_D
-};
-
 /* Parse a VFP register list.  If the string is invalid return FAIL.
    Otherwise return the number of registers, and set PBASE to the first
    register.  Parses registers of type ETYPE.
@@ -1841,7 +1958,8 @@ enum reg_list_els
   bug.  
*/ static int -parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype) +parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype, + bfd_boolean *partial_match) { char *str = *ccp; int base_reg; @@ -1852,6 +1970,9 @@ parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype) int warned = 0; unsigned long mask = 0; int i; + bfd_boolean vpr_seen = FALSE; + bfd_boolean expect_vpr = + (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR); if (skip_past_char (&str, '{') == FAIL) { @@ -1862,20 +1983,25 @@ parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype) switch (etype) { case REGLIST_VFP_S: + case REGLIST_VFP_S_VPR: regtype = REG_TYPE_VFS; max_regs = 32; break; case REGLIST_VFP_D: + case REGLIST_VFP_D_VPR: regtype = REG_TYPE_VFD; break; case REGLIST_NEON_D: regtype = REG_TYPE_NDQ; break; + + default: + gas_assert (0); } - if (etype != REGLIST_VFP_S) + if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR) { /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */ if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32)) @@ -1893,19 +2019,54 @@ parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype) } base_reg = max_regs; + *partial_match = FALSE; do { int setmask = 1, addregs = 1; + const char vpr_str[] = "vpr"; + int vpr_str_len = strlen (vpr_str); new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL); - if (new_base == FAIL) + if (expect_vpr) + { + if (new_base == FAIL + && !strncasecmp (str, vpr_str, vpr_str_len) + && !ISALPHA (*(str + vpr_str_len)) + && !vpr_seen) + { + vpr_seen = TRUE; + str += vpr_str_len; + if (count == 0) + base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs. 
*/ + } + else if (vpr_seen) + { + first_error (_("VPR expected last")); + return FAIL; + } + else if (new_base == FAIL) + { + if (regtype == REG_TYPE_VFS) + first_error (_("VFP single precision register or VPR " + "expected")); + else /* regtype == REG_TYPE_VFD. */ + first_error (_("VFP/Neon double precision register or VPR " + "expected")); + return FAIL; + } + } + else if (new_base == FAIL) { first_error (_(reg_expected_msgs[regtype])); return FAIL; } + *partial_match = TRUE; + if (vpr_seen) + continue; + if (new_base >= max_regs) { first_error (_("register out of range in list")); @@ -1928,7 +2089,7 @@ parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype) return FAIL; } - if ((mask >> new_base) != 0 && ! warned) + if ((mask >> new_base) != 0 && ! warned && !vpr_seen) { as_tsktsk (_("register list not in ascending order")); warned = 1; @@ -1983,11 +2144,17 @@ parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype) str++; /* Sanity check -- should have raised a parse error above. */ - if (count == 0 || count > max_regs) + if ((!vpr_seen && count == 0) || count > max_regs) abort (); *pbase = base_reg; + if (expect_vpr && !vpr_seen) + { + first_error (_("VPR expected last")); + return FAIL; + } + /* Final test -- the registers must be consecutive. 
*/ mask >>= base_reg; for (i = 0; i < count; i++) @@ -3681,10 +3848,10 @@ emit_insn (expressionS *exp, int nbytes) } else { - if (now_it.state == AUTOMATIC_IT_BLOCK) - set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0); + if (now_pred.state == AUTOMATIC_PRED_BLOCK) + set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0); else - set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0); + set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0); if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian) emit_thumb32_expr (exp); @@ -3988,7 +4155,7 @@ s_arm_unwind_save_core (void) long range; int n; - range = parse_reg_list (&input_line_pointer); + range = parse_reg_list (&input_line_pointer, REGLIST_RN); if (range == FAIL) { as_bad (_("expected register list")); @@ -4115,8 +4282,10 @@ s_arm_unwind_save_vfp_armv6 (void) valueT op; int num_vfpv3_regs = 0; int num_regs_below_16; + bfd_boolean partial_match; - count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D); + count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D, + &partial_match); if (count == FAIL) { as_bad (_("expected register list")); @@ -4163,8 +4332,10 @@ s_arm_unwind_save_vfp (void) int count; unsigned int reg; valueT op; + bfd_boolean partial_match; - count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D); + count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D, + &partial_match); if (count == FAIL) { as_bad (_("expected register list")); @@ -4662,7 +4833,7 @@ s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED) { int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC); - if (tag < NUM_KNOWN_OBJ_ATTRIBUTES) + if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES) attributes_set_explicitly[tag] = 1; } @@ -6044,6 +6215,39 @@ check_suffix: return FAIL; } +static int +parse_sys_vldr_vstr (char **str) +{ + unsigned i; + int val = FAIL; + struct { + const char *name; + int regl; + int regh; + } sysregs[] = { + {"FPSCR", 0x1, 0x0}, + {"FPSCR_nzcvqc", 0x2, 0x0}, + {"VPR", 0x4, 0x1}, + {"P0", 
0x5, 0x1}, + {"FPCXTNS", 0x6, 0x1}, + {"FPCXTS", 0x7, 0x1} + }; + char *op_end = strchr (*str, ','); + size_t op_strlen = op_end - *str; + + for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++) + { + if (!strncmp (*str, sysregs[i].name, op_strlen)) + { + val = sysregs[i].regl | (sysregs[i].regh << 3); + *str = op_end; + break; + } + } + + return val; +} + /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a value suitable for splatting into the AIF field of the instruction. */ @@ -6175,31 +6379,6 @@ parse_cond (char **str) return c->value; } -/* Record a use of the given feature. */ -static void -record_feature_use (const arm_feature_set *feature) -{ - if (thumb_mode) - ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature); - else - ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature); -} - -/* If the given feature is currently allowed, mark it as used and return TRUE. - Return FALSE otherwise. */ -static bfd_boolean -mark_feature_used (const arm_feature_set *feature) -{ - /* Ensure the option is currently allowed. */ - if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature)) - return FALSE; - - /* Add the appropriate architecture feature for the barrier option used. */ - record_feature_use (feature); - - return TRUE; -} - /* Parse an option for a barrier instruction. Returns the encoding for the option, or FAIL. */ static int @@ -6525,10 +6704,15 @@ enum operand_parse_code OP_RVS, /* VFP single precision register */ OP_RVD, /* VFP double precision register (0..15) */ OP_RND, /* Neon double precision register (0..31) */ + OP_RNDMQ, /* Neon double precision (0..31) or MVE vector register. */ + OP_RNDMQR, /* Neon double precision (0..31), MVE vector or ARM register. + */ OP_RNQ, /* Neon quad precision register */ + OP_RNQMQ, /* Neon quad or MVE vector register. 
*/ OP_RVSD, /* VFP single or double precision register */ OP_RNSD, /* Neon single or double precision register */ OP_RNDQ, /* Neon double or quad precision register */ + OP_RNDQMQ, /* Neon double, quad or MVE vector register. */ OP_RNSDQ, /* Neon single, double or quad precision register */ OP_RNSC, /* Neon scalar D[X] */ OP_RVC, /* VFP control register */ @@ -6543,12 +6727,26 @@ enum operand_parse_code OP_RIWG, /* iWMMXt wCG register */ OP_RXA, /* XScale accumulator register */ + OP_RNSDQMQ, /* Neon single, double or quad register or MVE vector register + */ + OP_RNSDQMQR, /* Neon single, double or quad register, MVE vector register or + GPR (no SP/SP) */ + OP_RMQ, /* MVE vector register. */ + + /* New operands for Armv8.1-M Mainline. */ + OP_LR, /* ARM LR register */ + OP_RRe, /* ARM register, only even numbered. */ + OP_RRo, /* ARM register, only odd numbered, not r13 or r15. */ + OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */ + OP_REGLST, /* ARM register list */ + OP_CLRMLST, /* CLRM register list */ OP_VRSLST, /* VFP single-precision register list */ OP_VRDLST, /* VFP double-precision register list */ OP_VRSDLST, /* VFP single or double-precision register list (& quad) */ OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */ OP_NSTRLST, /* Neon element/structure list */ + OP_VRSDVLST, /* VFP single or double-precision register list and VPR */ OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */ OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */ @@ -6556,12 +6754,15 @@ enum operand_parse_code OP_RR_RNSC, /* ARM reg or Neon scalar. */ OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar. */ OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */ + OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register. + */ OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */ OP_RND_RNSC, /* Neon D reg, or Neon scalar. */ OP_VMOV, /* Neon VMOV operands. 
*/ OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */ OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */ OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */ + OP_VLDR, /* VLDR operand. */ OP_I0, /* immediate zero */ OP_I7, /* immediate value 0 .. 7 */ @@ -6622,13 +6823,17 @@ enum operand_parse_code OP_oI255c, /* curly-brace enclosed, 0 .. 255 */ OP_oRR, /* ARM register */ + OP_oLR, /* ARM LR register */ OP_oRRnpc, /* ARM register, not the PC */ OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */ OP_oRRw, /* ARM register, not r15, optional trailing ! */ OP_oRND, /* Optional Neon double precision register */ OP_oRNQ, /* Optional Neon quad precision register */ + OP_oRNDQMQ, /* Optional Neon double, quad or MVE vector register. */ OP_oRNDQ, /* Optional Neon double or quad precision register */ OP_oRNSDQ, /* Optional single, double or quad precision vector register */ + OP_oRNSDQMQ, /* Optional single, double or quad register or MVE vector + register. 
*/ OP_oSHll, /* LSL immediate */ OP_oSHar, /* ASR immediate */ OP_oSHllar, /* LSL or ASR immediate */ @@ -6657,6 +6862,7 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) enum arm_reg_type rtype; parse_operand_result result; unsigned int op_parse_code; + bfd_boolean partial_match; #define po_char_or_fail(chr) \ do \ @@ -6790,6 +6996,10 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) case OP_RRnpc: case OP_RRnpcsp: case OP_oRR: + case OP_RRe: + case OP_RRo: + case OP_LR: + case OP_oLR: case OP_RR: po_reg_or_fail (REG_TYPE_RN); break; case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break; case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break; @@ -6797,6 +7007,14 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break; case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break; case OP_oRND: + case OP_RNDMQR: + po_reg_or_goto (REG_TYPE_RN, try_rndmq); + break; + try_rndmq: + case OP_RNDMQ: + po_reg_or_goto (REG_TYPE_MQ, try_rnd); + break; + try_rnd: case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break; case OP_RVC: po_reg_or_goto (REG_TYPE_VFC, coproc_reg); @@ -6816,14 +7034,37 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break; case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break; case OP_oRNQ: + case OP_RNQMQ: + po_reg_or_goto (REG_TYPE_MQ, try_nq); + break; + try_nq: case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break; case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break; + case OP_oRNDQMQ: + case OP_RNDQMQ: + po_reg_or_goto (REG_TYPE_MQ, try_rndq); + break; + try_rndq: case OP_oRNDQ: case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break; case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break; case OP_oRNSDQ: case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break; - + case OP_RNSDQMQR: + po_reg_or_goto (REG_TYPE_RN, try_mq); + break; + try_mq: + case OP_oRNSDQMQ: + case OP_RNSDQMQ: + 
po_reg_or_goto (REG_TYPE_MQ, try_nsdq2); + break; + try_nsdq2: + po_reg_or_fail (REG_TYPE_NSDQ); + inst.error = 0; + break; + case OP_RMQ: + po_reg_or_fail (REG_TYPE_MQ); + break; /* Neon scalar. Using an element size of 8 means that some invalid scalars are accepted here, so deal with those in later code. */ case OP_RNSC: po_scalar_or_goto (8, failure); break; @@ -6866,6 +7107,10 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) } break; + case OP_RNSDQ_RNSC_MQ: + po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc); + break; + try_rnsdq_rnsc: case OP_RNSDQ_RNSC: { po_scalar_or_goto (8, try_nsdq); @@ -7131,6 +7376,13 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) val = parse_psr (&str, op_parse_code == OP_wPSR); break; + case OP_VLDR: + po_reg_or_goto (REG_TYPE_VFSD, try_sysreg); + break; + try_sysreg: + val = parse_sys_vldr_vstr (&str); + break; + case OP_APSR_RR: po_reg_or_goto (REG_TYPE_RN, try_apsr); break; @@ -7166,7 +7418,7 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) /* Register lists. */ case OP_REGLST: - val = parse_reg_list (&str); + val = parse_reg_list (&str, REGLIST_RN); if (*str == '^') { inst.operands[i].writeback = 1; @@ -7174,30 +7426,48 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) } break; + case OP_CLRMLST: + val = parse_reg_list (&str, REGLIST_CLRM); + break; + case OP_VRSLST: - val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S); + val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S, + &partial_match); break; case OP_VRDLST: - val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D); + val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D, + &partial_match); break; case OP_VRSDLST: /* Allow Q registers too. 
*/ val = parse_vfp_reg_list (&str, &inst.operands[i].reg, - REGLIST_NEON_D); + REGLIST_NEON_D, &partial_match); if (val == FAIL) { inst.error = NULL; val = parse_vfp_reg_list (&str, &inst.operands[i].reg, - REGLIST_VFP_S); + REGLIST_VFP_S, &partial_match); + inst.operands[i].issingle = 1; + } + break; + + case OP_VRSDVLST: + val = parse_vfp_reg_list (&str, &inst.operands[i].reg, + REGLIST_VFP_D_VPR, &partial_match); + if (val == FAIL && !partial_match) + { + inst.error = NULL; + val = parse_vfp_reg_list (&str, &inst.operands[i].reg, + REGLIST_VFP_S_VPR, &partial_match); inst.operands[i].issingle = 1; } break; case OP_NRDLST: val = parse_vfp_reg_list (&str, &inst.operands[i].reg, - REGLIST_NEON_D); + REGLIST_NEON_D, &partial_match); break; case OP_NSTRLST: @@ -7289,6 +7559,10 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) inst.error = BAD_PC; break; + case OP_VLDR: + if (inst.operands[i].isreg) + break; + /* fall through. */ case OP_CPSF: case OP_ENDI: case OP_oROR: @@ -7297,9 +7571,11 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) case OP_COND: case OP_oBARRIER_I15: case OP_REGLST: + case OP_CLRMLST: case OP_VRSLST: case OP_VRDLST: case OP_VRSDLST: + case OP_VRSDVLST: case OP_NRDLST: case OP_NSTRLST: if (val == FAIL) @@ -7307,6 +7583,30 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) inst.operands[i].imm = val; break; + case OP_LR: + case OP_oLR: + if (inst.operands[i].reg != REG_LR) + inst.error = _("operand must be LR register"); + break; + + case OP_RRe: + if (inst.operands[i].isreg + && (inst.operands[i].reg & 0x00000001) != 0) + inst.error = BAD_ODD; + break; + + case OP_RRo: + if (inst.operands[i].isreg) + { + if ((inst.operands[i].reg & 0x00000001) != 1) + inst.error = BAD_EVEN; + else if (inst.operands[i].reg == REG_SP) + as_tsktsk (MVE_BAD_SP); + else if (inst.operands[i].reg == REG_PC) + inst.error = BAD_PC; + } + break; + default: break; } @@ -7324,7 +7624,7 @@ 
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) /* The parse routine should already have set inst.error, but set a default here just in case. */ if (!inst.error) - inst.error = _("syntax error"); + inst.error = BAD_SYNTAX; return FAIL; } @@ -7336,7 +7636,7 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) && upat[i+1] == OP_stop) { if (!inst.error) - inst.error = _("syntax error"); + inst.error = BAD_SYNTAX; return FAIL; } @@ -7417,7 +7717,7 @@ parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb) static void do_scalar_fp16_v82_encode (void) { - if (inst.cond != COND_ALWAYS) + if (inst.cond < COND_ALWAYS) as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional," " the behaviour is UNPREDICTABLE")); constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16), @@ -8890,9 +9190,9 @@ do_it (void) inst.size = 0; if (unified_syntax) { - set_it_insn_type (IT_INSN); - now_it.mask = (inst.instruction & 0xf) | 0x10; - now_it.cc = inst.operands[0].imm; + set_pred_insn_type (IT_INSN); + now_pred.mask = (inst.instruction & 0xf) | 0x10; + now_pred.cc = inst.operands[0].imm; } } @@ -10505,6 +10805,11 @@ encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d) X(_asrs, 1000, fa50f000), \ X(_b, e000, f000b000), \ X(_bcond, d000, f0008000), \ + X(_bf, 0000, f040e001), \ + X(_bfcsel,0000, f000e001), \ + X(_bfx, 0000, f060e001), \ + X(_bfl, 0000, f000c001), \ + X(_bflx, 0000, f070e001), \ X(_bic, 4380, ea200000), \ X(_bics, 4380, ea300000), \ X(_cmn, 42c0, eb100f00), \ @@ -10513,6 +10818,7 @@ encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d) X(_cpsid, b670, f3af8600), \ X(_cpy, 4600, ea4f0000), \ X(_dec_sp,80dd, f1ad0d00), \ + X(_dls, 0000, f040e001), \ X(_eor, 4040, ea800000), \ X(_eors, 4040, ea900000), \ X(_inc_sp,00dd, f10d0d00), \ @@ -10525,6 +10831,7 @@ encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d) X(_ldr_pc,4800, f85f0000), \ 
X(_ldr_pc2,4800, f85f0000), \ X(_ldr_sp,9800, f85d0000), \ + X(_le, 0000, f00fc001), \ X(_lsl, 0000, fa00f000), \ X(_lsls, 0000, fa10f000), \ X(_lsr, 0800, fa20f000), \ @@ -10566,6 +10873,7 @@ encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d) X(_yield, bf10, f3af8001), \ X(_wfe, bf20, f3af8002), \ X(_wfi, bf30, f3af8003), \ + X(_wls, 0000, f040c001), \ X(_sev, bf40, f3af8004), \ X(_sevl, bf50, f3af8005), \ X(_udf, de00, f7f0a000) @@ -10626,7 +10934,7 @@ do_t_add_sub (void) : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ if (Rd == REG_PC) - set_it_insn_type_last (); + set_pred_insn_type_last (); if (unified_syntax) { @@ -10637,9 +10945,9 @@ do_t_add_sub (void) flags = (inst.instruction == T_MNEM_adds || inst.instruction == T_MNEM_subs); if (flags) - narrow = !in_it_block (); + narrow = !in_pred_block (); else - narrow = in_it_block (); + narrow = in_pred_block (); if (!inst.operands[2].isreg) { int add; @@ -10916,9 +11224,9 @@ do_t_arit3 (void) /* See if we can do this with a 16-bit instruction. */ if (THUMB_SETS_FLAGS (inst.instruction)) - narrow = !in_it_block (); + narrow = !in_pred_block (); else - narrow = in_it_block (); + narrow = in_pred_block (); if (Rd > 7 || Rn > 7 || Rs > 7) narrow = FALSE; @@ -11004,9 +11312,9 @@ do_t_arit3c (void) /* See if we can do this with a 16-bit instruction. 
*/ if (THUMB_SETS_FLAGS (inst.instruction)) - narrow = !in_it_block (); + narrow = !in_pred_block (); else - narrow = in_it_block (); + narrow = in_pred_block (); if (Rd > 7 || Rn > 7 || Rs > 7) narrow = FALSE; @@ -11145,7 +11453,7 @@ do_t_bfx (void) static void do_t_blx (void) { - set_it_insn_type_last (); + set_pred_insn_type_last (); if (inst.operands[0].isreg) { @@ -11169,9 +11477,9 @@ do_t_branch (void) bfd_reloc_code_real_type reloc; cond = inst.cond; - set_it_insn_type (IF_INSIDE_IT_LAST_INSN); + set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); - if (in_it_block ()) + if (in_pred_block ()) { /* Conditional branches inside IT blocks are encoded as unconditional branches. */ @@ -11238,7 +11546,7 @@ do_t_bkpt_hlt1 (int range) inst.instruction |= inst.operands[0].imm; } - set_it_insn_type (NEUTRAL_IT_INSN); + set_pred_insn_type (NEUTRAL_IT_INSN); } static void @@ -11256,7 +11564,7 @@ do_t_bkpt (void) static void do_t_branch23 (void) { - set_it_insn_type_last (); + set_pred_insn_type_last (); encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23); /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in @@ -11284,7 +11592,7 @@ do_t_branch23 (void) static void do_t_bx (void) { - set_it_insn_type_last (); + set_pred_insn_type_last (); inst.instruction |= inst.operands[0].reg << 3; /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc should cause the alignment to be checked once it is known. 
This is @@ -11296,7 +11604,7 @@ do_t_bxj (void) { int Rm; - set_it_insn_type_last (); + set_pred_insn_type_last (); Rm = inst.operands[0].reg; reject_bad_reg (Rm); inst.instruction |= Rm << 16; @@ -11322,20 +11630,20 @@ do_t_clz (void) static void do_t_csdb (void) { - set_it_insn_type (OUTSIDE_IT_INSN); + set_pred_insn_type (OUTSIDE_PRED_INSN); } static void do_t_cps (void) { - set_it_insn_type (OUTSIDE_IT_INSN); + set_pred_insn_type (OUTSIDE_PRED_INSN); inst.instruction |= inst.operands[0].imm; } static void do_t_cpsi (void) { - set_it_insn_type (OUTSIDE_IT_INSN); + set_pred_insn_type (OUTSIDE_PRED_INSN); if (unified_syntax && (inst.operands[1].present || inst.size_req == 4) && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm)) @@ -11382,7 +11690,7 @@ do_t_cpy (void) static void do_t_cbz (void) { - set_it_insn_type (OUTSIDE_IT_INSN); + set_pred_insn_type (OUTSIDE_PRED_INSN); constraint (inst.operands[0].reg > 7, BAD_HIREG); inst.instruction |= inst.operands[0].reg; inst.relocs[0].pc_rel = 1; @@ -11428,10 +11736,11 @@ do_t_it (void) { unsigned int cond = inst.operands[0].imm; - set_it_insn_type (IT_INSN); - now_it.mask = (inst.instruction & 0xf) | 0x10; - now_it.cc = cond; - now_it.warn_deprecated = FALSE; + set_pred_insn_type (IT_INSN); + now_pred.mask = (inst.instruction & 0xf) | 0x10; + now_pred.cc = cond; + now_pred.warn_deprecated = FALSE; + now_pred.type = SCALAR_PRED; /* If the condition is a negative condition, invert the mask. */ if ((cond & 0x1) == 0x0) @@ -11441,22 +11750,22 @@ do_t_it (void) if ((mask & 0x7) == 0) { /* No conversion needed. 
*/ - now_it.block_length = 1; + now_pred.block_length = 1; } else if ((mask & 0x3) == 0) { mask ^= 0x8; - now_it.block_length = 2; + now_pred.block_length = 2; } else if ((mask & 0x1) == 0) { mask ^= 0xC; - now_it.block_length = 3; + now_pred.block_length = 3; } else { mask ^= 0xE; - now_it.block_length = 4; + now_pred.block_length = 4; } inst.instruction &= 0xfff0; @@ -11466,18 +11775,33 @@ do_t_it (void) inst.instruction |= cond << 4; } +static void +do_mve_vpt (void) +{ + /* We are dealing with a vector predicated block. */ + set_pred_insn_type (VPT_INSN); + now_pred.cc = 0; + now_pred.mask = ((inst.instruction & 0x00400000) >> 19) + | ((inst.instruction & 0xe000) >> 13); + now_pred.warn_deprecated = FALSE; + now_pred.type = VECTOR_PRED; +} + /* Helper function used for both push/pop and ldm/stm. */ static void -encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback) +encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask, + bfd_boolean writeback) { - bfd_boolean load; + bfd_boolean load, store; - load = (inst.instruction & (1 << 20)) != 0; + gas_assert (base != -1 || !do_io); + load = do_io && ((inst.instruction & (1 << 20)) != 0); + store = do_io && !load; if (mask & (1 << 13)) inst.error = _("SP not allowed in register list"); - if ((mask & (1 << base)) != 0 + if (do_io && (mask & (1 << base)) != 0 && writeback) inst.error = _("having the base register in the register list when " "using write back is UNPREDICTABLE"); @@ -11489,16 +11813,16 @@ encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback) if (mask & (1 << 14)) inst.error = _("LR and PC should not both be in register list"); else - set_it_insn_type_last (); + set_pred_insn_type_last (); } } - else + else if (store) { if (mask & (1 << 15)) inst.error = _("PC not allowed in register list"); } - if ((mask & (mask - 1)) == 0) + if (do_io && ((mask & (mask - 1)) == 0)) { /* Single register transfers implemented as str/ldr. 
*/ if (writeback) @@ -11527,7 +11851,8 @@ encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback) inst.instruction |= WRITE_BACK; inst.instruction |= mask; - inst.instruction |= base << 16; + if (do_io) + inst.instruction |= base << 16; } static void @@ -11622,8 +11947,9 @@ do_t_ldmstm (void) if (inst.instruction < 0xffff) inst.instruction = THUMB_OP32 (inst.instruction); - encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm, - inst.operands[0].writeback); + encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg, + inst.operands[1].imm, + inst.operands[0].writeback); } } else @@ -11701,7 +12027,7 @@ do_t_ldst (void) if (inst.operands[0].isreg && !inst.operands[0].preind && inst.operands[0].reg == REG_PC) - set_it_insn_type_last (); + set_pred_insn_type_last (); opcode = inst.instruction; if (unified_syntax) @@ -11960,7 +12286,7 @@ do_t_mov_cmp (void) Rm = inst.operands[1].reg; if (Rn == REG_PC) - set_it_insn_type_last (); + set_pred_insn_type_last (); if (unified_syntax) { @@ -11972,7 +12298,7 @@ do_t_mov_cmp (void) low_regs = (Rn <= 7 && Rm <= 7); opcode = inst.instruction; - if (in_it_block ()) + if (in_pred_block ()) narrow = opcode != T_MNEM_movs; else narrow = opcode != T_MNEM_movs || low_regs; @@ -12043,7 +12369,7 @@ do_t_mov_cmp (void) if (!inst.operands[1].isreg) { /* Immediate operand. */ - if (!in_it_block () && opcode == T_MNEM_mov) + if (!in_pred_block () && opcode == T_MNEM_mov) narrow = 0; if (low_regs && narrow) { @@ -12079,7 +12405,7 @@ do_t_mov_cmp (void) /* Register shifts are encoded as separate shift instructions. 
*/ bfd_boolean flags = (inst.instruction == T_MNEM_movs); - if (in_it_block ()) + if (in_pred_block ()) narrow = !flags; else narrow = flags; @@ -12135,7 +12461,7 @@ do_t_mov_cmp (void) && (inst.instruction == T_MNEM_mov || inst.instruction == T_MNEM_movs)) { - if (in_it_block ()) + if (in_pred_block ()) narrow = (inst.instruction == T_MNEM_mov); else narrow = (inst.instruction == T_MNEM_movs); @@ -12314,9 +12640,9 @@ do_t_mvn_tst (void) || inst.instruction == T_MNEM_tst) narrow = TRUE; else if (THUMB_SETS_FLAGS (inst.instruction)) - narrow = !in_it_block (); + narrow = !in_pred_block (); else - narrow = in_it_block (); + narrow = in_pred_block (); if (!inst.operands[1].isreg) { @@ -12481,9 +12807,9 @@ do_t_mul (void) || Rm > 7) narrow = FALSE; else if (inst.instruction == T_MNEM_muls) - narrow = !in_it_block (); + narrow = !in_pred_block (); else - narrow = in_it_block (); + narrow = in_pred_block (); } else { @@ -12549,7 +12875,7 @@ do_t_mull (void) static void do_t_nop (void) { - set_it_insn_type (NEUTRAL_IT_INSN); + set_pred_insn_type (NEUTRAL_IT_INSN); if (unified_syntax) { @@ -12587,9 +12913,9 @@ do_t_neg (void) bfd_boolean narrow; if (THUMB_SETS_FLAGS (inst.instruction)) - narrow = !in_it_block (); + narrow = !in_pred_block (); else - narrow = in_it_block (); + narrow = in_pred_block (); if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7) narrow = FALSE; if (inst.size_req == 4) @@ -12730,8 +13056,20 @@ do_t_push_pop (void) else if (unified_syntax) { inst.instruction = THUMB_OP32 (inst.instruction); - encode_thumb2_ldmstm (13, mask, TRUE); + encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE); + } + else + { + inst.error = _("invalid register list to push/pop instruction"); + return; } +} + +static void +do_t_clrm (void) +{ + if (unified_syntax) + encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE); else { inst.error = _("invalid register list to push/pop instruction"); @@ -12739,6 +13077,24 @@ do_t_push_pop (void) } } +static 
void +do_t_vscclrm (void) +{ + if (inst.operands[0].issingle) + { + inst.instruction |= (inst.operands[0].reg & 0x1) << 22; + inst.instruction |= (inst.operands[0].reg & 0x1e) << 11; + inst.instruction |= inst.operands[0].imm; + } + else + { + inst.instruction |= (inst.operands[0].reg & 0x10) << 18; + inst.instruction |= (inst.operands[0].reg & 0xf) << 12; + inst.instruction |= 1 << 8; + inst.instruction |= inst.operands[0].imm << 1; + } +} + static void do_t_rbit (void) { @@ -12821,9 +13177,9 @@ do_t_rsb (void) bfd_boolean narrow; if ((inst.instruction & 0x00100000) != 0) - narrow = !in_it_block (); + narrow = !in_pred_block (); else - narrow = in_it_block (); + narrow = in_pred_block (); if (Rd > 7 || Rs > 7) narrow = FALSE; @@ -12861,7 +13217,7 @@ do_t_setend (void) && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) as_tsktsk (_("setend use is deprecated for ARMv8")); - set_it_insn_type (OUTSIDE_IT_INSN); + set_pred_insn_type (OUTSIDE_PRED_INSN); if (inst.operands[0].imm) inst.instruction |= 0x8; } @@ -12891,9 +13247,9 @@ do_t_shift (void) } if (THUMB_SETS_FLAGS (inst.instruction)) - narrow = !in_it_block (); + narrow = !in_pred_block (); else - narrow = in_it_block (); + narrow = in_pred_block (); if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7) narrow = FALSE; if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR) @@ -13063,7 +13419,7 @@ do_t_smc (void) inst.instruction |= (value & 0x0ff0); inst.instruction |= (value & 0x000f) << 16; /* PR gas/15623: SMC instructions must be last in an IT block. 
*/ - set_it_insn_type_last (); + set_pred_insn_type_last (); } static void @@ -13238,7 +13594,7 @@ do_t_tb (void) int half; half = (inst.instruction & 0x10) != 0; - set_it_insn_type_last (); + set_pred_insn_type_last (); constraint (inst.operands[0].immisreg, _("instruction requires register index")); @@ -13274,7 +13630,7 @@ do_t_udf (void) inst.instruction |= inst.operands[0].imm; } - set_it_insn_type (NEUTRAL_IT_INSN); + set_pred_insn_type (NEUTRAL_IT_INSN); } @@ -13320,40 +13676,223 @@ v8_1_branch_value_check (int val, int nbits, int is_signed) return SUCCESS; } -/* Neon instruction encoder helpers. */ +/* For branches in Armv8.1-M Mainline. */ +static void +do_t_branch_future (void) +{ + unsigned long insn = inst.instruction; -/* Encodings for the different types for various Neon opcodes. */ + inst.instruction = THUMB_OP32 (inst.instruction); + if (inst.operands[0].hasreloc == 0) + { + if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL) + as_bad (BAD_BRANCH_OFF); -/* An "invalid" code for the following tables. 
*/ -#define N_INV -1u + inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23; + } + else + { + inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5; + inst.relocs[0].pc_rel = 1; + } -struct neon_tab_entry + switch (insn) + { + case T_MNEM_bf: + if (inst.operands[1].hasreloc == 0) + { + int val = inst.operands[1].imm; + if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL) + as_bad (BAD_BRANCH_OFF); + + int immA = (val & 0x0001f000) >> 12; + int immB = (val & 0x00000ffc) >> 2; + int immC = (val & 0x00000002) >> 1; + inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11); + } + else + { + inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17; + inst.relocs[1].pc_rel = 1; + } + break; + + case T_MNEM_bfl: + if (inst.operands[1].hasreloc == 0) + { + int val = inst.operands[1].imm; + if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL) + as_bad (BAD_BRANCH_OFF); + + int immA = (val & 0x0007f000) >> 12; + int immB = (val & 0x00000ffc) >> 2; + int immC = (val & 0x00000002) >> 1; + inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11); + } + else + { + inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19; + inst.relocs[1].pc_rel = 1; + } + break; + + case T_MNEM_bfcsel: + /* Operand 1. */ + if (inst.operands[1].hasreloc == 0) + { + int val = inst.operands[1].imm; + int immA = (val & 0x00001000) >> 12; + int immB = (val & 0x00000ffc) >> 2; + int immC = (val & 0x00000002) >> 1; + inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11); + } + else + { + inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13; + inst.relocs[1].pc_rel = 1; + } + + /* Operand 2. */ + if (inst.operands[2].hasreloc == 0) + { + constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS); + int val2 = inst.operands[2].imm; + int val0 = inst.operands[0].imm & 0x1f; + int diff = val2 - val0; + if (diff == 4) + inst.instruction |= 1 << 17; /* T bit. 
*/ + else if (diff != 2) + as_bad (_("out of range label-relative fixup value")); + } + else + { + constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS); + inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL; + inst.relocs[2].pc_rel = 1; + } + + /* Operand 3. */ + constraint (inst.cond != COND_ALWAYS, BAD_COND); + inst.instruction |= (inst.operands[3].imm & 0xf) << 18; + break; + + case T_MNEM_bfx: + case T_MNEM_bflx: + inst.instruction |= inst.operands[1].reg << 16; + break; + + default: abort (); + } +} + +/* Helper function for do_t_loloop to handle relocations. */ +static void +v8_1_loop_reloc (int is_le) { - unsigned integer; - unsigned float_or_poly; - unsigned scalar_or_imm; -}; + if (inst.relocs[0].exp.X_op == O_constant) + { + int value = inst.relocs[0].exp.X_add_number; + value = (is_le) ? -value : value; -/* Map overloaded Neon opcodes to their respective encodings. */ -#define NEON_ENC_TAB \ - X(vabd, 0x0000700, 0x1200d00, N_INV), \ - X(vmax, 0x0000600, 0x0000f00, N_INV), \ - X(vmin, 0x0000610, 0x0200f00, N_INV), \ - X(vpadd, 0x0000b10, 0x1000d00, N_INV), \ - X(vpmax, 0x0000a00, 0x1000f00, N_INV), \ - X(vpmin, 0x0000a10, 0x1200f00, N_INV), \ - X(vadd, 0x0000800, 0x0000d00, N_INV), \ - X(vsub, 0x1000800, 0x0200d00, N_INV), \ - X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \ - X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \ - X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \ - /* Register variants of the following two instructions are encoded as - vcge / vcgt with the operands reversed. 
*/ \ - X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \ - X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \ - X(vfma, N_INV, 0x0000c10, N_INV), \ - X(vfms, N_INV, 0x0200c10, N_INV), \ - X(vmla, 0x0000900, 0x0000d10, 0x0800040), \ + if (v8_1_branch_value_check (value, 12, FALSE) == FAIL) + as_bad (BAD_BRANCH_OFF); + + int imml, immh; + + immh = (value & 0x00000ffc) >> 2; + imml = (value & 0x00000002) >> 1; + + inst.instruction |= (imml << 11) | (immh << 1); + } + else + { + inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12; + inst.relocs[0].pc_rel = 1; + } +} + +/* To handle the Scalar Low Overhead Loop instructions + in Armv8.1-M Mainline. */ +static void +do_t_loloop (void) +{ + unsigned long insn = inst.instruction; + + set_pred_insn_type (OUTSIDE_PRED_INSN); + inst.instruction = THUMB_OP32 (inst.instruction); + + switch (insn) + { + case T_MNEM_le: + /* le