/* Instruction printing code for the ARM
- Copyright (C) 1994-2019 Free Software Foundation, Inc.
+ Copyright (C) 1994-2020 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
Modification by James G. Smith (jsmith@cygnus.co.uk)
MVE_VHSUB_T1,
MVE_VHSUB_T2,
MVE_VRHADD,
+ MVE_VLD2,
+ MVE_VLD4,
+ MVE_VST2,
+ MVE_VST4,
+ MVE_VLDRB_T1,
+ MVE_VLDRH_T2,
+ MVE_VLDRB_T5,
+ MVE_VLDRH_T6,
+ MVE_VLDRW_T7,
+ MVE_VSTRB_T1,
+ MVE_VSTRH_T2,
+ MVE_VSTRB_T5,
+ MVE_VSTRH_T6,
+ MVE_VSTRW_T7,
+ MVE_VLDRB_GATHER_T1,
+ MVE_VLDRH_GATHER_T2,
+ MVE_VLDRW_GATHER_T3,
+ MVE_VLDRD_GATHER_T4,
+ MVE_VLDRW_GATHER_T5,
+ MVE_VLDRD_GATHER_T6,
+ MVE_VSTRB_SCATTER_T1,
+ MVE_VSTRH_SCATTER_T2,
+ MVE_VSTRW_SCATTER_T3,
+ MVE_VSTRD_SCATTER_T4,
+ MVE_VSTRW_SCATTER_T5,
+ MVE_VSTRD_SCATTER_T6,
+ MVE_VCVT_FP_FIX_VEC,
+ MVE_VCVT_BETWEEN_FP_INT,
+ MVE_VCVT_FP_HALF_FP,
+ MVE_VCVT_FROM_FP_TO_INT,
+ MVE_VRINT_FP,
+ MVE_VMOV_HFP_TO_GP,
+ MVE_VMOV_GP_TO_VEC_LANE,
+ MVE_VMOV_IMM_TO_VEC,
+ MVE_VMOV_VEC_TO_VEC,
+ MVE_VMOV2_VEC_LANE_TO_GP,
+ MVE_VMOV2_GP_TO_VEC_LANE,
+ MVE_VMOV_VEC_LANE_TO_GP,
+ MVE_VMVN_IMM,
+ MVE_VMVN_REG,
+ MVE_VORR_IMM,
+ MVE_VORR_REG,
+ MVE_VORN,
+ MVE_VBIC_IMM,
+ MVE_VBIC_REG,
+ MVE_VMOVX,
+ MVE_VMOVL,
+ MVE_VMOVN,
+ MVE_VMULL_INT,
+ MVE_VMULL_POLY,
+ MVE_VQDMULL_T1,
+ MVE_VQDMULL_T2,
+ MVE_VQMOVN,
+ MVE_VQMOVUN,
+ MVE_VADDV,
+ MVE_VMLADAV_T1,
+ MVE_VMLADAV_T2,
+ MVE_VMLALDAV,
+ MVE_VMLAS,
+ MVE_VADDLV,
+ MVE_VMLSDAV_T1,
+ MVE_VMLSDAV_T2,
+ MVE_VMLSLDAV,
+ MVE_VRMLALDAVH,
+ MVE_VRMLSLDAVH,
+ MVE_VQDMLADH,
+ MVE_VQRDMLADH,
+ MVE_VQDMLAH,
+ MVE_VQRDMLAH,
+ MVE_VQDMLASH,
+ MVE_VQRDMLASH,
+ MVE_VQDMLSDH,
+ MVE_VQRDMLSDH,
+ MVE_VQDMULH_T1,
+ MVE_VQRDMULH_T2,
+ MVE_VQDMULH_T3,
+ MVE_VQRDMULH_T4,
+ MVE_VDDUP,
+ MVE_VDWDUP,
+ MVE_VIWDUP,
+ MVE_VIDUP,
+ MVE_VCADD_FP,
+ MVE_VCADD_VEC,
+ MVE_VHCADD,
+ MVE_VCMLA_FP,
+ MVE_VCMUL_FP,
+ MVE_VQRSHL_T1,
+ MVE_VQRSHL_T2,
+ MVE_VQRSHRN,
+ MVE_VQRSHRUN,
+ MVE_VQSHL_T1,
+ MVE_VQSHL_T2,
+ MVE_VQSHLU_T3,
+ MVE_VQSHL_T4,
+ MVE_VQSHRN,
+ MVE_VQSHRUN,
+ MVE_VRSHL_T1,
+ MVE_VRSHL_T2,
+ MVE_VRSHR,
+ MVE_VRSHRN,
+ MVE_VSHL_T1,
+ MVE_VSHL_T2,
+ MVE_VSHL_T3,
+ MVE_VSHLC,
+ MVE_VSHLL_T1,
+ MVE_VSHLL_T2,
+ MVE_VSHR,
+ MVE_VSHRN,
+ MVE_VSLI,
+ MVE_VSRI,
+ MVE_VADC,
+ MVE_VABAV,
+ MVE_VABD_FP,
+ MVE_VABD_VEC,
+ MVE_VABS_FP,
+ MVE_VABS_VEC,
+ MVE_VADD_FP_T1,
+ MVE_VADD_FP_T2,
+ MVE_VADD_VEC_T1,
+ MVE_VADD_VEC_T2,
+ MVE_VSBC,
+ MVE_VSUB_FP_T1,
+ MVE_VSUB_FP_T2,
+ MVE_VSUB_VEC_T1,
+ MVE_VSUB_VEC_T2,
+ MVE_VAND,
+ MVE_VBRSR,
+ MVE_VCLS,
+ MVE_VCLZ,
+ MVE_VCTP,
+ MVE_VMAX,
+ MVE_VMAXA,
+ MVE_VMAXNM_FP,
+ MVE_VMAXNMA_FP,
+ MVE_VMAXNMV_FP,
+ MVE_VMAXNMAV_FP,
+ MVE_VMAXV,
+ MVE_VMAXAV,
+ MVE_VMIN,
+ MVE_VMINA,
+ MVE_VMINNM_FP,
+ MVE_VMINNMA_FP,
+ MVE_VMINNMV_FP,
+ MVE_VMINNMAV_FP,
+ MVE_VMINV,
+ MVE_VMINAV,
+ MVE_VMLA,
+ MVE_VMUL_FP_T1,
+ MVE_VMUL_FP_T2,
+ MVE_VMUL_VEC_T1,
+ MVE_VMUL_VEC_T2,
+ MVE_VMULH,
+ MVE_VRMULH,
+ MVE_VNEG_FP,
+ MVE_VNEG_VEC,
+ MVE_VPNOT,
+ MVE_VPSEL,
+ MVE_VQABS,
+ MVE_VQADD_T1,
+ MVE_VQADD_T2,
+ MVE_VQSUB_T1,
+ MVE_VQSUB_T2,
+ MVE_VQNEG,
+ MVE_VREV16,
+ MVE_VREV32,
+ MVE_VREV64,
+ MVE_LSLL,
+ MVE_LSLLI,
+ MVE_LSRL,
+ MVE_ASRL,
+ MVE_ASRLI,
+ MVE_SQRSHRL,
+ MVE_SQRSHR,
+ MVE_UQRSHL,
+ MVE_UQRSHLL,
+ MVE_UQSHL,
+ MVE_UQSHLL,
+ MVE_URSHRL,
+ MVE_URSHR,
+ MVE_SRSHRL,
+ MVE_SRSHR,
+ MVE_SQSHLL,
+ MVE_SQSHL,
+ MVE_CINC,
+ MVE_CINV,
+ MVE_CNEG,
+ MVE_CSINC,
+ MVE_CSINV,
+ MVE_CSET,
+ MVE_CSETM,
+ MVE_CSNEG,
+ MVE_CSEL,
MVE_NONE
};
 UNPRED_R13,  /* Unpredictable because r13 (sp) or
    r15 (pc) used. */
UNPRED_R15, /* Unpredictable because r15 (pc) is used. */
+ UNPRED_Q_GT_4, /* Unpredictable because
+ vec reg start > 4 (vld4/st4). */
+ UNPRED_Q_GT_6, /* Unpredictable because
+ vec reg start > 6 (vld2/st2). */
+ UNPRED_R13_AND_WB,  /* Unpredictable because gp reg = r13
+    and WB bit = 1. */
+ UNPRED_Q_REGS_EQUAL, /* Unpredictable because vector registers are
+ equal. */
+ UNPRED_OS, /* Unpredictable because offset scaled == 1. */
+ UNPRED_GP_REGS_EQUAL, /* Unpredictable because gp registers are the
+ same. */
+ UNPRED_Q_REGS_EQ_AND_SIZE_1, /* Unpredictable because q regs equal and
+ size = 1. */
+ UNPRED_Q_REGS_EQ_AND_SIZE_2, /* Unpredictable because q regs equal and
+ size = 2. */
UNPRED_NONE /* No unpredictable behavior. */
};
enum mve_undefined
{
+ UNDEF_SIZE, /* undefined size. */
+ UNDEF_SIZE_0, /* undefined because size == 0. */
+ UNDEF_SIZE_2, /* undefined because size == 2. */
UNDEF_SIZE_3, /* undefined because size == 3. */
+ UNDEF_SIZE_LE_1, /* undefined because size <= 1. */
+ UNDEF_SIZE_NOT_0, /* undefined because size != 0. */
+ UNDEF_SIZE_NOT_2, /* undefined because size != 2. */
+ UNDEF_SIZE_NOT_3, /* undefined because size != 3. */
+ UNDEF_NOT_UNS_SIZE_0, /* undefined because U == 0 and
+ size == 0. */
+ UNDEF_NOT_UNS_SIZE_1, /* undefined because U == 0 and
+ size == 1. */
+ UNDEF_NOT_UNSIGNED, /* undefined because U == 0. */
+ UNDEF_VCVT_IMM6, /* imm6 < 32. */
+ UNDEF_VCVT_FSI_IMM6,  /* fsi = 0 and 32 <= imm6 <= 47. */
+ UNDEF_BAD_OP1_OP2, /* undefined with op2 = 2 and
+ op1 == (0 or 1). */
+ UNDEF_BAD_U_OP1_OP2, /* undefined with U = 1 and
+ op2 == 0 and op1 == (0 or 1). */
+ UNDEF_OP_0_BAD_CMODE, /* undefined because op == 0 and cmode
+ in {0xx1, x0x1}. */
+ UNDEF_XCHG_UNS, /* undefined because X == 1 and U == 1. */
UNDEF_NONE /* no undefined behavior. */
};
%% %
%c print condition code (always bits 28-31 in ARM mode)
+ %b print condition code allowing cp_num == 9
%q print shifter argument
%u print condition code (unconditional in ARM mode,
UNPREDICTABLE if not AL in Thumb)
/* Floating point coprocessor (VFP) instructions. */
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
0x0ee00a10, 0x0fff0fff, "vmsr%c\tfpsid, %12-15r"},
- {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
+ {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD | FPU_MVE),
0x0ee10a10, 0x0fff0fff, "vmsr%c\tfpscr, %12-15r"},
+ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0x0ee20a10, 0x0fff0fff, "vmsr%c\tfpscr_nzcvqc, %12-15r"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
0x0ee60a10, 0x0fff0fff, "vmsr%c\tmvfr1, %12-15r"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
0x0ee90a10, 0x0fff0fff, "vmsr%c\tfpinst, %12-15r\t@ Impl def"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
0x0eea0a10, 0x0fff0fff, "vmsr%c\tfpinst2, %12-15r\t@ Impl def"},
+ {ANY, ARM_FEATURE_COPROC (FPU_MVE),
+ 0x0eec0a10, 0x0fff0fff, "vmsr%c\tvpr, %12-15r"},
+ {ANY, ARM_FEATURE_COPROC (FPU_MVE),
+ 0x0eed0a10, 0x0fff0fff, "vmsr%c\tp0, %12-15r"},
+ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0x0eee0a10, 0x0fff0fff, "vmsr%c\tfpcxt_ns, %12-15r"},
+ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0x0eef0a10, 0x0fff0fff, "vmsr%c\tfpcxt_s, %12-15r"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
0x0ef00a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpsid"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
0x0ef1fa10, 0x0fffffff, "vmrs%c\tAPSR_nzcv, fpscr"},
- {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
+ {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD | FPU_MVE),
0x0ef10a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpscr"},
+ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0x0ef20a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpscr_nzcvqc"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8),
0x0ef50a10, 0x0fff0fff, "vmrs%c\t%12-15r, mvfr2"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
0x0ef90a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpinst\t@ Impl def"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD),
0x0efa0a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpinst2\t@ Impl def"},
+ {ANY, ARM_FEATURE_COPROC (FPU_MVE),
+ 0x0efc0a10, 0x0fff0fff, "vmrs%c\t%12-15r, vpr"},
+ {ANY, ARM_FEATURE_COPROC (FPU_MVE),
+ 0x0efd0a10, 0x0fff0fff, "vmrs%c\t%12-15r, p0"},
+ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0x0efe0a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpcxt_ns"},
+ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0x0eff0a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpcxt_s"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1),
0x0e000b10, 0x0fd00fff, "vmov%c.32\t%z2[%21d], %12-15r"},
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1),
{ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8),
0xfeb80b40, 0xffbc0fd0, "vrint%16-17?mpna%u.f64\t%z1, %z0"},
- /* Generic coprocessor instructions. */
{ANY, ARM_FEATURE_CORE_LOW (0), SENTINEL_GENERIC_START, 0, "" },
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
- 0x0c400000, 0x0ff00000, "mcrr%c\t%8-11d, %4-7d, %12-15R, %16-19r, cr%0-3d"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
- 0x0c500000, 0x0ff00000,
- "mrrc%c\t%8-11d, %4-7d, %12-15Ru, %16-19Ru, cr%0-3d"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
- 0x0e000000, 0x0f000010,
- "cdp%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
- 0x0e10f010, 0x0f10f010,
- "mrc%c\t%8-11d, %21-23d, APSR_nzcv, cr%16-19d, cr%0-3d, {%5-7d}"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
- 0x0e100010, 0x0f100010,
- "mrc%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
- 0x0e000010, 0x0f100010,
- "mcr%c\t%8-11d, %21-23d, %12-15R, cr%16-19d, cr%0-3d, {%5-7d}"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
- 0x0c000000, 0x0e100000, "stc%22'l%c\t%8-11d, cr%12-15d, %A"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
- 0x0c100000, 0x0e100000, "ldc%22'l%c\t%8-11d, cr%12-15d, %A"},
-
- /* V6 coprocessor instructions. */
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xfc500000, 0xfff00000,
- "mrrc2%c\t%8-11d, %4-7d, %12-15Ru, %16-19Ru, cr%0-3d"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xfc400000, 0xfff00000,
- "mcrr2%c\t%8-11d, %4-7d, %12-15R, %16-19R, cr%0-3d"},
-
/* ARMv8.3 AdvSIMD instructions in the space of coprocessor 8. */
{ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A),
0xfc800800, 0xfeb00f10, "vcadd%c.f16\t%12-15,22V, %16-19,7V, %0-3,5V, #%24?29%24'70"},
{ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A),
0xfea00800, 0xffa00f10, "vcmla%c.f32\t%12-15,22V, %16-19,7V, %0-3,5D[0], #%20?21%20?780"},
+ /* BFloat16 instructions. */
+ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
+ 0x0eb30940, 0x0fbf0f50, "vcvt%7?tb%b.bf16.f32\t%y1, %y0"},
+
/* Dot Product instructions in the space of coprocessor 13. */
{ANY, ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
0xfc200d00, 0xffb00f00, "v%4?usdot.%4?us8\t%12-15,22V, %16-19,7V, %0-3,5V"},
{ANY, ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
- 0xfe000d00, 0xff000f00, "v%4?usdot.%4?us8\t%12-15,22V, %16-19,7V, %0-3D[%5?10]"},
+ 0xfe200d00, 0xff200f00, "v%4?usdot.%4?us8\t%12-15,22V, %16-19,7V, %0-3D[%5?10]"},
/* ARMv8.2 FMAC Long instructions in the space of coprocessor 8. */
{ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_V8_2A),
{ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_V8_2A),
0xfe100850, 0xffb00f50, "vfmsl.f16\t%12-15,22Q, d%16-19,7d, d%0-2d[%3,5d]"},
- /* V5 coprocessor instructions. */
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0xfc100000, 0xfe100000, "ldc2%22'l%c\t%8-11d, cr%12-15d, %A"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0xfc000000, 0xfe100000, "stc2%22'l%c\t%8-11d, cr%12-15d, %A"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0xfe000000, 0xff000010,
- "cdp2%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0xfe000010, 0xff100010,
- "mcr2%c\t%8-11d, %21-23d, %12-15R, cr%16-19d, cr%0-3d, {%5-7d}"},
- {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0xfe100010, 0xff100010,
- "mrc2%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"},
-
/* ARMv8.2 half-precision Floating point coprocessor 9 (VFP) instructions.
cp_num: bit <11:8> == 0b1001.
cond: bit <31:28> == 0b1110, otherwise, it's UNPREDICTABLE. */
{ANY, ARM_FEATURE_CORE_LOW (0), 0, 0, 0}
};
+/* Generic coprocessor instructions. These are only matched if a more specific
+ SIMD or co-processor instruction does not match first. */
+
+static const struct sopcode32 generic_coprocessor_opcodes[] =
+{
+ /* Generic coprocessor instructions. */
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
+ 0x0c400000, 0x0ff00000, "mcrr%c\t%8-11d, %4-7d, %12-15R, %16-19r, cr%0-3d"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
+ 0x0c500000, 0x0ff00000,
+ "mrrc%c\t%8-11d, %4-7d, %12-15Ru, %16-19Ru, cr%0-3d"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
+ 0x0e000000, 0x0f000010,
+ "cdp%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
+ 0x0e10f010, 0x0f10f010,
+ "mrc%c\t%8-11d, %21-23d, APSR_nzcv, cr%16-19d, cr%0-3d, {%5-7d}"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
+ 0x0e100010, 0x0f100010,
+ "mrc%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
+ 0x0e000010, 0x0f100010,
+ "mcr%c\t%8-11d, %21-23d, %12-15R, cr%16-19d, cr%0-3d, {%5-7d}"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
+ 0x0c000000, 0x0e100000, "stc%22'l%c\t%8-11d, cr%12-15d, %A"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
+ 0x0c100000, 0x0e100000, "ldc%22'l%c\t%8-11d, cr%12-15d, %A"},
+
+ /* V6 coprocessor instructions. */
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xfc500000, 0xfff00000,
+ "mrrc2%c\t%8-11d, %4-7d, %12-15Ru, %16-19Ru, cr%0-3d"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xfc400000, 0xfff00000,
+ "mcrr2%c\t%8-11d, %4-7d, %12-15R, %16-19R, cr%0-3d"},
+
+ /* V5 coprocessor instructions. */
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0xfc100000, 0xfe100000, "ldc2%22'l%c\t%8-11d, cr%12-15d, %A"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0xfc000000, 0xfe100000, "stc2%22'l%c\t%8-11d, cr%12-15d, %A"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0xfe000000, 0xff000010,
+ "cdp2%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0xfe000010, 0xff100010,
+ "mcr2%c\t%8-11d, %21-23d, %12-15R, cr%16-19d, cr%0-3d, {%5-7d}"},
+ {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0xfe100010, 0xff100010,
+ "mrc2%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"},
+
+ {ANY, ARM_FEATURE_CORE_LOW (0), 0, 0, 0}
+};
+
/* Neon opcode table: This does not encode the top byte -- that is
checked by the print_insn_neon routine, as it depends on whether we are
doing thumb32 or arm32 disassembly. */
{ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
0xf2300c10, 0xffb00f10, "vfms%c.f16\t%12-15,22R, %16-19,7R, %0-3,5R"},
+ /* BFloat16 instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
+ 0xfc000d00, 0xffb00f10, "vdot.bf16\t%12-15,22R, %16-19,7R, %0-3,5R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
+ 0xfe000d00, 0xffb00f10, "vdot.bf16\t%12-15,22R, %16-19,7R, d%0-3d[%5d]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
+ 0xfc000c40, 0xffb00f50, "vmmla.bf16\t%12-15,22R, %16-19,7R, %0-3,5R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
+ 0xf3b60640, 0xffbf0fd0, "vcvt%c.bf16.f32\t%12-15,22D, %0-3,5Q"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
+ 0xfc300810, 0xffb00f10, "vfma%6?tb.bf16\t%12-15,22Q, %16-19,7Q, %0-3,5Q"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
+ 0xfe300810, 0xffb00f10, "vfma%6?tb.bf16\t%12-15,22Q, %16-19,7Q, %0-2D[%3,5d]"},
+
+ /* Matrix Multiply instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM),
+ 0xfc200c40, 0xffb00f50, "vsmmla.s8\t%12-15,22R, %16-19,7R, %0-3,5R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM),
+ 0xfc200c50, 0xffb00f50, "vummla.u8\t%12-15,22R, %16-19,7R, %0-3,5R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM),
+ 0xfca00c40, 0xffb00f50, "vusmmla.s8\t%12-15,22R, %16-19,7R, %0-3,5R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM),
+ 0xfca00d00, 0xffb00f10, "vusdot.s8\t%12-15,22R, %16-19,7R, %0-3,5R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM),
+ 0xfe800d00, 0xffb00f10, "vusdot.s8\t%12-15,22R, %16-19,7R, d%0-3d[%5d]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM),
+ 0xfe800d10, 0xffb00f10, "vsudot.u8\t%12-15,22R, %16-19,7R, d%0-3d[%5d]"},
+
/* Two registers, miscellaneous. */
{ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8),
0xf3ba0400, 0xffbf0c10, "vrint%7-9?p?m?zaxn%u.f32\t%12-15,22R, %0-3,5R"},
%% %
+ %a print '+' or '-' or imm offset in vldr[bhwd] and
+ vstr[bhwd]
%c print condition code
+ %d print addr mode of MVE vldr[bhw] and vstr[bhw]
+ %u print 'U' (unsigned) or 'S' for various mve instructions
%i print MVE predicate(s) for vpt and vpst
+ %j print a 5-bit immediate from hw2[14:12,7:6]
+ %k print 48 if bit 7 is set, else print 64.
+ %m print rounding mode for vcvt and vrint
%n print vector comparison code for predicated instruction
+ %s print size for various vcvt instructions
%v print vector predicate for instruction in predicated
block
+ %o print offset scaled for vldr[hwd] and vstr[hwd]
+ %w print writeback mode for MVE v{st,ld}[24]
+ %B print the v{st,ld}[24] register list operand
+ %E print vmov, vmvn, vorr, vbic encoded constant
+ %N print generic index for vmov
+ %T print bottom ('b') or top ('t') of source register
+ %X print exchange field in vmla* instructions
+
%<bitfield>r print as an ARM register
+ %<bitfield>d print the bitfield in decimal
+ %<bitfield>A print accumulate or not
+ %<bitfield>c print bitfield as a condition code
+ %<bitfield>C print bitfield as an inverted condition code
%<bitfield>Q print as a MVE Q register
+ %<bitfield>F print as a MVE S register
%<bitfield>Z as %<>r but r15 is ZR instead of PC and r13 is
UNPREDICTABLE
+
+ %<bitfield>S as %<>r but r15 or r13 is UNPREDICTABLE
%<bitfield>s print size for vector predicate & non VMOV instructions
-*/
+ %<bitfield>I print carry flag or not
+ %<bitfield>i print immediate for vstr/vldr reg +/- imm
+ %<bitfield>h print high half of 64-bit destination reg
+ %<bitfield>k print immediate for vector conversion instruction
+ %<bitfield>l print low half of 64-bit destination reg
+ %<bitfield>o print rotate value for vcmul
+ %<bitfield>u print immediate value for vddup/vdwdup
+ %<bitfield>x print the bitfield in hex.
+ */
static const struct mopcode32 mve_opcodes[] =
{
0xfe011f40, 0xff811f50,
"vpt%i.s%20-21s\t%n, %17-19Q, %0-3Z"},
+ /* Vector VBIC immediate. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VBIC_IMM,
+ 0xef800070, 0xefb81070,
+ "vbic%v.i%8-11s\t%13-15,22Q, %E"},
+
+ /* Vector VBIC register. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VBIC_REG,
+ 0xef100150, 0xffb11f51,
+ "vbic%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VABAV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VABAV,
+ 0xee800f01, 0xefc10f51,
+ "vabav%v.%u%20-21s\t%12-15r, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VABD floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VABD_FP,
+ 0xff200d40, 0xffa11f51,
+ "vabd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VABD. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VABD_VEC,
+ 0xef000740, 0xef811f51,
+ "vabd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VABS floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VABS_FP,
+ 0xFFB10740, 0xFFB31FD1,
+ "vabs%v.f%18-19s\t%13-15,22Q, %1-3,5Q"},
+ /* Vector VABS. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VABS_VEC,
+ 0xffb10340, 0xffb31fd1,
+ "vabs%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VADD floating point T1. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VADD_FP_T1,
+ 0xef000d40, 0xffa11f51,
+ "vadd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+ /* Vector VADD floating point T2. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VADD_FP_T2,
+ 0xee300f40, 0xefb11f70,
+ "vadd%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+ /* Vector VADD T1. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADD_VEC_T1,
+ 0xef000840, 0xff811f51,
+ "vadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+ /* Vector VADD T2. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADD_VEC_T2,
+ 0xee010f40, 0xff811f70,
+ "vadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VADDLV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADDLV,
+ 0xee890f00, 0xef8f1fd1,
+ "vaddlv%5A%v.%u32\t%13-15l, %20-22h, %1-3Q"},
+
+ /* Vector VADDV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADDV,
+ 0xeef10f00, 0xeff31fd1,
+ "vaddv%5A%v.%u%18-19s\t%13-15l, %1-3Q"},
+
+ /* Vector VADC. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADC,
+ 0xee300f00, 0xffb10f51,
+ "vadc%12I%v.i32\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VAND. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VAND,
+ 0xef000150, 0xffb11f51,
+ "vand%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VBRSR register. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VBRSR,
+ 0xfe011e60, 0xff811f70,
+ "vbrsr%v.%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VCADD floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCADD_FP,
+ 0xfc800840, 0xfea11f51,
+ "vcadd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%24o"},
+
+ /* Vector VCADD. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VCADD_VEC,
+ 0xfe000f00, 0xff810f51,
+ "vcadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%12o"},
+
+ /* Vector VCLS. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VCLS,
+ 0xffb00440, 0xffb31fd1,
+ "vcls%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VCLZ. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VCLZ,
+ 0xffb004c0, 0xffb31fd1,
+ "vclz%v.i%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VCMLA. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCMLA_FP,
+ 0xfc200840, 0xfe211f51,
+ "vcmla%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%23-24o"},
+
/* Vector VCMP floating point T1. */
{ARM_FEATURE_COPROC (FPU_MVE_FP),
MVE_VCMP_FP_T1,
0xee001f40, 0xef811f70,
"vhsub%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+ /* Vector VCMUL. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCMUL_FP,
+ 0xee300e00, 0xefb10f50,
+ "vcmul%v.f%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%0,12o"},
+
+ /* Vector VCTP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VCTP,
+ 0xf000e801, 0xffc0ffff,
+ "vctp%v.%20-21s\t%16-19r"},
+
/* Vector VDUP. */
{ARM_FEATURE_COPROC (FPU_MVE),
MVE_VDUP,
0xef000140, 0xef811f51,
"vrhadd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (0),
- MVE_NONE,
- 0x00000000, 0x00000000, 0}
-};
+ /* Vector VCVT. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCVT_FP_FIX_VEC,
+ 0xef800c50, 0xef801cd1,
+ "vcvt%v.%s\t%13-15,22Q, %1-3,5Q, #%16-21k"},
-/* Opcode tables: ARM, 16-bit Thumb, 32-bit Thumb. All three are partially
- ordered: they must be searched linearly from the top to obtain a correct
- match. */
+ /* Vector VCVT. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCVT_BETWEEN_FP_INT,
+ 0xffb30640, 0xffb31e51,
+ "vcvt%v.%s\t%13-15,22Q, %1-3,5Q"},
-/* print_insn_arm recognizes the following format control codes:
+ /* Vector VCVT between single and half-precision float, bottom half. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCVT_FP_HALF_FP,
+ 0xee3f0e01, 0xefbf1fd1,
+ "vcvtb%v.%s\t%13-15,22Q, %1-3,5Q"},
- %% %
+ /* Vector VCVT between single and half-precision float, top half. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCVT_FP_HALF_FP,
+ 0xee3f1e01, 0xefbf1fd1,
+ "vcvtt%v.%s\t%13-15,22Q, %1-3,5Q"},
- %a print address for ldr/str instruction
- %s print address for ldr/str halfword/signextend instruction
- %S like %s but allow UNPREDICTABLE addressing
- %b print branch destination
- %c print condition code (always bits 28-31)
- %m print register mask for ldm/stm instruction
- %o print operand2 (immediate or register + shift)
- %p print 'p' iff bits 12-15 are 15
- %t print 't' iff bit 21 set and bit 24 clear
- %B print arm BLX(1) destination
- %C print the PSR sub type.
- %U print barrier type.
- %P print address for pli instruction.
+ /* Vector VCVT. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCVT_FROM_FP_TO_INT,
+ 0xffb30040, 0xffb31c51,
+ "vcvt%m%v.%s\t%13-15,22Q, %1-3,5Q"},
- %<bitfield>r print as an ARM register
- %<bitfield>T print as an ARM register + 1
- %<bitfield>R as %r but r15 is UNPREDICTABLE
- %<bitfield>{r|R}u as %{r|R} but if matches the other %u field then is UNPREDICTABLE
- %<bitfield>{r|R}U as %{r|R} but if matches the other %U field then is UNPREDICTABLE
- %<bitfield>d print the bitfield in decimal
- %<bitfield>W print the bitfield plus one in decimal
- %<bitfield>x print the bitfield in hex
- %<bitfield>X print the bitfield as 1 hex digit without leading "0x"
+ /* Vector VDDUP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VDDUP,
+ 0xee011f6e, 0xff811f7e,
+ "vddup%v.u%20-21s\t%13-15,22Q, %17-19l, #%0,7u"},
- %<bitfield>'c print specified char iff bitfield is all ones
- %<bitfield>`c print specified char iff bitfield is all zeroes
- %<bitfield>?ab... select from array of values in big endian order
+ /* Vector VDWDUP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VDWDUP,
+ 0xee011f60, 0xff811f70,
+ "vdwdup%v.u%20-21s\t%13-15,22Q, %17-19l, %1-3h, #%0,7u"},
- %e print arm SMI operand (bits 0..7,8..19).
- %E print the LSB and WIDTH fields of a BFI or BFC instruction.
- %V print the 16-bit immediate field of a MOVT or MOVW instruction.
- %R print the SPSR/CPSR or banked register of an MRS. */
+ /* Vector VHCADD. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VHCADD,
+ 0xee000f00, 0xff810f51,
+ "vhcadd%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%12o"},
-static const struct opcode32 arm_opcodes[] =
-{
- /* ARM instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0xe1a00000, 0xffffffff, "nop\t\t\t; (mov r0, r0)"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0xe7f000f0, 0xfff000f0, "udf\t#%e"},
+ /* Vector VIWDUP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VIWDUP,
+ 0xee010f60, 0xff811f70,
+ "viwdup%v.u%20-21s\t%13-15,22Q, %17-19l, %1-3h, #%0,7u"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5),
- 0x012FFF10, 0x0ffffff0, "bx%c\t%0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
- 0x00000090, 0x0fe000f0, "mul%20's%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
- 0x00200090, 0x0fe000f0, "mla%20's%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V2S),
- 0x01000090, 0x0fb00ff0, "swp%22'b%c\t%12-15RU, %0-3Ru, [%16-19RuU]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V3M),
- 0x00800090, 0x0fa000f0,
- "%22?sumull%20's%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V3M),
- 0x00a00090, 0x0fa000f0,
- "%22?sumlal%20's%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+ /* Vector VIDUP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VIDUP,
+ 0xee010f6e, 0xff811f7e,
+ "vidup%v.u%20-21s\t%13-15,22Q, %17-19l, #%0,7u"},
- /* V8.2 RAS extension instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
- 0xe320f010, 0xffffffff, "esb"},
+ /* Vector VLD2. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLD2,
+ 0xfc901e00, 0xff901e5f,
+ "vld2%5d.%7-8s\t%B, [%16-19r]%w"},
- /* V8 instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0x0320f005, 0x0fffffff, "sevl"},
- /* Defined in V8 but is in NOP space so available to all arch. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0xe1000070, 0xfff000f0, "hlt\t0x%16-19X%12-15X%8-11X%0-3X"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
- 0x01800e90, 0x0ff00ff0, "stlex%c\t%12-15r, %0-3r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01900e9f, 0x0ff00fff, "ldaex%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0x01a00e90, 0x0ff00ff0, "stlexd%c\t%12-15r, %0-3r, %0-3T, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0x01b00e9f, 0x0ff00fff, "ldaexd%c\t%12-15r, %12-15T, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01c00e90, 0x0ff00ff0, "stlexb%c\t%12-15r, %0-3r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01d00e9f, 0x0ff00fff, "ldaexb%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01e00e90, 0x0ff00ff0, "stlexh%c\t%12-15r, %0-3r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01f00e9f, 0x0ff00fff, "ldaexh%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x0180fc90, 0x0ff0fff0, "stl%c\t%0-3r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01900c9f, 0x0ff00fff, "lda%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01c0fc90, 0x0ff0fff0, "stlb%c\t%0-3r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01d00c9f, 0x0ff00fff, "ldab%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01e0fc90, 0x0ff0fff0, "stlh%c\t%0-3r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS),
- 0x01f00c9f, 0x0ff00fff, "ldah%c\t%12-15r, [%16-19R]"},
- /* CRC32 instructions. */
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xe1000040, 0xfff00ff0, "crc32b\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xe1200040, 0xfff00ff0, "crc32h\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xe1400040, 0xfff00ff0, "crc32w\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xe1000240, 0xfff00ff0, "crc32cb\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xe1200240, 0xfff00ff0, "crc32ch\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xe1400240, 0xfff00ff0, "crc32cw\t%12-15R, %16-19R, %0-3R"},
+ /* Vector VLD4. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLD4,
+ 0xfc901e01, 0xff901e1f,
+ "vld4%5-6d.%7-8s\t%B, [%16-19r]%w"},
- /* Privileged Access Never extension instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
- 0xf1100000, 0xfffffdff, "setpan\t#%9-9d"},
+ /* Vector VLDRB gather load. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRB_GATHER_T1,
+ 0xec900e00, 0xefb01e50,
+ "vldrb%v.%u%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q]"},
- /* Virtualization Extension instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT), 0x0160006e, 0x0fffffff, "eret%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT), 0x01400070, 0x0ff000f0, "hvc%c\t%e"},
+ /* Vector VLDRH gather load. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRH_GATHER_T2,
+ 0xec900e10, 0xefb01e50,
+ "vldrh%v.%u%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"},
- /* Integer Divide Extension instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
- 0x0710f010, 0x0ff0f0f0, "sdiv%c\t%16-19r, %0-3r, %8-11r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
- 0x0730f010, 0x0ff0f0f0, "udiv%c\t%16-19r, %0-3r, %8-11r"},
+ /* Vector VLDRW gather load. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRW_GATHER_T3,
+ 0xfc900f40, 0xffb01fd0,
+ "vldrw%v.u32\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"},
- /* MP Extension instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_MP), 0xf410f000, 0xfc70f000, "pldw\t%a"},
+ /* Vector VLDRD gather load. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRD_GATHER_T4,
+ 0xec900fd0, 0xefb01fd0,
+ "vldrd%v.u64\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"},
- /* Speculation Barriers. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V3), 0xe320f014, 0xffffffff, "csdb"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V3), 0xf57ff040, 0xffffffff, "ssbb"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V3), 0xf57ff044, 0xffffffff, "pssbb"},
+ /* Vector VLDRW gather load. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRW_GATHER_T5,
+ 0xfd101e00, 0xff111f00,
+ "vldrw%v.u32\t%13-15,22Q, [%17-19,7Q, #%a%0-6i]%w"},
- /* V7 instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf450f000, 0xfd70f000, "pli\t%P"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0x0320f0f0, 0x0ffffff0, "dbg%c\t#%0-3d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xf57ff051, 0xfffffff3, "dmb\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xf57ff041, 0xfffffff3, "dsb\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf57ff050, 0xfffffff0, "dmb\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf57ff040, 0xfffffff0, "dsb\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf57ff060, 0xfffffff0, "isb\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7),
- 0x0320f000, 0x0fffffff, "nop%c\t{%0-7d}"},
+ /* Vector VLDRD gather load, variant T6. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRD_GATHER_T6,
+ 0xfd101f00, 0xff111f00,
+ "vldrd%v.u64\t%13-15,22Q, [%17-19,7Q, #%a%0-6i]%w"},
- /* ARM V6T2 instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0x07c0001f, 0x0fe0007f, "bfc%c\t%12-15R, %E"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0x07c00010, 0x0fe00070, "bfi%c\t%12-15R, %0-3r, %E"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0x00600090, 0x0ff000f0, "mls%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0x002000b0, 0x0f3000f0, "strht%c\t%12-15R, %S"},
+ /* Vector VLDRB. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRB_T1,
+ 0xec100e00, 0xee581e00,
+ "vldrb%v.%u%7-8s\t%13-15Q, %d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0x00300090, 0x0f3000f0, UNDEFINED_INSTRUCTION },
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0x00300090, 0x0f300090, "ldr%6's%5?hbt%c\t%12-15R, %S"},
+ /* Vector VLDRH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRH_T2,
+ 0xec180e00, 0xee581e00,
+ "vldrh%v.%u%7-8s\t%13-15Q, %d"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0x03000000, 0x0ff00000, "movw%c\t%12-15R, %V"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0x03400000, 0x0ff00000, "movt%c\t%12-15R, %V"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0x06ff0f30, 0x0fff0ff0, "rbit%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0x07a00050, 0x0fa00070, "%22?usbfx%c\t%12-15r, %0-3r, #%7-11d, #%16-20W"},
+ /* Vector VLDRB unsigned, variant T5. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRB_T5,
+ 0xec101e00, 0xfe101f80,
+ "vldrb%v.u8\t%13-15,22Q, %d"},
- /* ARM Security extension instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
- 0x01600070, 0x0ff000f0, "smc%c\t%e"},
+ /* Vector VLDRH unsigned, variant T6. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRH_T6,
+ 0xec101e80, 0xfe101f80,
+ "vldrh%v.u16\t%13-15,22Q, %d"},
- /* ARM V6K instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0xf57ff01f, 0xffffffff, "clrex"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x01d00f9f, 0x0ff00fff, "ldrexb%c\t%12-15R, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x01b00f9f, 0x0ff00fff, "ldrexd%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x01f00f9f, 0x0ff00fff, "ldrexh%c\t%12-15R, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x01c00f90, 0x0ff00ff0, "strexb%c\t%12-15R, %0-3R, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x01a00f90, 0x0ff00ff0, "strexd%c\t%12-15R, %0-3r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x01e00f90, 0x0ff00ff0, "strexh%c\t%12-15R, %0-3R, [%16-19R]"},
+ /* Vector VLDRW unsigned, variant T7. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VLDRW_T7,
+ 0xec101f00, 0xfe101f80,
+ "vldrw%v.u32\t%13-15,22Q, %d"},
- /* ARMv8.5-A instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB), 0xf57ff070, 0xffffffff, "sb"},
+ /* Vector VMAX. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMAX,
+ 0xef000640, 0xef811f51,
+ "vmax%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- /* ARM V6K NOP hints. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x0320f001, 0x0fffffff, "yield%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x0320f002, 0x0fffffff, "wfe%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x0320f003, 0x0fffffff, "wfi%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x0320f004, 0x0fffffff, "sev%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
- 0x0320f000, 0x0fffff00, "nop%c\t{%0-7d}"},
+ /* Vector VMAXA. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMAXA,
+ 0xee330e81, 0xffb31fd1,
+ "vmaxa%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
- /* ARM V6 instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xf1080000, 0xfffffe3f, "cpsie\t%8'a%7'i%6'f"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xf10a0000, 0xfffffe20, "cpsie\t%8'a%7'i%6'f,#%0-4d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xf10C0000, 0xfffffe3f, "cpsid\t%8'a%7'i%6'f"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xf10e0000, 0xfffffe20, "cpsid\t%8'a%7'i%6'f,#%0-4d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xf1000000, 0xfff1fe20, "cps\t#%0-4d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800010, 0x0ff00ff0, "pkhbt%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800010, 0x0ff00070, "pkhbt%c\t%12-15R, %16-19R, %0-3R, lsl #%7-11d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800050, 0x0ff00ff0, "pkhtb%c\t%12-15R, %16-19R, %0-3R, asr #32"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800050, 0x0ff00070, "pkhtb%c\t%12-15R, %16-19R, %0-3R, asr #%7-11d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x01900f9f, 0x0ff00fff, "ldrex%c\tr%12-15d, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06200f10, 0x0ff00ff0, "qadd16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06200f90, 0x0ff00ff0, "qadd8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06200f30, 0x0ff00ff0, "qasx%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06200f70, 0x0ff00ff0, "qsub16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06200ff0, 0x0ff00ff0, "qsub8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06200f50, 0x0ff00ff0, "qsax%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06100f10, 0x0ff00ff0, "sadd16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06100f90, 0x0ff00ff0, "sadd8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06100f30, 0x0ff00ff0, "sasx%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06300f10, 0x0ff00ff0, "shadd16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06300f90, 0x0ff00ff0, "shadd8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06300f30, 0x0ff00ff0, "shasx%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06300f70, 0x0ff00ff0, "shsub16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06300ff0, 0x0ff00ff0, "shsub8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06300f50, 0x0ff00ff0, "shsax%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06100f70, 0x0ff00ff0, "ssub16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06100ff0, 0x0ff00ff0, "ssub8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06100f50, 0x0ff00ff0, "ssax%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06500f10, 0x0ff00ff0, "uadd16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06500f90, 0x0ff00ff0, "uadd8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06500f30, 0x0ff00ff0, "uasx%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06700f10, 0x0ff00ff0, "uhadd16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06700f90, 0x0ff00ff0, "uhadd8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06700f30, 0x0ff00ff0, "uhasx%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06700f70, 0x0ff00ff0, "uhsub16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06700ff0, 0x0ff00ff0, "uhsub8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06700f50, 0x0ff00ff0, "uhsax%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06600f10, 0x0ff00ff0, "uqadd16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06600f90, 0x0ff00ff0, "uqadd8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06600f30, 0x0ff00ff0, "uqasx%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06600f70, 0x0ff00ff0, "uqsub16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06600ff0, 0x0ff00ff0, "uqsub8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06600f50, 0x0ff00ff0, "uqsax%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06500f70, 0x0ff00ff0, "usub16%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06500ff0, 0x0ff00ff0, "usub8%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06500f50, 0x0ff00ff0, "usax%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06bf0f30, 0x0fff0ff0, "rev%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06bf0fb0, 0x0fff0ff0, "rev16%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ff0fb0, 0x0fff0ff0, "revsh%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xf8100a00, 0xfe50ffff, "rfe%23?id%24?ba\t%16-19r%21'!"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06bf0070, 0x0fff0ff0, "sxth%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06bf0470, 0x0fff0ff0, "sxth%c\t%12-15R, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06bf0870, 0x0fff0ff0, "sxth%c\t%12-15R, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06bf0c70, 0x0fff0ff0, "sxth%c\t%12-15R, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x068f0070, 0x0fff0ff0, "sxtb16%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x068f0470, 0x0fff0ff0, "sxtb16%c\t%12-15R, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x068f0870, 0x0fff0ff0, "sxtb16%c\t%12-15R, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x068f0c70, 0x0fff0ff0, "sxtb16%c\t%12-15R, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06af0070, 0x0fff0ff0, "sxtb%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06af0470, 0x0fff0ff0, "sxtb%c\t%12-15R, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06af0870, 0x0fff0ff0, "sxtb%c\t%12-15R, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06af0c70, 0x0fff0ff0, "sxtb%c\t%12-15R, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ff0070, 0x0fff0ff0, "uxth%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ff0470, 0x0fff0ff0, "uxth%c\t%12-15R, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ff0870, 0x0fff0ff0, "uxth%c\t%12-15R, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ff0c70, 0x0fff0ff0, "uxth%c\t%12-15R, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06cf0070, 0x0fff0ff0, "uxtb16%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06cf0470, 0x0fff0ff0, "uxtb16%c\t%12-15R, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06cf0870, 0x0fff0ff0, "uxtb16%c\t%12-15R, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06cf0c70, 0x0fff0ff0, "uxtb16%c\t%12-15R, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ef0070, 0x0fff0ff0, "uxtb%c\t%12-15R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ef0470, 0x0fff0ff0, "uxtb%c\t%12-15R, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ef0870, 0x0fff0ff0, "uxtb%c\t%12-15R, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06ef0c70, 0x0fff0ff0, "uxtb%c\t%12-15R, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06b00070, 0x0ff00ff0, "sxtah%c\t%12-15R, %16-19r, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06b00470, 0x0ff00ff0, "sxtah%c\t%12-15R, %16-19r, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06b00870, 0x0ff00ff0, "sxtah%c\t%12-15R, %16-19r, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06b00c70, 0x0ff00ff0, "sxtah%c\t%12-15R, %16-19r, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800070, 0x0ff00ff0, "sxtab16%c\t%12-15R, %16-19r, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800470, 0x0ff00ff0, "sxtab16%c\t%12-15R, %16-19r, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800870, 0x0ff00ff0, "sxtab16%c\t%12-15R, %16-19r, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800c70, 0x0ff00ff0, "sxtab16%c\t%12-15R, %16-19r, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06a00070, 0x0ff00ff0, "sxtab%c\t%12-15R, %16-19r, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06a00470, 0x0ff00ff0, "sxtab%c\t%12-15R, %16-19r, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06a00870, 0x0ff00ff0, "sxtab%c\t%12-15R, %16-19r, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06a00c70, 0x0ff00ff0, "sxtab%c\t%12-15R, %16-19r, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06f00070, 0x0ff00ff0, "uxtah%c\t%12-15R, %16-19r, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06f00470, 0x0ff00ff0, "uxtah%c\t%12-15R, %16-19r, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06f00870, 0x0ff00ff0, "uxtah%c\t%12-15R, %16-19r, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06f00c70, 0x0ff00ff0, "uxtah%c\t%12-15R, %16-19r, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06c00070, 0x0ff00ff0, "uxtab16%c\t%12-15R, %16-19r, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06c00470, 0x0ff00ff0, "uxtab16%c\t%12-15R, %16-19r, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06c00870, 0x0ff00ff0, "uxtab16%c\t%12-15R, %16-19r, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06c00c70, 0x0ff00ff0, "uxtab16%c\t%12-15R, %16-19r, %0-3R, ROR #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06e00070, 0x0ff00ff0, "uxtab%c\t%12-15R, %16-19r, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06e00470, 0x0ff00ff0, "uxtab%c\t%12-15R, %16-19r, %0-3R, ror #8"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06e00870, 0x0ff00ff0, "uxtab%c\t%12-15R, %16-19r, %0-3R, ror #16"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06e00c70, 0x0ff00ff0, "uxtab%c\t%12-15R, %16-19r, %0-3R, ror #24"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06800fb0, 0x0ff00ff0, "sel%c\t%12-15R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xf1010000, 0xfffffc00, "setend\t%9?ble"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x0700f010, 0x0ff0f0d0, "smuad%5'x%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x0700f050, 0x0ff0f0d0, "smusd%5'x%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x07000010, 0x0ff000d0, "smlad%5'x%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x07400010, 0x0ff000d0, "smlald%5'x%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x07000050, 0x0ff000d0, "smlsd%5'x%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x07400050, 0x0ff000d0, "smlsld%5'x%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x0750f010, 0x0ff0f0d0, "smmul%5'r%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x07500010, 0x0ff000d0, "smmla%5'r%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x075000d0, 0x0ff000d0, "smmls%5'r%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0xf84d0500, 0xfe5fffe0, "srs%23?id%24?ba\t%16-19r%21'!, #%0-4d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06a00010, 0x0fe00ff0, "ssat%c\t%12-15R, #%16-20W, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06a00010, 0x0fe00070, "ssat%c\t%12-15R, #%16-20W, %0-3R, lsl #%7-11d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06a00050, 0x0fe00070, "ssat%c\t%12-15R, #%16-20W, %0-3R, asr #%7-11d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06a00f30, 0x0ff00ff0, "ssat16%c\t%12-15r, #%16-19W, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x01800f90, 0x0ff00ff0, "strex%c\t%12-15R, %0-3R, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x00400090, 0x0ff000f0, "umaal%c\t%12-15R, %16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x0780f010, 0x0ff0f0f0, "usad8%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x07800010, 0x0ff000f0, "usada8%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06e00010, 0x0fe00ff0, "usat%c\t%12-15R, #%16-20d, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06e00010, 0x0fe00070, "usat%c\t%12-15R, #%16-20d, %0-3R, lsl #%7-11d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06e00050, 0x0fe00070, "usat%c\t%12-15R, #%16-20d, %0-3R, asr #%7-11d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
- 0x06e00f30, 0x0ff00ff0, "usat16%c\t%12-15R, #%16-19d, %0-3R"},
+ /* Vector VMAXNM floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMAXNM_FP,
+ 0xff000f50, 0xffa11f51,
+ "vmaxnm%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- /* V5J instruction. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5J),
- 0x012fff20, 0x0ffffff0, "bxj%c\t%0-3R"},
+ /* Vector VMAXNMA floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMAXNMA_FP,
+ 0xee3f0e81, 0xefbf1fd1,
+ "vmaxnma%v.f%28s\t%13-15,22Q, %1-3,5Q"},
- /* V5 Instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0xe1200070, 0xfff000f0,
- "bkpt\t0x%16-19X%12-15X%8-11X%0-3X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0xfa000000, 0xfe000000, "blx\t%B"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0x012fff30, 0x0ffffff0, "blx%c\t%0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
- 0x016f0f10, 0x0fff0ff0, "clz%c\t%12-15R, %0-3R"},
+ /* Vector VMAXNMV floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMAXNMV_FP,
+ 0xeeee0f00, 0xefff0fd1,
+ "vmaxnmv%v.f%28s\t%12-15r, %1-3,5Q"},
- /* V5E "El Segundo" Instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
- 0x000000d0, 0x0e1000f0, "ldrd%c\t%12-15r, %s"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
- 0x000000f0, 0x0e1000f0, "strd%c\t%12-15r, %s"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
- 0xf450f000, 0xfc70f000, "pld\t%a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x01000080, 0x0ff000f0, "smlabb%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x010000a0, 0x0ff000f0, "smlatb%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x010000c0, 0x0ff000f0, "smlabt%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x010000e0, 0x0ff000f0, "smlatt%c\t%16-19r, %0-3r, %8-11R, %12-15R"},
+ /* Vector VMAXNMAV floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMAXNMAV_FP,
+ 0xeeec0f00, 0xefff0fd1,
+ "vmaxnmav%v.f%28s\t%12-15r, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x01200080, 0x0ff000f0, "smlawb%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x012000c0, 0x0ff000f0, "smlawt%c\t%16-19R, %0-3r, %8-11R, %12-15R"},
+ /* Vector VMAXV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMAXV,
+ 0xeee20f00, 0xeff30fd1,
+ "vmaxv%v.%u%18-19s\t%12-15r, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x01400080, 0x0ff000f0, "smlalbb%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x014000a0, 0x0ff000f0, "smlaltb%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x014000c0, 0x0ff000f0, "smlalbt%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x014000e0, 0x0ff000f0, "smlaltt%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+ /* Vector VMAXAV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMAXAV,
+ 0xeee00f00, 0xfff30fd1,
+ "vmaxav%v.s%18-19s\t%12-15r, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x01600080, 0x0ff0f0f0, "smulbb%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x016000a0, 0x0ff0f0f0, "smultb%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x016000c0, 0x0ff0f0f0, "smulbt%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x016000e0, 0x0ff0f0f0, "smultt%c\t%16-19R, %0-3R, %8-11R"},
+ /* Vector VMIN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMIN,
+ 0xef000650, 0xef811f51,
+ "vmin%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x012000a0, 0x0ff0f0f0, "smulwb%c\t%16-19R, %0-3R, %8-11R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x012000e0, 0x0ff0f0f0, "smulwt%c\t%16-19R, %0-3R, %8-11R"},
+ /* Vector VMINA. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMINA,
+ 0xee331e81, 0xffb31fd1,
+ "vmina%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x01000050, 0x0ff00ff0, "qadd%c\t%12-15R, %0-3R, %16-19R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x01400050, 0x0ff00ff0, "qdadd%c\t%12-15R, %0-3R, %16-19R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x01200050, 0x0ff00ff0, "qsub%c\t%12-15R, %0-3R, %16-19R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
- 0x01600050, 0x0ff00ff0, "qdsub%c\t%12-15R, %0-3R, %16-19R"},
+ /* Vector VMINNM floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMINNM_FP,
+ 0xff200f50, 0xffa11f51,
+ "vminnm%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- /* ARM Instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x052d0004, 0x0fff0fff, "push%c\t{%12-15r}\t\t; (str%c %12-15r, %a)"},
+ /* Vector VMINNMA floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMINNMA_FP,
+ 0xee3f1e81, 0xefbf1fd1,
+ "vminnma%v.f%28s\t%13-15,22Q, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x04400000, 0x0e500000, "strb%t%c\t%12-15R, %a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x04000000, 0x0e500000, "str%t%c\t%12-15r, %a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x06400000, 0x0e500ff0, "strb%t%c\t%12-15R, %a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x06000000, 0x0e500ff0, "str%t%c\t%12-15r, %a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x04400000, 0x0c500010, "strb%t%c\t%12-15R, %a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x04000000, 0x0c500010, "str%t%c\t%12-15r, %a"},
+ /* Vector VMINNMV floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMINNMV_FP,
+ 0xeeee0f80, 0xefff0fd1,
+ "vminnmv%v.f%28s\t%12-15r, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x04400000, 0x0e500000, "strb%c\t%12-15R, %a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x06400000, 0x0e500010, "strb%c\t%12-15R, %a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x004000b0, 0x0e5000f0, "strh%c\t%12-15R, %s"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x000000b0, 0x0e500ff0, "strh%c\t%12-15R, %s"},
+ /* Vector VMINNMAV floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMINNMAV_FP,
+ 0xeeec0f80, 0xefff0fd1,
+ "vminnmav%v.f%28s\t%12-15r, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00500090, 0x0e5000f0, UNDEFINED_INSTRUCTION},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00500090, 0x0e500090, "ldr%6's%5?hb%c\t%12-15R, %s"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00100090, 0x0e500ff0, UNDEFINED_INSTRUCTION},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00100090, 0x0e500f90, "ldr%6's%5?hb%c\t%12-15R, %s"},
+ /* Vector VMINV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMINV,
+ 0xeee20f80, 0xeff30fd1,
+ "vminv%v.%u%18-19s\t%12-15r, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x02000000, 0x0fe00000, "and%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00000000, 0x0fe00010, "and%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00000010, 0x0fe00090, "and%20's%c\t%12-15R, %16-19R, %o"},
+ /* Vector VMINAV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMINAV,
+ 0xeee00f80, 0xfff30fd1,
+ "vminav%v.s%18-19s\t%12-15r, %1-3,5Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x02200000, 0x0fe00000, "eor%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00200000, 0x0fe00010, "eor%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00200010, 0x0fe00090, "eor%20's%c\t%12-15R, %16-19R, %o"},
+ /* Vector VMLA. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLA,
+ 0xee010e40, 0xef811f70,
+ "vmla%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x02400000, 0x0fe00000, "sub%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00400000, 0x0fe00010, "sub%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00400010, 0x0fe00090, "sub%20's%c\t%12-15R, %16-19R, %o"},
+ /* Vector VMLALDAV. Note must appear before VMLADAV due to instruction
+ opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLALDAV,
+ 0xee801e00, 0xef801f51,
+ "vmlaldav%5Ax%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x02600000, 0x0fe00000, "rsb%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00600000, 0x0fe00010, "rsb%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00600010, 0x0fe00090, "rsb%20's%c\t%12-15R, %16-19R, %o"},
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLALDAV,
+ 0xee800e00, 0xef801f51,
+ "vmlalv%5A%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x02800000, 0x0fe00000, "add%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00800000, 0x0fe00010, "add%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00800010, 0x0fe00090, "add%20's%c\t%12-15R, %16-19R, %o"},
+ /* Vector VMLAV T1 variant, same as VMLADAV but with X == 0. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLADAV_T1,
+ 0xeef00e00, 0xeff01f51,
+ "vmlav%5A%v.%u%16s\t%13-15l, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x02a00000, 0x0fe00000, "adc%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00a00000, 0x0fe00010, "adc%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00a00010, 0x0fe00090, "adc%20's%c\t%12-15R, %16-19R, %o"},
+ /* Vector VMLAV T2 variant, same as VMLADAV but with X == 0. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLADAV_T2,
+ 0xeef00f00, 0xeff11f51,
+ "vmlav%5A%v.%u8\t%13-15l, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x02c00000, 0x0fe00000, "sbc%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00c00000, 0x0fe00010, "sbc%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00c00010, 0x0fe00090, "sbc%20's%c\t%12-15R, %16-19R, %o"},
+ /* Vector VMLADAV T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLADAV_T1,
+ 0xeef01e00, 0xeff01f51,
+ "vmladav%5Ax%v.%u%16s\t%13-15l, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x02e00000, 0x0fe00000, "rsc%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00e00000, 0x0fe00010, "rsc%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00e00010, 0x0fe00090, "rsc%20's%c\t%12-15R, %16-19R, %o"},
+ /* Vector VMLADAV T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLADAV_T2,
+ 0xeef01f00, 0xeff11f51,
+ "vmladav%5Ax%v.%u8\t%13-15l, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
- 0x0120f200, 0x0fb0f200, "msr%c\t%C, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V3),
- 0x0120f000, 0x0db0f000, "msr%c\t%C, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V3),
- 0x01000000, 0x0fb00cff, "mrs%c\t%12-15R, %R"},
+ /* Vector VMLAS. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLAS,
+ 0xee011e40, 0xef811f70,
+ "vmlas%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x03000000, 0x0fe00000, "tst%p%c\t%16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01000000, 0x0fe00010, "tst%p%c\t%16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01000010, 0x0fe00090, "tst%p%c\t%16-19R, %o"},
+ /* Vector VRMLSLDAVH. Note must appear before VMLSDAV due to instruction
+ opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRMLSLDAVH,
+ 0xfe800e01, 0xff810f51,
+ "vrmlsldavh%5A%X%v.s32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x03300000, 0x0ff00000, "teq%p%c\t%16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01300000, 0x0ff00010, "teq%p%c\t%16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01300010, 0x0ff00010, "teq%p%c\t%16-19R, %o"},
+ /* Vector VMLSLDAV. Note must appear before VMLSDAV due to instruction
+ opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLSLDAV,
+ 0xee800e01, 0xff800f51,
+ "vmlsldav%5A%X%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x03400000, 0x0fe00000, "cmp%p%c\t%16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01400000, 0x0fe00010, "cmp%p%c\t%16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01400010, 0x0fe00090, "cmp%p%c\t%16-19R, %o"},
+ /* Vector VMLSDAV T1 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLSDAV_T1,
+ 0xeef00e01, 0xfff00f51,
+ "vmlsdav%5A%X%v.s%16s\t%13-15l, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x03600000, 0x0fe00000, "cmn%p%c\t%16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01600000, 0x0fe00010, "cmn%p%c\t%16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01600010, 0x0fe00090, "cmn%p%c\t%16-19R, %o"},
+ /* Vector VMLSDAV T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLSDAV_T2,
+ 0xfef00e01, 0xfff10f51,
+ "vmlsdav%5A%X%v.s8\t%13-15l, %17-19,7Q, %1-3Q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x03800000, 0x0fe00000, "orr%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01800000, 0x0fe00010, "orr%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01800010, 0x0fe00090, "orr%20's%c\t%12-15R, %16-19R, %o"},
+ /* Vector VMOV between gpr and half precision register, op == 0. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMOV_HFP_TO_GP,
+ 0xee000910, 0xfff00f7f,
+ "vmov.f16\t%7,16-19F, %12-15r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x03a00000, 0x0fef0000, "mov%20's%c\t%12-15r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01a00000, 0x0def0ff0, "mov%20's%c\t%12-15r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01a00000, 0x0def0060, "lsl%20's%c\t%12-15R, %q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01a00020, 0x0def0060, "lsr%20's%c\t%12-15R, %q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01a00040, 0x0def0060, "asr%20's%c\t%12-15R, %q"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01a00060, 0x0def0ff0, "rrx%20's%c\t%12-15r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01a00060, 0x0def0060, "ror%20's%c\t%12-15R, %q"},
+ /* Vector VMOV between gpr and half precision register, op == 1. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMOV_HFP_TO_GP,
+ 0xee100910, 0xfff00f7f,
+ "vmov.f16\t%12-15r, %7,16-19F"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x03c00000, 0x0fe00000, "bic%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01c00000, 0x0fe00010, "bic%20's%c\t%12-15r, %16-19r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01c00010, 0x0fe00090, "bic%20's%c\t%12-15R, %16-19R, %o"},
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMOV_GP_TO_VEC_LANE,
+ 0xee000b10, 0xff900f1f,
+ "vmov%c.%5-6,21-22s\t%17-19,7Q[%N], %12-15r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x03e00000, 0x0fe00000, "mvn%20's%c\t%12-15r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01e00000, 0x0fe00010, "mvn%20's%c\t%12-15r, %o"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x01e00010, 0x0fe00090, "mvn%20's%c\t%12-15R, %o"},
+ /* Vector VORR immediate to vector.
+ NOTE: MVE_VORR_IMM must appear in the table
+ before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VORR_IMM,
+ 0xef800050, 0xefb810f0,
+ "vorr%v.i%8-11s\t%13-15,22Q, %E"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x06000010, 0x0e000010, UNDEFINED_INSTRUCTION},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x049d0004, 0x0fff0fff, "pop%c\t{%12-15r}\t\t; (ldr%c %12-15r, %a)"},
+ /* Vector VQSHL T2 Variant.
+ NOTE: MVE_VQSHL_T2 must appear in the table
+ before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHL_T2,
+ 0xef800750, 0xef801fd1,
+ "vqshl%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x04500000, 0x0c500000, "ldrb%t%c\t%12-15R, %a"},
+ /* Vector VQSHLU T3 Variant.
+ NOTE: MVE_VQSHLU_T3 must appear in the table
+ before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x04300000, 0x0d700000, "ldrt%c\t%12-15R, %a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x04100000, 0x0c500000, "ldr%c\t%12-15r, %a"},
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHLU_T3,
+ 0xff800650, 0xff801fd1,
+ "vqshlu%v.s%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0001, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0002, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0004, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0008, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0010, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0020, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0040, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0080, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0100, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0200, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0400, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0800, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d1000, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d2000, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d4000, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d8000, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x092d0000, 0x0fff0000, "push%c\t%m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08800000, 0x0ff00000, "stm%c\t%16-19R%21'!, %m%22'^"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08000000, 0x0e100000, "stm%23?id%24?ba%c\t%16-19R%21'!, %m%22'^"},
+ /* Vector VRSHR.
+ NOTE: MVE_VRSHR must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRSHR,
+ 0xef800250, 0xef801fd1,
+ "vrshr%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0001, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0002, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0004, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0008, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0010, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0020, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0040, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0080, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0100, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0200, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0400, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0800, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd1000, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd2000, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd4000, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd8000, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08bd0000, 0x0fff0000, "pop%c\t%m"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08900000, 0x0f900000, "ldm%c\t%16-19R%21'!, %m%22'^"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x08100000, 0x0e100000, "ldm%23?id%24?ba%c\t%16-19R%21'!, %m%22'^"},
+ /* Vector VSHL.
+ NOTE: MVE_VSHL must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHL_T1,
+ 0xef800550, 0xff801fd1,
+ "vshl%v.i%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x0a000000, 0x0e000000, "b%24'l%c\t%b"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x0f000000, 0x0f000000, "svc%c\t%0-23x"},
+ /* Vector VSHR.
+ NOTE: MVE_VSHR must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHR,
+ 0xef800050, 0xef801fd1,
+ "vshr%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
- /* The rest. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7),
- 0x03200000, 0x0fff00ff, "nop%c\t{%0-7d}" UNPREDICTABLE_INSTRUCTION},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00000000, 0x00000000, UNDEFINED_INSTRUCTION},
- {ARM_FEATURE_CORE_LOW (0),
- 0x00000000, 0x00000000, 0}
-};
+ /* Vector VSLI.
+ NOTE: MVE_VSLI must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSLI,
+ 0xff800550, 0xff801fd1,
+ "vsli%v.%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
-/* print_insn_thumb16 recognizes the following format control codes:
+ /* Vector VSRI.
+ NOTE: MVE_VSRI must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSRI,
+ 0xff800450, 0xff801fd1,
+ "vsri%v.%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
- %S print Thumb register (bits 3..5 as high number if bit 6 set)
- %D print Thumb register (bits 0..2 as high number if bit 7 set)
- %<bitfield>I print bitfield as a signed decimal
- (top bit of range being the sign bit)
- %N print Thumb register mask (with LR)
- %O print Thumb register mask (with PC)
- %M print Thumb register mask
- %b print CZB's 6-bit unsigned branch destination
- %s print Thumb right-shift immediate (6..10; 0 == 32).
- %c print the condition code
- %C print the condition code, or "s" if not conditional
- %x print warning if conditional an not at end of IT block"
- %X print "\t; unpredictable <IT:code>" if conditional
- %I print IT instruction suffix and operands
- %W print Thumb Writeback indicator for LDMIA
- %<bitfield>r print bitfield as an ARM register
- %<bitfield>d print bitfield as a decimal
- %<bitfield>H print (bitfield * 2) as a decimal
- %<bitfield>W print (bitfield * 4) as a decimal
- %<bitfield>a print (bitfield * 4) as a pc-rel offset + decoded symbol
- %<bitfield>B print Thumb branch destination (signed displacement)
- %<bitfield>c print bitfield as a condition code
- %<bitnum>'c print specified char iff bit is one
- %<bitnum>?ab print a if bit is one else print b. */
+ /* Vector VMOV immediate to vector;
+ undefined for cmode == 1111. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMVN_IMM, 0xef800f70, 0xefb81ff0, UNDEFINED_INSTRUCTION},
-static const struct opcode16 thumb_opcodes[] =
-{
- /* Thumb instructions. */
+ /* Vector VMOV immediate to vector,
+ cmode == 1101 */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOV_IMM_TO_VEC, 0xef800d50, 0xefb81fd0,
+ "vmov%v.%5,8-11s\t%13-15,22Q, %E"},
- /* ARMv8-M Security Extensions instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M), 0x4784, 0xff87, "blxns\t%3-6r"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M), 0x4704, 0xff87, "bxns\t%3-6r"},
+ /* Vector VMOV immediate to vector. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOV_IMM_TO_VEC,
+ 0xef800050, 0xefb810d0,
+ "vmov%v.%5,8-11s\t%13-15,22Q, %E"},
- /* ARM V8 instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xbf50, 0xffff, "sevl%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xba80, 0xffc0, "hlt\t%0-5x"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN), 0xb610, 0xfff7, "setpan\t#%3-3d"},
+ /* Vector VMOV two 32-bit lanes to two gprs, idx = 0. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOV2_VEC_LANE_TO_GP,
+ 0xec000f00, 0xffb01ff0,
+ "vmov%c\t%0-3r, %16-19r, %13-15,22Q[2], %13-15,22Q[0]"},
- /* ARM V6K no-argument instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf00, 0xffff, "nop%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf10, 0xffff, "yield%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf20, 0xffff, "wfe%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf30, 0xffff, "wfi%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf40, 0xffff, "sev%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf00, 0xff0f, "nop%c\t{%4-7d}"},
+ /* Vector VMOV two 32-bit lanes to two gprs, idx = 1. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOV2_VEC_LANE_TO_GP,
+ 0xec000f10, 0xffb01ff0,
+ "vmov%c\t%0-3r, %16-19r, %13-15,22Q[3], %13-15,22Q[1]"},
- /* ARM V6T2 instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0xb900, 0xfd00, "cbnz\t%0-2r, %b%X"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0xb100, 0xfd00, "cbz\t%0-2r, %b%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xbf00, 0xff00, "it%I%X"},
+ /* Vector VMOV Two gprs to two 32-bit lanes, idx = 0. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOV2_GP_TO_VEC_LANE,
+ 0xec100f00, 0xffb01ff0,
+ "vmov%c\t%13-15,22Q[2], %13-15,22Q[0], %0-3r, %16-19r"},
- /* ARM V6. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb660, 0xfff8, "cpsie\t%2'a%1'i%0'f%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb670, 0xfff8, "cpsid\t%2'a%1'i%0'f%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0x4600, 0xffc0, "mov%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xba00, 0xffc0, "rev%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xba40, 0xffc0, "rev16%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xbac0, 0xffc0, "revsh%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb650, 0xfff7, "setend\t%3?ble%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb200, 0xffc0, "sxth%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb240, 0xffc0, "sxtb%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb280, 0xffc0, "uxth%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb2c0, 0xffc0, "uxtb%c\t%0-2r, %3-5r"},
+ /* Vector VMOV Two gprs to two 32-bit lanes, idx = 1.
+ With idx = 1 (bit 4 of the encoding set) the odd-numbered lanes are
+ written, so print Q[3] and Q[1] — not Q[2]/Q[0], which belong to the
+ idx = 0 entry above (and mirror the VMOV2_VEC_LANE_TO_GP idx = 1
+ entry, which already prints Q[3]/Q[1]). */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOV2_GP_TO_VEC_LANE,
+ 0xec100f10, 0xffb01ff0,
+ "vmov%c\t%13-15,22Q[3], %13-15,22Q[1], %0-3r, %16-19r"},
- /* ARM V5 ISA extends Thumb. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5T),
- 0xbe00, 0xff00, "bkpt\t%0-7x"}, /* Is always unconditional. */
- /* This is BLX(2). BLX(1) is a 32-bit instruction. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V5T),
- 0x4780, 0xff87, "blx%c\t%3-6r%x"}, /* note: 4 bit register number. */
- /* ARM V4T ISA (Thumb v1). */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x46C0, 0xFFFF, "nop%c\t\t\t; (mov r8, r8)"},
- /* Format 4. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4000, 0xFFC0, "and%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4040, 0xFFC0, "eor%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4080, 0xFFC0, "lsl%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x40C0, 0xFFC0, "lsr%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4100, 0xFFC0, "asr%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4140, 0xFFC0, "adc%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4180, 0xFFC0, "sbc%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x41C0, 0xFFC0, "ror%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4200, 0xFFC0, "tst%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4240, 0xFFC0, "neg%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4280, 0xFFC0, "cmp%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x42C0, 0xFFC0, "cmn%c\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4300, 0xFFC0, "orr%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4340, 0xFFC0, "mul%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4380, 0xFFC0, "bic%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x43C0, 0xFFC0, "mvn%C\t%0-2r, %3-5r"},
- /* format 13 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xB000, 0xFF80, "add%c\tsp, #%0-6W"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xB080, 0xFF80, "sub%c\tsp, #%0-6W"},
- /* format 5 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4700, 0xFF80, "bx%c\t%S%x"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4400, 0xFF00, "add%c\t%D, %S"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4500, 0xFF00, "cmp%c\t%D, %S"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4600, 0xFF00, "mov%c\t%D, %S"},
- /* format 14 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xB400, 0xFE00, "push%c\t%N"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xBC00, 0xFE00, "pop%c\t%O"},
- /* format 2 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x1800, 0xFE00, "add%C\t%0-2r, %3-5r, %6-8r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x1A00, 0xFE00, "sub%C\t%0-2r, %3-5r, %6-8r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x1C00, 0xFE00, "add%C\t%0-2r, %3-5r, #%6-8d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x1E00, 0xFE00, "sub%C\t%0-2r, %3-5r, #%6-8d"},
- /* format 8 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x5200, 0xFE00, "strh%c\t%0-2r, [%3-5r, %6-8r]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x5A00, 0xFE00, "ldrh%c\t%0-2r, [%3-5r, %6-8r]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x5600, 0xF600, "ldrs%11?hb%c\t%0-2r, [%3-5r, %6-8r]"},
- /* format 7 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x5000, 0xFA00, "str%10'b%c\t%0-2r, [%3-5r, %6-8r]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x5800, 0xFA00, "ldr%10'b%c\t%0-2r, [%3-5r, %6-8r]"},
- /* format 1 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x0000, 0xFFC0, "mov%C\t%0-2r, %3-5r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x0000, 0xF800, "lsl%C\t%0-2r, %3-5r, #%6-10d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x0800, 0xF800, "lsr%C\t%0-2r, %3-5r, %s"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x1000, 0xF800, "asr%C\t%0-2r, %3-5r, %s"},
- /* format 3 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x2000, 0xF800, "mov%C\t%8-10r, #%0-7d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x2800, 0xF800, "cmp%c\t%8-10r, #%0-7d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x3000, 0xF800, "add%C\t%8-10r, #%0-7d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x3800, 0xF800, "sub%C\t%8-10r, #%0-7d"},
- /* format 6 */
- /* TODO: Disassemble PC relative "LDR rD,=<symbolic>" */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x4800, 0xF800,
- "ldr%c\t%8-10r, [pc, #%0-7W]\t; (%0-7a)"},
- /* format 9 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x6000, 0xF800, "str%c\t%0-2r, [%3-5r, #%6-10W]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x6800, 0xF800, "ldr%c\t%0-2r, [%3-5r, #%6-10W]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x7000, 0xF800, "strb%c\t%0-2r, [%3-5r, #%6-10d]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x7800, 0xF800, "ldrb%c\t%0-2r, [%3-5r, #%6-10d]"},
- /* format 10 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x8000, 0xF800, "strh%c\t%0-2r, [%3-5r, #%6-10H]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x8800, 0xF800, "ldrh%c\t%0-2r, [%3-5r, #%6-10H]"},
- /* format 11 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x9000, 0xF800, "str%c\t%8-10r, [sp, #%0-7W]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0x9800, 0xF800, "ldr%c\t%8-10r, [sp, #%0-7W]"},
- /* format 12 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0xA000, 0xF800, "add%c\t%8-10r, pc, #%0-7W\t; (adr %8-10r, %0-7a)"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0xA800, 0xF800, "add%c\t%8-10r, sp, #%0-7W"},
- /* format 15 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xC000, 0xF800, "stmia%c\t%8-10r!, %M"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xC800, 0xF800, "ldmia%c\t%8-10r%W, %M"},
- /* format 17 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xDF00, 0xFF00, "svc%c\t%0-7d"},
- /* format 16 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xDE00, 0xFF00, "udf%c\t#%0-7d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xDE00, 0xFE00, UNDEFINED_INSTRUCTION},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xD000, 0xF000, "b%8-11c.n\t%0-7B%X"},
- /* format 18 */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xE000, 0xF800, "b%c.n\t%0-10B%x"},
+ /* Vector VMOV Vector lane to gpr. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMOV_VEC_LANE_TO_GP,
+ 0xee100b10, 0xff100f1f,
+ "vmov%c.%u%5-6,21-22s\t%12-15r, %17-19,7Q[%N]"},
- /* The E800 .. FFFF range is unconditionally redirected to the
- 32-bit table, because even in pre-V6T2 ISAs, BL and BLX(1) pairs
- are processed via that table. Thus, we can never encounter a
- bare "second half of BL/BLX(1)" instruction here. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1), 0x0000, 0x0000, UNDEFINED_INSTRUCTION},
- {ARM_FEATURE_CORE_LOW (0), 0, 0, 0}
-};
+ /* Vector VSHLL T1 Variant. Note: VSHLL T1 must appear before MVE_VMOVL due
+ to instruction opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHLL_T1,
+ 0xeea00f40, 0xefa00fd1,
+ "vshll%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
-/* Thumb32 opcodes use the same table structure as the ARM opcodes.
- We adopt the convention that hw1 is the high 16 bits of .value and
- .mask, hw2 the low 16 bits.
+ /* Vector VMOVL long. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOVL,
+ 0xeea00f40, 0xefa70fd1,
+ "vmovl%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q"},
- print_insn_thumb32 recognizes the following format control codes:
+ /* Vector VMOV and narrow. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOVN,
+ 0xfe310e81, 0xffb30fd1,
+ "vmovn%T%v.i%18-19s\t%13-15,22Q, %1-3,5Q"},
- %% %
+ /* Floating point move extract. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMOVX,
+ 0xfeb00a40, 0xffbf0fd0,
+ "vmovx.f16\t%22,12-15F, %5,0-3F"},
- %I print a 12-bit immediate from hw1[10],hw2[14:12,7:0]
- %M print a modified 12-bit immediate (same location)
- %J print a 16-bit immediate from hw1[3:0,10],hw2[14:12,7:0]
- %K print a 16-bit immediate from hw2[3:0],hw1[3:0],hw2[11:4]
- %H print a 16-bit immediate from hw2[3:0],hw1[11:0]
- %S print a possibly-shifted Rm
+ /* Vector VMUL floating-point T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMUL_FP_T1,
+ 0xff000d50, 0xffa11f51,
+ "vmul%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- %L print address for a ldrd/strd instruction
- %a print the address of a plain load/store
- %w print the width and signedness of a core load/store
- %m print register mask for ldm/stm
- %n print register mask for clrm
+ /* Vector VMUL floating-point T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VMUL_FP_T2,
+ 0xee310e60, 0xefb11f70,
+ "vmul%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"},
- %E print the lsb and width fields of a bfc/bfi instruction
- %F print the lsb and width fields of a sbfx/ubfx instruction
- %G print a fallback offset for Branch Future instructions
- %W print an offset for BF instruction
- %Y print an offset for BFL instruction
- %Z print an offset for BFCSEL instruction
- %Q print an offset for Low Overhead Loop instructions
- %P print an offset for Low Overhead Loop end instructions
- %b print a conditional branch offset
- %B print an unconditional branch offset
- %s print the shift field of an SSAT instruction
- %R print the rotation field of an SXT instruction
- %U print barrier type.
- %P print address for pli instruction.
- %c print the condition code
- %x print warning if conditional an not at end of IT block"
- %X print "\t; unpredictable <IT:code>" if conditional
+ /* Vector VMUL T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMUL_VEC_T1,
+ 0xef000950, 0xff811f51,
+ "vmul%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- %<bitfield>d print bitfield in decimal
- %<bitfield>D print bitfield plus one in decimal
- %<bitfield>W print bitfield*4 in decimal
- %<bitfield>r print bitfield as an ARM register
- %<bitfield>R as %<>r but r15 is UNPREDICTABLE
- %<bitfield>S as %<>r but r13 and r15 is UNPREDICTABLE
- %<bitfield>c print bitfield as a condition code
+ /* Vector VMUL T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMUL_VEC_T2,
+ 0xee011e60, 0xff811f70,
+ "vmul%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
- %<bitfield>'c print specified char iff bitfield is all ones
- %<bitfield>`c print specified char iff bitfield is all zeroes
- %<bitfield>?ab... select from array of values in big endian order
+ /* Vector VMULH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMULH,
+ 0xee010e01, 0xef811f51,
+ "vmulh%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- With one exception at the bottom (done because BL and BLX(1) need
- to come dead last), this table was machine-sorted first in
- decreasing order of number of bits set in the mask, then in
- increasing numeric order of mask, then in increasing numeric order
- of opcode. This order is not the clearest for a human reader, but
- is guaranteed never to catch a special-case bit pattern with a more
- general mask, which is important, because this instruction encoding
- makes heavy use of special-case bit patterns. */
-static const struct opcode32 thumb32_opcodes[] =
-{
- /* Armv8.1-M Mainline and Armv8.1-M Mainline Security Extensions
- instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf040c001, 0xfff0f001, "wls\tlr, %16-19S, %Q"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf040e001, 0xfff0ffff, "dls\tlr, %16-19S"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf02fc001, 0xfffff001, "le\t%P"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf00fc001, 0xfffff001, "le\tlr, %P"},
+ /* Vector VRMULH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRMULH,
+ 0xee011e01, 0xef811f51,
+ "vrmulh%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf040e001, 0xf860f001, "bf%c\t%G, %W"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf060e001, 0xf8f0f001, "bfx%c\t%G, %16-19S"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf000c001, 0xf800f001, "bfl%c\t%G, %Y"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf070e001, 0xf8f0f001, "bflx%c\t%G, %16-19S"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf000e001, 0xf840f001, "bfcsel\t%G, %Z, %18-21c"},
+ /* Vector VMULL integer. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMULL_INT,
+ 0xee010e00, 0xef810f51,
+ "vmull%T%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xe89f0000, 0xffff2000, "clrm%c\t%n"},
+ /* Vector VMULL polynomial. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMULL_POLY,
+ 0xee310e00, 0xefb10f51,
+ "vmull%T%v.%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- /* ARMv8-M and ARMv8-M Security Extensions instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M), 0xe97fe97f, 0xffffffff, "sg"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M),
- 0xe840f000, 0xfff0f0ff, "tt\t%8-11r, %16-19r"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M),
- 0xe840f040, 0xfff0f0ff, "ttt\t%8-11r, %16-19r"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M),
- 0xe840f080, 0xfff0f0ff, "tta\t%8-11r, %16-19r"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M),
- 0xe840f0c0, 0xfff0f0ff, "ttat\t%8-11r, %16-19r"},
+ /* Vector VMVN immediate to vector. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMVN_IMM,
+ 0xef800070, 0xefb810f0,
+ "vmvn%v.i%8-11s\t%13-15,22Q, %E"},
- /* ARM V8.2 RAS extension instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
- 0xf3af8010, 0xffffffff, "esb"},
+ /* Vector VMVN register. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMVN_REG,
+ 0xffb005c0, 0xffbf1fd1,
+ "vmvn%v\t%13-15,22Q, %1-3,5Q"},
- /* V8 instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xf3af8005, 0xffffffff, "sevl%c.w"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xf78f8000, 0xfffffffc, "dcps%0-1d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8c00f8f, 0xfff00fff, "stlb%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8c00f9f, 0xfff00fff, "stlh%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8c00faf, 0xfff00fff, "stl%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8c00fc0, 0xfff00ff0, "stlexb%c\t%0-3r, %12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8c00fd0, 0xfff00ff0, "stlexh%c\t%0-3r, %12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8c00fe0, 0xfff00ff0, "stlex%c\t%0-3r, %12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8c000f0, 0xfff000f0, "stlexd%c\t%0-3r, %12-15r, %8-11r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8d00f8f, 0xfff00fff, "ldab%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8d00f9f, 0xfff00fff, "ldah%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8d00faf, 0xfff00fff, "lda%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8d00fcf, 0xfff00fff, "ldaexb%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8d00fdf, 0xfff00fff, "ldaexh%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8d00fef, 0xfff00fff, "ldaex%c\t%12-15r, [%16-19R]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
- 0xe8d000ff, 0xfff000ff, "ldaexd%c\t%12-15r, %8-11r, [%16-19R]"},
+ /* Vector VNEG floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VNEG_FP,
+ 0xffb107c0, 0xffb31fd1,
+ "vneg%v.f%18-19s\t%13-15,22Q, %1-3,5Q"},
- /* CRC32 instructions. */
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xfac0f080, 0xfff0f0f0, "crc32b\t%8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xfac0f090, 0xfff0f0f0, "crc32h\t%9-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xfac0f0a0, 0xfff0f0f0, "crc32w\t%8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xfad0f080, 0xfff0f0f0, "crc32cb\t%8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xfad0f090, 0xfff0f0f0, "crc32ch\t%8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
- 0xfad0f0a0, 0xfff0f0f0, "crc32cw\t%8-11R, %16-19R, %0-3R"},
+ /* Vector VNEG. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VNEG_VEC,
+ 0xffb103c0, 0xffb31fd1,
+ "vneg%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
- /* Speculation Barriers. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8014, 0xffffffff, "csdb"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3bf8f40, 0xffffffff, "ssbb"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3bf8f44, 0xffffffff, "pssbb"},
+ /* Vector VORN, vector bitwise or not. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VORN,
+ 0xef300150, 0xffb11f51,
+ "vorn%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- /* V7 instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf910f000, 0xff70f000, "pli%c\t%a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf3af80f0, 0xfffffff0, "dbg%c\t#%0-3d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xf3bf8f51, 0xfffffff3, "dmb%c\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xf3bf8f41, 0xfffffff3, "dsb%c\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf3bf8f50, 0xfffffff0, "dmb%c\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf3bf8f40, 0xfffffff0, "dsb%c\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf3bf8f60, 0xfffffff0, "isb%c\t%U"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
- 0xfb90f0f0, 0xfff0f0f0, "sdiv%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
- 0xfbb0f0f0, 0xfff0f0f0, "udiv%c\t%8-11r, %16-19r, %0-3r"},
+ /* Vector VORR register. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VORR_REG,
+ 0xef200150, 0xffb11f51,
+ "vorr%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- /* Virtualization Extension instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT), 0xf7e08000, 0xfff0f000, "hvc%c\t%V"},
- /* We skip ERET as that is SUBS pc, lr, #0. */
+ /* Vector VMOV, vector to vector move. While decoding MVE_VORR_REG, if
+ "Qm==Qn", VORR should be replaced by its alias VMOV. For that to happen,
+ MVE_VMOV_VEC_TO_VEC needs to be placed after MVE_VORR_REG in this
+ mve_opcodes array. */
- /* MP Extension instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_MP), 0xf830f000, 0xff70f000, "pldw%c\t%a"},
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOV_VEC_TO_VEC,
+ 0xef200150, 0xffb11f51,
+ "vmov%v\t%13-15,22Q, %17-19,7Q"},
- /* Security extension instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_SEC), 0xf7f08000, 0xfff0f000, "smc%c\t%K"},
+ /* Vector VQDMULL T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMULL_T1,
+ 0xee300f01, 0xefb10f51,
+ "vqdmull%T%v.s%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- /* ARMv8.5-A instructions. */
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB), 0xf3bf8f70, 0xffffffff, "sb"},
+ /* Vector VPNOT. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VPNOT,
+ 0xfe310f4d, 0xffffffff,
+ "vpnot%v"},
- /* Instructions defined in the basic V6T2 set. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8000, 0xffffffff, "nop%c.w"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8001, 0xffffffff, "yield%c.w"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8002, 0xffffffff, "wfe%c.w"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8003, 0xffffffff, "wfi%c.w"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8004, 0xffffffff, "sev%c.w"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3af8000, 0xffffff00, "nop%c.w\t{%0-7d}"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf7f0a000, 0xfff0f000, "udf%c.w\t%H"},
+ /* Vector VPSEL. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VPSEL,
+ 0xfe310f01, 0xffb11f51,
+ "vpsel%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0xf3bf8f2f, 0xffffffff, "clrex%c"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3af8400, 0xffffff1f, "cpsie.w\t%7'a%6'i%5'f%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3af8600, 0xffffff1f, "cpsid.w\t%7'a%6'i%5'f%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3c08f00, 0xfff0ffff, "bxj%c\t%16-19r%x"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xe810c000, 0xffd0ffff, "rfedb%c\t%16-19r%21'!"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xe990c000, 0xffd0ffff, "rfeia%c\t%16-19r%21'!"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3e08000, 0xffe0f000, "mrs%c\t%8-11r, %D"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3af8100, 0xffffffe0, "cps\t#%0-4d%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xe8d0f000, 0xfff0fff0, "tbb%c\t[%16-19r, %0-3r]%x"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xe8d0f010, 0xfff0fff0, "tbh%c\t[%16-19r, %0-3r, lsl #1]%x"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3af8500, 0xffffff00, "cpsie\t%7'a%6'i%5'f, #%0-4d%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3af8700, 0xffffff00, "cpsid\t%7'a%6'i%5'f, #%0-4d%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3de8f00, 0xffffff00, "subs%c\tpc, lr, #%0-7d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3808000, 0xffe0f000, "msr%c\t%C, %16-19r"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0xe8500f00, 0xfff00fff, "ldrex%c\t%12-15r, [%16-19r]"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0xe8d00f4f, 0xfff00fef, "ldrex%4?hb%c\t%12-15r, [%16-19r]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xe800c000, 0xffd0ffe0, "srsdb%c\t%16-19r%21'!, #%0-4d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xe980c000, 0xffd0ffe0, "srsia%c\t%16-19r%21'!, #%0-4d"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa0ff080, 0xfffff0c0, "sxth%c.w\t%8-11r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa1ff080, 0xfffff0c0, "uxth%c.w\t%8-11r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa2ff080, 0xfffff0c0, "sxtb16%c\t%8-11r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa3ff080, 0xfffff0c0, "uxtb16%c\t%8-11r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa4ff080, 0xfffff0c0, "sxtb%c.w\t%8-11r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa5ff080, 0xfffff0c0, "uxtb%c.w\t%8-11r, %0-3r%R"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0xe8400000, 0xfff000ff, "strex%c\t%8-11r, %12-15r, [%16-19r]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xe8d0007f, 0xfff000ff, "ldrexd%c\t%12-15r, %8-11r, [%16-19r]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f000, 0xfff0f0f0, "sadd8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f010, 0xfff0f0f0, "qadd8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f020, 0xfff0f0f0, "shadd8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f040, 0xfff0f0f0, "uadd8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f050, 0xfff0f0f0, "uqadd8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f060, 0xfff0f0f0, "uhadd8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f080, 0xfff0f0f0, "qadd%c\t%8-11r, %0-3r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f090, 0xfff0f0f0, "qdadd%c\t%8-11r, %0-3r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f0a0, 0xfff0f0f0, "qsub%c\t%8-11r, %0-3r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa80f0b0, 0xfff0f0f0, "qdsub%c\t%8-11r, %0-3r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f000, 0xfff0f0f0, "sadd16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f010, 0xfff0f0f0, "qadd16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f020, 0xfff0f0f0, "shadd16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f040, 0xfff0f0f0, "uadd16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f050, 0xfff0f0f0, "uqadd16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f060, 0xfff0f0f0, "uhadd16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f080, 0xfff0f0f0, "rev%c.w\t%8-11r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f090, 0xfff0f0f0, "rev16%c.w\t%8-11r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f0a0, 0xfff0f0f0, "rbit%c\t%8-11r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa90f0b0, 0xfff0f0f0, "revsh%c.w\t%8-11r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfaa0f000, 0xfff0f0f0, "sasx%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfaa0f010, 0xfff0f0f0, "qasx%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfaa0f020, 0xfff0f0f0, "shasx%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfaa0f040, 0xfff0f0f0, "uasx%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfaa0f050, 0xfff0f0f0, "uqasx%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfaa0f060, 0xfff0f0f0, "uhasx%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfaa0f080, 0xfff0f0f0, "sel%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfab0f080, 0xfff0f0f0, "clz%c\t%8-11r, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfac0f000, 0xfff0f0f0, "ssub8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfac0f010, 0xfff0f0f0, "qsub8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfac0f020, 0xfff0f0f0, "shsub8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfac0f040, 0xfff0f0f0, "usub8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfac0f050, 0xfff0f0f0, "uqsub8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfac0f060, 0xfff0f0f0, "uhsub8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfad0f000, 0xfff0f0f0, "ssub16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfad0f010, 0xfff0f0f0, "qsub16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfad0f020, 0xfff0f0f0, "shsub16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfad0f040, 0xfff0f0f0, "usub16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfad0f050, 0xfff0f0f0, "uqsub16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfad0f060, 0xfff0f0f0, "uhsub16%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfae0f000, 0xfff0f0f0, "ssax%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfae0f010, 0xfff0f0f0, "qsax%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfae0f020, 0xfff0f0f0, "shsax%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfae0f040, 0xfff0f0f0, "usax%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfae0f050, 0xfff0f0f0, "uqsax%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfae0f060, 0xfff0f0f0, "uhsax%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb00f000, 0xfff0f0f0, "mul%c.w\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb70f000, 0xfff0f0f0, "usad8%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa00f000, 0xffe0f0f0, "lsl%20's%c.w\t%8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa20f000, 0xffe0f0f0, "lsr%20's%c.w\t%8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa40f000, 0xffe0f0f0, "asr%20's%c.w\t%8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa60f000, 0xffe0f0f0, "ror%20's%c.w\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0xe8c00f40, 0xfff00fe0, "strex%4?hb%c\t%0-3r, %12-15r, [%16-19r]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3200000, 0xfff0f0e0, "ssat16%c\t%8-11r, #%0-4D, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3a00000, 0xfff0f0e0, "usat16%c\t%8-11r, #%0-4d, %16-19r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb20f000, 0xfff0f0e0, "smuad%4'x%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb30f000, 0xfff0f0e0, "smulw%4?tb%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb40f000, 0xfff0f0e0, "smusd%4'x%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb50f000, 0xfff0f0e0, "smmul%4'r%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa00f080, 0xfff0f0c0, "sxtah%c\t%8-11r, %16-19r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa10f080, 0xfff0f0c0, "uxtah%c\t%8-11r, %16-19r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa20f080, 0xfff0f0c0, "sxtab16%c\t%8-11r, %16-19r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa30f080, 0xfff0f0c0, "uxtab16%c\t%8-11r, %16-19r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa40f080, 0xfff0f0c0, "sxtab%c\t%8-11r, %16-19r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfa50f080, 0xfff0f0c0, "uxtab%c\t%8-11r, %16-19r, %0-3r%R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb10f000, 0xfff0f0c0, "smul%5?tb%4?tb%c\t%8-11r, %16-19r, %0-3r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf36f0000, 0xffff8020, "bfc%c\t%8-11r, %E"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xea100f00, 0xfff08f00, "tst%c.w\t%16-19r, %S"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xea900f00, 0xfff08f00, "teq%c\t%16-19r, %S"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xeb100f00, 0xfff08f00, "cmn%c.w\t%16-19r, %S"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xebb00f00, 0xfff08f00, "cmp%c.w\t%16-19r, %S"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf0100f00, 0xfbf08f00, "tst%c.w\t%16-19r, %M"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf0900f00, 0xfbf08f00, "teq%c\t%16-19r, %M"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf1100f00, 0xfbf08f00, "cmn%c.w\t%16-19r, %M"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf1b00f00, 0xfbf08f00, "cmp%c.w\t%16-19r, %M"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xea4f0000, 0xffef8000, "mov%20's%c.w\t%8-11r, %S"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xea6f0000, 0xffef8000, "mvn%20's%c.w\t%8-11r, %S"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xe8c00070, 0xfff000f0, "strexd%c\t%0-3r, %12-15r, %8-11r, [%16-19r]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb000000, 0xfff000f0, "mla%c\t%8-11r, %16-19r, %0-3r, %12-15r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb000010, 0xfff000f0, "mls%c\t%8-11r, %16-19r, %0-3r, %12-15r"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb700000, 0xfff000f0, "usada8%c\t%8-11R, %16-19R, %0-3R, %12-15R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfb800000, 0xfff000f0, "smull%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfba00000, 0xfff000f0, "umull%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfbc00000, 0xfff000f0, "smlal%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfbe00000, 0xfff000f0, "umlal%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xfbe00060, 0xfff000f0, "umaal%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
- 0xe8500f00, 0xfff00f00, "ldrex%c\t%12-15r, [%16-19r, #%0-7W]"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf04f0000, 0xfbef8000, "mov%20's%c.w\t%8-11r, %M"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf06f0000, 0xfbef8000, "mvn%20's%c.w\t%8-11r, %M"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf810f000, 0xff70f000, "pld%c\t%a"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ /* Vector VQABS. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQABS,
+ 0xffb00740, 0xffb31fd1,
+ "vqabs%v.s%18-19s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQADD T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQADD_T1,
+ 0xef000050, 0xef811f51,
+ "vqadd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQADD T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQADD_T2,
+ 0xee000f60, 0xef811f70,
+ "vqadd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQDMULL T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMULL_T2,
+ 0xee300f60, 0xefb10f70,
+ "vqdmull%T%v.s%28s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQMOVN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQMOVN,
+ 0xee330e01, 0xefb30fd1,
+ "vqmovn%T%v.%u%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VQMOVUN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQMOVUN,
+ 0xee310e81, 0xffb30fd1,
+ "vqmovun%T%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VQDMLADH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMLADH,
+ 0xee000e00, 0xff810f51,
+ "vqdmladh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQRDMLADH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMLADH,
+ 0xee000e01, 0xff810f51,
+ "vqrdmladh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQDMLAH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMLAH,
+ 0xee000e60, 0xff811f70,
+ "vqdmlah%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQRDMLAH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMLAH,
+ 0xee000e40, 0xff811f70,
+ "vqrdmlah%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQDMLASH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMLASH,
+ 0xee001e60, 0xff811f70,
+ "vqdmlash%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQRDMLASH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMLASH,
+ 0xee001e40, 0xff811f70,
+ "vqrdmlash%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQDMLSDH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMLSDH,
+ 0xfe000e00, 0xff810f51,
+ "vqdmlsdh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQRDMLSDH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMLSDH,
+ 0xfe000e01, 0xff810f51,
+ "vqrdmlsdh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQDMULH T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMULH_T1,
+ 0xef000b40, 0xff811f51,
+ "vqdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQRDMULH T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMULH_T2,
+ 0xff000b40, 0xff811f51,
+ "vqrdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQDMULH T3 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMULH_T3,
+ 0xee010e60, 0xff811f70,
+ "vqdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQRDMULH T4 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMULH_T4,
+ 0xfe010e60, 0xff811f70,
+ "vqrdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQNEG. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQNEG,
+ 0xffb007c0, 0xffb31fd1,
+ "vqneg%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VQRSHL T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRSHL_T1,
+ 0xef000550, 0xef811f51,
+ "vqrshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"},
+
+ /* Vector VQRSHL T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRSHL_T2,
+ 0xee331ee0, 0xefb31ff0,
+ "vqrshl%v.%u%18-19s\t%13-15,22Q, %0-3r"},
+
+ /* Vector VQRSHRN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRSHRN,
+ 0xee800f41, 0xefa00fd1,
+ "vqrshrn%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VQRSHRUN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRSHRUN,
+ 0xfe800fc0, 0xffa00fd1,
+ "vqrshrun%T%v.s%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VQSHL T1 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHL_T1,
+ 0xee311ee0, 0xefb31ff0,
+ "vqshl%v.%u%18-19s\t%13-15,22Q, %0-3r"},
+
+ /* Vector VQSHL T4 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHL_T4,
+ 0xef000450, 0xef811f51,
+ "vqshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"},
+
+ /* Vector VQSHRN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHRN,
+ 0xee800f40, 0xefa00fd1,
+ "vqshrn%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VQSHRUN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHRUN,
+ 0xee800fc0, 0xffa00fd1,
+ "vqshrun%T%v.s%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VQSUB T1 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSUB_T1,
+ 0xef000250, 0xef811f51,
+ "vqsub%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQSUB T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSUB_T2,
+ 0xee001f60, 0xef811f70,
+ "vqsub%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VREV16. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VREV16,
+ 0xffb00140, 0xffb31fd1,
+ "vrev16%v.8\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VREV32. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VREV32,
+ 0xffb000c0, 0xffb31fd1,
+ "vrev32%v.%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VREV64. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VREV64,
+ 0xffb00040, 0xffb31fd1,
+ "vrev64%v.%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VRINT floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VRINT_FP,
+ 0xffb20440, 0xffb31c51,
+ "vrint%m%v.f%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VRMLALDAVH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRMLALDAVH,
+ 0xee800f00, 0xef811f51,
+ "vrmlalvh%5A%v.%u32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
+
+ /* Vector VRMLALDAVH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRMLALDAVH,
+ 0xee801f00, 0xef811f51,
+ "vrmlaldavh%5Ax%v.%u32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
+
+ /* Vector VRSHL T1 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRSHL_T1,
+ 0xef000540, 0xef811f51,
+ "vrshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"},
+
+ /* Vector VRSHL T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRSHL_T2,
+ 0xee331e60, 0xefb31ff0,
+ "vrshl%v.%u%18-19s\t%13-15,22Q, %0-3r"},
+
+ /* Vector VRSHRN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRSHRN,
+ 0xfe800fc1, 0xffa00fd1,
+ "vrshrn%T%v.i%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VSBC. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSBC,
+ 0xfe300f00, 0xffb10f51,
+ "vsbc%12I%v.i32\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VSHL T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHL_T2,
+ 0xee311e60, 0xefb31ff0,
+ "vshl%v.%u%18-19s\t%13-15,22Q, %0-3r"},
+
+ /* Vector VSHL T3 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHL_T3,
+ 0xef000440, 0xef811f51,
+ "vshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"},
+
+ /* Vector VSHLC. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHLC,
+ 0xeea00fc0, 0xffa01ff0,
+ "vshlc%v\t%13-15,22Q, %0-3r, #%16-20d"},
+
+ /* Vector VSHLL T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHLL_T2,
+ 0xee310e01, 0xefb30fd1,
+ "vshll%T%v.%u%18-19s\t%13-15,22Q, %1-3,5Q, #%18-19d"},
+
+ /* Vector VSHRN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHRN,
+ 0xee800fc1, 0xffa00fd1,
+ "vshrn%T%v.i%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VST2 no writeback. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VST2,
+ 0xfc801e00, 0xffb01e5f,
+ "vst2%5d.%7-8s\t%B, [%16-19r]"},
+
+ /* Vector VST2 writeback. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VST2,
+ 0xfca01e00, 0xffb01e5f,
+ "vst2%5d.%7-8s\t%B, [%16-19r]!"},
+
+ /* Vector VST4 no writeback. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VST4,
+ 0xfc801e01, 0xffb01e1f,
+ "vst4%5-6d.%7-8s\t%B, [%16-19r]"},
+
+ /* Vector VST4 writeback. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VST4,
+ 0xfca01e01, 0xffb01e1f,
+ "vst4%5-6d.%7-8s\t%B, [%16-19r]!"},
+
+ /* Vector VSTRB scatter store, T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRB_SCATTER_T1,
+ 0xec800e00, 0xffb01e50,
+ "vstrb%v.%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q]"},
+
+ /* Vector VSTRH scatter store, T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRH_SCATTER_T2,
+ 0xec800e10, 0xffb01e50,
+ "vstrh%v.%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"},
+
+ /* Vector VSTRW scatter store, T3 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRW_SCATTER_T3,
+ 0xec800e40, 0xffb01e50,
+ "vstrw%v.%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"},
+
+ /* Vector VSTRD scatter store, T4 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRD_SCATTER_T4,
+ 0xec800fd0, 0xffb01fd0,
+ "vstrd%v.64\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"},
+
+ /* Vector VSTRW scatter store, T5 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRW_SCATTER_T5,
+ 0xfd001e00, 0xff111f00,
+ "vstrw%v.32\t%13-15,22Q, [%17-19,7Q, #%a%0-6i]%w"},
+
+ /* Vector VSTRD scatter store, T6 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRD_SCATTER_T6,
+ 0xfd001f00, 0xff111f00,
+ "vstrd%v.64\t%13-15,22Q, [%17-19,7Q, #%a%0-6i]%w"},
+
+ /* Vector VSTRB. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRB_T1,
+ 0xec000e00, 0xfe581e00,
+ "vstrb%v.%7-8s\t%13-15Q, %d"},
+
+ /* Vector VSTRH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRH_T2,
+ 0xec080e00, 0xfe581e00,
+ "vstrh%v.%7-8s\t%13-15Q, %d"},
+
+ /* Vector VSTRB variant T5. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRB_T5,
+ 0xec001e00, 0xfe101f80,
+ "vstrb%v.8\t%13-15,22Q, %d"},
+
+ /* Vector VSTRH variant T6. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRH_T6,
+ 0xec001e80, 0xfe101f80,
+ "vstrh%v.16\t%13-15,22Q, %d"},
+
+ /* Vector VSTRW variant T7. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSTRW_T7,
+ 0xec001f00, 0xfe101f80,
+ "vstrw%v.32\t%13-15,22Q, %d"},
+
+ /* Vector VSUB floating point T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VSUB_FP_T1,
+ 0xef200d40, 0xffa11f51,
+ "vsub%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VSUB floating point T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VSUB_FP_T2,
+ 0xee301f40, 0xefb11f70,
+ "vsub%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VSUB T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSUB_VEC_T1,
+ 0xff000840, 0xff811f51,
+ "vsub%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VSUB T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSUB_VEC_T2,
+ 0xee011f40, 0xff811f70,
+ "vsub%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_ASRLI,
+ 0xea50012f, 0xfff1813f,
+ "asrl%c\t%17-19l, %9-11h, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_ASRL,
+ 0xea50012d, 0xfff101ff,
+ "asrl%c\t%17-19l, %9-11h, %12-15S"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_LSLLI,
+ 0xea50010f, 0xfff1813f,
+ "lsll%c\t%17-19l, %9-11h, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_LSLL,
+ 0xea50010d, 0xfff101ff,
+ "lsll%c\t%17-19l, %9-11h, %12-15S"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_LSRL,
+ 0xea50011f, 0xfff1813f,
+ "lsrl%c\t%17-19l, %9-11h, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_SQRSHRL,
+ 0xea51012d, 0xfff1017f,
+ "sqrshrl%c\t%17-19l, %9-11h, %k, %12-15S"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_SQRSHR,
+ 0xea500f2d, 0xfff00fff,
+ "sqrshr%c\t%16-19S, %12-15S"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_SQSHLL,
+ 0xea51013f, 0xfff1813f,
+ "sqshll%c\t%17-19l, %9-11h, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_SQSHL,
+ 0xea500f3f, 0xfff08f3f,
+ "sqshl%c\t%16-19S, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_SRSHRL,
+ 0xea51012f, 0xfff1813f,
+ "srshrl%c\t%17-19l, %9-11h, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_SRSHR,
+ 0xea500f2f, 0xfff08f3f,
+ "srshr%c\t%16-19S, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_UQRSHLL,
+ 0xea51010d, 0xfff1017f,
+ "uqrshll%c\t%17-19l, %9-11h, %k, %12-15S"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_UQRSHL,
+ 0xea500f0d, 0xfff00fff,
+ "uqrshl%c\t%16-19S, %12-15S"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_UQSHLL,
+ 0xea51010f, 0xfff1813f,
+ "uqshll%c\t%17-19l, %9-11h, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_UQSHL,
+ 0xea500f0f, 0xfff08f3f,
+ "uqshl%c\t%16-19S, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_URSHRL,
+ 0xea51011f, 0xfff1813f,
+ "urshrl%c\t%17-19l, %9-11h, %j"},
+
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_URSHR,
+ 0xea500f1f, 0xfff08f3f,
+ "urshr%c\t%16-19S, %j"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CSINC,
+ 0xea509000, 0xfff0f000,
+ "csinc\t%8-11S, %16-19Z, %0-3Z, %4-7c"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CSINV,
+ 0xea50a000, 0xfff0f000,
+ "csinv\t%8-11S, %16-19Z, %0-3Z, %4-7c"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CSET,
+ 0xea5f900f, 0xfffff00f,
+ "cset\t%8-11S, %4-7C"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CSETM,
+ 0xea5fa00f, 0xfffff00f,
+ "csetm\t%8-11S, %4-7C"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CSEL,
+ 0xea508000, 0xfff0f000,
+ "csel\t%8-11S, %16-19Z, %0-3Z, %4-7c"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CSNEG,
+ 0xea50b000, 0xfff0f000,
+ "csneg\t%8-11S, %16-19Z, %0-3Z, %4-7c"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CINC,
+ 0xea509000, 0xfff0f000,
+ "cinc\t%8-11S, %16-19Z, %4-7C"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CINV,
+ 0xea50a000, 0xfff0f000,
+ "cinv\t%8-11S, %16-19Z, %4-7C"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ MVE_CNEG,
+ 0xea50b000, 0xfff0f000,
+ "cneg\t%8-11S, %16-19Z, %4-7C"},
+
+ {ARM_FEATURE_CORE_LOW (0),
+ MVE_NONE,
+ 0x00000000, 0x00000000, 0}
+};
+
+/* Opcode tables: ARM, 16-bit Thumb, 32-bit Thumb. All three are partially
+ ordered: they must be searched linearly from the top to obtain a correct
+ match. */
+
+/* print_insn_arm recognizes the following format control codes:
+
+ %% %
+
+ %a print address for ldr/str instruction
+ %s print address for ldr/str halfword/signextend instruction
+ %S like %s but allow UNPREDICTABLE addressing
+ %b print branch destination
+ %c print condition code (always bits 28-31)
+ %m print register mask for ldm/stm instruction
+ %o print operand2 (immediate or register + shift)
+ %p print 'p' iff bits 12-15 are 15
+ %t print 't' iff bit 21 set and bit 24 clear
+ %B print arm BLX(1) destination
+ %C print the PSR sub type.
+ %U print barrier type.
+ %P print address for pli instruction.
+
+ %<bitfield>r print as an ARM register
+ %<bitfield>T print as an ARM register + 1
+ %<bitfield>R as %r but r15 is UNPREDICTABLE
+ %<bitfield>{r|R}u as %{r|R} but if matches the other %u field then is UNPREDICTABLE
+ %<bitfield>{r|R}U as %{r|R} but if matches the other %U field then is UNPREDICTABLE
+ %<bitfield>d print the bitfield in decimal
+ %<bitfield>W print the bitfield plus one in decimal
+ %<bitfield>x print the bitfield in hex
+ %<bitfield>X print the bitfield as 1 hex digit without leading "0x"
+
+ %<bitfield>'c print specified char iff bitfield is all ones
+ %<bitfield>`c print specified char iff bitfield is all zeroes
+ %<bitfield>?ab... select from array of values in big endian order
+
+ %e print arm SMI operand (bits 0..7,8..19).
+ %E print the LSB and WIDTH fields of a BFI or BFC instruction.
+ %V print the 16-bit immediate field of a MOVT or MOVW instruction.
+ %R print the SPSR/CPSR or banked register of an MRS. */
+
+static const struct opcode32 arm_opcodes[] =
+{
+ /* ARM instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0xe1a00000, 0xffffffff, "nop\t\t\t; (mov r0, r0)"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0xe7f000f0, 0xfff000f0, "udf\t#%e"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5),
+ 0x012FFF10, 0x0ffffff0, "bx%c\t%0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
+ 0x00000090, 0x0fe000f0, "mul%20's%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V2),
+ 0x00200090, 0x0fe000f0, "mla%20's%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V2S),
+ 0x01000090, 0x0fb00ff0, "swp%22'b%c\t%12-15RU, %0-3Ru, [%16-19RuU]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V3M),
+ 0x00800090, 0x0fa000f0,
+ "%22?sumull%20's%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V3M),
+ 0x00a00090, 0x0fa000f0,
+ "%22?sumlal%20's%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+
+ /* V8.2 RAS extension instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
+ 0xe320f010, 0xffffffff, "esb"},
+
+ /* V8 instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0x0320f005, 0x0fffffff, "sevl"},
+ /* Defined in V8 but is in NOP space so available to all arch. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0xe1000070, 0xfff000f0, "hlt\t0x%16-19X%12-15X%8-11X%0-3X"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01800e90, 0x0ff00ff0, "stlex%c\t%12-15r, %0-3r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01900e9f, 0x0ff00fff, "ldaex%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0x01a00e90, 0x0ff00ff0, "stlexd%c\t%12-15r, %0-3r, %0-3T, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0x01b00e9f, 0x0ff00fff, "ldaexd%c\t%12-15r, %12-15T, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01c00e90, 0x0ff00ff0, "stlexb%c\t%12-15r, %0-3r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01d00e9f, 0x0ff00fff, "ldaexb%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01e00e90, 0x0ff00ff0, "stlexh%c\t%12-15r, %0-3r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01f00e9f, 0x0ff00fff, "ldaexh%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x0180fc90, 0x0ff0fff0, "stl%c\t%0-3r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01900c9f, 0x0ff00fff, "lda%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01c0fc90, 0x0ff0fff0, "stlb%c\t%0-3r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01d00c9f, 0x0ff00fff, "ldab%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01e0fc90, 0x0ff0fff0, "stlh%c\t%0-3r, [%16-19R]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS),
+ 0x01f00c9f, 0x0ff00fff, "ldah%c\t%12-15r, [%16-19R]"},
+ /* CRC32 instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xe1000040, 0xfff00ff0, "crc32b\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xe1200040, 0xfff00ff0, "crc32h\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xe1400040, 0xfff00ff0, "crc32w\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xe1000240, 0xfff00ff0, "crc32cb\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xe1200240, 0xfff00ff0, "crc32ch\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xe1400240, 0xfff00ff0, "crc32cw\t%12-15R, %16-19R, %0-3R"},
+
+ /* Privileged Access Never extension instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
+ 0xf1100000, 0xfffffdff, "setpan\t#%9-9d"},
+
+ /* Virtualization Extension instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT), 0x0160006e, 0x0fffffff, "eret%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT), 0x01400070, 0x0ff000f0, "hvc%c\t%e"},
+
+ /* Integer Divide Extension instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
+ 0x0710f010, 0x0ff0f0f0, "sdiv%c\t%16-19r, %0-3r, %8-11r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
+ 0x0730f010, 0x0ff0f0f0, "udiv%c\t%16-19r, %0-3r, %8-11r"},
+
+ /* MP Extension instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_MP), 0xf410f000, 0xfc70f000, "pldw\t%a"},
+
+ /* Speculation Barriers. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V3), 0xe320f014, 0xffffffff, "csdb"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V3), 0xf57ff040, 0xffffffff, "ssbb"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V3), 0xf57ff044, 0xffffffff, "pssbb"},
+
+ /* V7 instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf450f000, 0xfd70f000, "pli\t%P"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0x0320f0f0, 0x0ffffff0, "dbg%c\t#%0-3d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xf57ff051, 0xfffffff3, "dmb\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xf57ff041, 0xfffffff3, "dsb\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf57ff050, 0xfffffff0, "dmb\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf57ff040, 0xfffffff0, "dsb\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf57ff060, 0xfffffff0, "isb\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7),
+ 0x0320f000, 0x0fffffff, "nop%c\t{%0-7d}"},
+
+ /* ARM V6T2 instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0x07c0001f, 0x0fe0007f, "bfc%c\t%12-15R, %E"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0x07c00010, 0x0fe00070, "bfi%c\t%12-15R, %0-3r, %E"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0x00600090, 0x0ff000f0, "mls%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0x002000b0, 0x0f3000f0, "strht%c\t%12-15R, %S"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0x00300090, 0x0f3000f0, UNDEFINED_INSTRUCTION },
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0x00300090, 0x0f300090, "ldr%6's%5?hbt%c\t%12-15R, %S"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0x03000000, 0x0ff00000, "movw%c\t%12-15R, %V"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0x03400000, 0x0ff00000, "movt%c\t%12-15R, %V"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0x06ff0f30, 0x0fff0ff0, "rbit%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0x07a00050, 0x0fa00070, "%22?usbfx%c\t%12-15r, %0-3r, #%7-11d, #%16-20W"},
+
+ /* ARM Security extension instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
+ 0x01600070, 0x0ff000f0, "smc%c\t%e"},
+
+ /* ARM V6K instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0xf57ff01f, 0xffffffff, "clrex"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x01d00f9f, 0x0ff00fff, "ldrexb%c\t%12-15R, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x01b00f9f, 0x0ff00fff, "ldrexd%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x01f00f9f, 0x0ff00fff, "ldrexh%c\t%12-15R, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x01c00f90, 0x0ff00ff0, "strexb%c\t%12-15R, %0-3R, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x01a00f90, 0x0ff00ff0, "strexd%c\t%12-15R, %0-3r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x01e00f90, 0x0ff00ff0, "strexh%c\t%12-15R, %0-3R, [%16-19R]"},
+
+ /* ARMv8.5-A instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB), 0xf57ff070, 0xffffffff, "sb"},
+
+ /* ARM V6K NOP hints. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x0320f001, 0x0fffffff, "yield%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x0320f002, 0x0fffffff, "wfe%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x0320f003, 0x0fffffff, "wfi%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x0320f004, 0x0fffffff, "sev%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
+ 0x0320f000, 0x0fffff00, "nop%c\t{%0-7d}"},
+
+ /* ARM V6 instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xf1080000, 0xfffffe3f, "cpsie\t%8'a%7'i%6'f"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xf10a0000, 0xfffffe20, "cpsie\t%8'a%7'i%6'f,#%0-4d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xf10C0000, 0xfffffe3f, "cpsid\t%8'a%7'i%6'f"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xf10e0000, 0xfffffe20, "cpsid\t%8'a%7'i%6'f,#%0-4d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xf1000000, 0xfff1fe20, "cps\t#%0-4d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800010, 0x0ff00ff0, "pkhbt%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800010, 0x0ff00070, "pkhbt%c\t%12-15R, %16-19R, %0-3R, lsl #%7-11d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800050, 0x0ff00ff0, "pkhtb%c\t%12-15R, %16-19R, %0-3R, asr #32"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800050, 0x0ff00070, "pkhtb%c\t%12-15R, %16-19R, %0-3R, asr #%7-11d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x01900f9f, 0x0ff00fff, "ldrex%c\tr%12-15d, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06200f10, 0x0ff00ff0, "qadd16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06200f90, 0x0ff00ff0, "qadd8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06200f30, 0x0ff00ff0, "qasx%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06200f70, 0x0ff00ff0, "qsub16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06200ff0, 0x0ff00ff0, "qsub8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06200f50, 0x0ff00ff0, "qsax%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06100f10, 0x0ff00ff0, "sadd16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06100f90, 0x0ff00ff0, "sadd8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06100f30, 0x0ff00ff0, "sasx%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06300f10, 0x0ff00ff0, "shadd16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06300f90, 0x0ff00ff0, "shadd8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06300f30, 0x0ff00ff0, "shasx%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06300f70, 0x0ff00ff0, "shsub16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06300ff0, 0x0ff00ff0, "shsub8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06300f50, 0x0ff00ff0, "shsax%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06100f70, 0x0ff00ff0, "ssub16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06100ff0, 0x0ff00ff0, "ssub8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06100f50, 0x0ff00ff0, "ssax%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06500f10, 0x0ff00ff0, "uadd16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06500f90, 0x0ff00ff0, "uadd8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06500f30, 0x0ff00ff0, "uasx%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06700f10, 0x0ff00ff0, "uhadd16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06700f90, 0x0ff00ff0, "uhadd8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06700f30, 0x0ff00ff0, "uhasx%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06700f70, 0x0ff00ff0, "uhsub16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06700ff0, 0x0ff00ff0, "uhsub8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06700f50, 0x0ff00ff0, "uhsax%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06600f10, 0x0ff00ff0, "uqadd16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06600f90, 0x0ff00ff0, "uqadd8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06600f30, 0x0ff00ff0, "uqasx%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06600f70, 0x0ff00ff0, "uqsub16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06600ff0, 0x0ff00ff0, "uqsub8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06600f50, 0x0ff00ff0, "uqsax%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06500f70, 0x0ff00ff0, "usub16%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06500ff0, 0x0ff00ff0, "usub8%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06500f50, 0x0ff00ff0, "usax%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06bf0f30, 0x0fff0ff0, "rev%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06bf0fb0, 0x0fff0ff0, "rev16%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ff0fb0, 0x0fff0ff0, "revsh%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xf8100a00, 0xfe50ffff, "rfe%23?id%24?ba\t%16-19r%21'!"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06bf0070, 0x0fff0ff0, "sxth%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06bf0470, 0x0fff0ff0, "sxth%c\t%12-15R, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06bf0870, 0x0fff0ff0, "sxth%c\t%12-15R, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06bf0c70, 0x0fff0ff0, "sxth%c\t%12-15R, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x068f0070, 0x0fff0ff0, "sxtb16%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x068f0470, 0x0fff0ff0, "sxtb16%c\t%12-15R, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x068f0870, 0x0fff0ff0, "sxtb16%c\t%12-15R, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x068f0c70, 0x0fff0ff0, "sxtb16%c\t%12-15R, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06af0070, 0x0fff0ff0, "sxtb%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06af0470, 0x0fff0ff0, "sxtb%c\t%12-15R, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06af0870, 0x0fff0ff0, "sxtb%c\t%12-15R, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06af0c70, 0x0fff0ff0, "sxtb%c\t%12-15R, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ff0070, 0x0fff0ff0, "uxth%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ff0470, 0x0fff0ff0, "uxth%c\t%12-15R, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ff0870, 0x0fff0ff0, "uxth%c\t%12-15R, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ff0c70, 0x0fff0ff0, "uxth%c\t%12-15R, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06cf0070, 0x0fff0ff0, "uxtb16%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06cf0470, 0x0fff0ff0, "uxtb16%c\t%12-15R, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06cf0870, 0x0fff0ff0, "uxtb16%c\t%12-15R, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06cf0c70, 0x0fff0ff0, "uxtb16%c\t%12-15R, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ef0070, 0x0fff0ff0, "uxtb%c\t%12-15R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ef0470, 0x0fff0ff0, "uxtb%c\t%12-15R, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ef0870, 0x0fff0ff0, "uxtb%c\t%12-15R, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06ef0c70, 0x0fff0ff0, "uxtb%c\t%12-15R, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06b00070, 0x0ff00ff0, "sxtah%c\t%12-15R, %16-19r, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06b00470, 0x0ff00ff0, "sxtah%c\t%12-15R, %16-19r, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06b00870, 0x0ff00ff0, "sxtah%c\t%12-15R, %16-19r, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06b00c70, 0x0ff00ff0, "sxtah%c\t%12-15R, %16-19r, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800070, 0x0ff00ff0, "sxtab16%c\t%12-15R, %16-19r, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800470, 0x0ff00ff0, "sxtab16%c\t%12-15R, %16-19r, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800870, 0x0ff00ff0, "sxtab16%c\t%12-15R, %16-19r, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800c70, 0x0ff00ff0, "sxtab16%c\t%12-15R, %16-19r, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06a00070, 0x0ff00ff0, "sxtab%c\t%12-15R, %16-19r, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06a00470, 0x0ff00ff0, "sxtab%c\t%12-15R, %16-19r, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06a00870, 0x0ff00ff0, "sxtab%c\t%12-15R, %16-19r, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06a00c70, 0x0ff00ff0, "sxtab%c\t%12-15R, %16-19r, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06f00070, 0x0ff00ff0, "uxtah%c\t%12-15R, %16-19r, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06f00470, 0x0ff00ff0, "uxtah%c\t%12-15R, %16-19r, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06f00870, 0x0ff00ff0, "uxtah%c\t%12-15R, %16-19r, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06f00c70, 0x0ff00ff0, "uxtah%c\t%12-15R, %16-19r, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06c00070, 0x0ff00ff0, "uxtab16%c\t%12-15R, %16-19r, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06c00470, 0x0ff00ff0, "uxtab16%c\t%12-15R, %16-19r, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06c00870, 0x0ff00ff0, "uxtab16%c\t%12-15R, %16-19r, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06c00c70, 0x0ff00ff0, "uxtab16%c\t%12-15R, %16-19r, %0-3R, ROR #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06e00070, 0x0ff00ff0, "uxtab%c\t%12-15R, %16-19r, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06e00470, 0x0ff00ff0, "uxtab%c\t%12-15R, %16-19r, %0-3R, ror #8"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06e00870, 0x0ff00ff0, "uxtab%c\t%12-15R, %16-19r, %0-3R, ror #16"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06e00c70, 0x0ff00ff0, "uxtab%c\t%12-15R, %16-19r, %0-3R, ror #24"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06800fb0, 0x0ff00ff0, "sel%c\t%12-15R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xf1010000, 0xfffffc00, "setend\t%9?ble"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x0700f010, 0x0ff0f0d0, "smuad%5'x%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x0700f050, 0x0ff0f0d0, "smusd%5'x%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x07000010, 0x0ff000d0, "smlad%5'x%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x07400010, 0x0ff000d0, "smlald%5'x%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x07000050, 0x0ff000d0, "smlsd%5'x%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x07400050, 0x0ff000d0, "smlsld%5'x%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x0750f010, 0x0ff0f0d0, "smmul%5'r%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x07500010, 0x0ff000d0, "smmla%5'r%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x075000d0, 0x0ff000d0, "smmls%5'r%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0xf84d0500, 0xfe5fffe0, "srs%23?id%24?ba\t%16-19r%21'!, #%0-4d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06a00010, 0x0fe00ff0, "ssat%c\t%12-15R, #%16-20W, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06a00010, 0x0fe00070, "ssat%c\t%12-15R, #%16-20W, %0-3R, lsl #%7-11d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06a00050, 0x0fe00070, "ssat%c\t%12-15R, #%16-20W, %0-3R, asr #%7-11d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06a00f30, 0x0ff00ff0, "ssat16%c\t%12-15r, #%16-19W, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x01800f90, 0x0ff00ff0, "strex%c\t%12-15R, %0-3R, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x00400090, 0x0ff000f0, "umaal%c\t%12-15R, %16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x0780f010, 0x0ff0f0f0, "usad8%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x07800010, 0x0ff000f0, "usada8%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06e00010, 0x0fe00ff0, "usat%c\t%12-15R, #%16-20d, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06e00010, 0x0fe00070, "usat%c\t%12-15R, #%16-20d, %0-3R, lsl #%7-11d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06e00050, 0x0fe00070, "usat%c\t%12-15R, #%16-20d, %0-3R, asr #%7-11d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6),
+ 0x06e00f30, 0x0ff00ff0, "usat16%c\t%12-15R, #%16-19d, %0-3R"},
+
+ /* V5J instruction. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5J),
+ 0x012fff20, 0x0ffffff0, "bxj%c\t%0-3R"},
+
+ /* V5 Instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0xe1200070, 0xfff000f0,
+ "bkpt\t0x%16-19X%12-15X%8-11X%0-3X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0xfa000000, 0xfe000000, "blx\t%B"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0x012fff30, 0x0ffffff0, "blx%c\t%0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5),
+ 0x016f0f10, 0x0fff0ff0, "clz%c\t%12-15R, %0-3R"},
+
+ /* V5E "El Segundo" Instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
+ 0x000000d0, 0x0e1000f0, "ldrd%c\t%12-15r, %s"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
+ 0x000000f0, 0x0e1000f0, "strd%c\t%12-15r, %s"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5E),
+ 0xf450f000, 0xfc70f000, "pld\t%a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x01000080, 0x0ff000f0, "smlabb%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x010000a0, 0x0ff000f0, "smlatb%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x010000c0, 0x0ff000f0, "smlabt%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x010000e0, 0x0ff000f0, "smlatt%c\t%16-19r, %0-3r, %8-11R, %12-15R"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x01200080, 0x0ff000f0, "smlawb%c\t%16-19R, %0-3R, %8-11R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x012000c0, 0x0ff000f0, "smlawt%c\t%16-19R, %0-3r, %8-11R, %12-15R"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x01400080, 0x0ff000f0, "smlalbb%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x014000a0, 0x0ff000f0, "smlaltb%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x014000c0, 0x0ff000f0, "smlalbt%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x014000e0, 0x0ff000f0, "smlaltt%c\t%12-15Ru, %16-19Ru, %0-3R, %8-11R"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x01600080, 0x0ff0f0f0, "smulbb%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x016000a0, 0x0ff0f0f0, "smultb%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x016000c0, 0x0ff0f0f0, "smulbt%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x016000e0, 0x0ff0f0f0, "smultt%c\t%16-19R, %0-3R, %8-11R"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x012000a0, 0x0ff0f0f0, "smulwb%c\t%16-19R, %0-3R, %8-11R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x012000e0, 0x0ff0f0f0, "smulwt%c\t%16-19R, %0-3R, %8-11R"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x01000050, 0x0ff00ff0, "qadd%c\t%12-15R, %0-3R, %16-19R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x01400050, 0x0ff00ff0, "qdadd%c\t%12-15R, %0-3R, %16-19R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x01200050, 0x0ff00ff0, "qsub%c\t%12-15R, %0-3R, %16-19R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP),
+ 0x01600050, 0x0ff00ff0, "qdsub%c\t%12-15R, %0-3R, %16-19R"},
+
+ /* ARM Instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x052d0004, 0x0fff0fff, "push%c\t{%12-15r}\t\t; (str%c %12-15r, %a)"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x04400000, 0x0e500000, "strb%t%c\t%12-15R, %a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x04000000, 0x0e500000, "str%t%c\t%12-15r, %a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x06400000, 0x0e500ff0, "strb%t%c\t%12-15R, %a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x06000000, 0x0e500ff0, "str%t%c\t%12-15r, %a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x04400000, 0x0c500010, "strb%t%c\t%12-15R, %a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x04000000, 0x0c500010, "str%t%c\t%12-15r, %a"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x04400000, 0x0e500000, "strb%c\t%12-15R, %a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x06400000, 0x0e500010, "strb%c\t%12-15R, %a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x004000b0, 0x0e5000f0, "strh%c\t%12-15R, %s"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x000000b0, 0x0e500ff0, "strh%c\t%12-15R, %s"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00500090, 0x0e5000f0, UNDEFINED_INSTRUCTION},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00500090, 0x0e500090, "ldr%6's%5?hb%c\t%12-15R, %s"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00100090, 0x0e500ff0, UNDEFINED_INSTRUCTION},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00100090, 0x0e500f90, "ldr%6's%5?hb%c\t%12-15R, %s"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x02000000, 0x0fe00000, "and%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00000000, 0x0fe00010, "and%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00000010, 0x0fe00090, "and%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x02200000, 0x0fe00000, "eor%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00200000, 0x0fe00010, "eor%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00200010, 0x0fe00090, "eor%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x02400000, 0x0fe00000, "sub%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00400000, 0x0fe00010, "sub%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00400010, 0x0fe00090, "sub%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x02600000, 0x0fe00000, "rsb%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00600000, 0x0fe00010, "rsb%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00600010, 0x0fe00090, "rsb%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x02800000, 0x0fe00000, "add%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00800000, 0x0fe00010, "add%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00800010, 0x0fe00090, "add%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x02a00000, 0x0fe00000, "adc%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00a00000, 0x0fe00010, "adc%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00a00010, 0x0fe00090, "adc%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x02c00000, 0x0fe00000, "sbc%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00c00000, 0x0fe00010, "sbc%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00c00010, 0x0fe00090, "sbc%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x02e00000, 0x0fe00000, "rsc%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00e00000, 0x0fe00010, "rsc%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00e00010, 0x0fe00090, "rsc%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
+ 0x0120f200, 0x0fb0f200, "msr%c\t%C, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V3),
+ 0x0120f000, 0x0db0f000, "msr%c\t%C, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V3),
+ 0x01000000, 0x0fb00cff, "mrs%c\t%12-15R, %R"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x03000000, 0x0fe00000, "tst%p%c\t%16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01000000, 0x0fe00010, "tst%p%c\t%16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01000010, 0x0fe00090, "tst%p%c\t%16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x03300000, 0x0ff00000, "teq%p%c\t%16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01300000, 0x0ff00010, "teq%p%c\t%16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01300010, 0x0ff00010, "teq%p%c\t%16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x03400000, 0x0fe00000, "cmp%p%c\t%16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01400000, 0x0fe00010, "cmp%p%c\t%16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01400010, 0x0fe00090, "cmp%p%c\t%16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x03600000, 0x0fe00000, "cmn%p%c\t%16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01600000, 0x0fe00010, "cmn%p%c\t%16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01600010, 0x0fe00090, "cmn%p%c\t%16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x03800000, 0x0fe00000, "orr%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01800000, 0x0fe00010, "orr%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01800010, 0x0fe00090, "orr%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x03a00000, 0x0fef0000, "mov%20's%c\t%12-15r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01a00000, 0x0def0ff0, "mov%20's%c\t%12-15r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01a00000, 0x0def0060, "lsl%20's%c\t%12-15R, %q"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01a00020, 0x0def0060, "lsr%20's%c\t%12-15R, %q"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01a00040, 0x0def0060, "asr%20's%c\t%12-15R, %q"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01a00060, 0x0def0ff0, "rrx%20's%c\t%12-15r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01a00060, 0x0def0060, "ror%20's%c\t%12-15R, %q"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x03c00000, 0x0fe00000, "bic%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01c00000, 0x0fe00010, "bic%20's%c\t%12-15r, %16-19r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01c00010, 0x0fe00090, "bic%20's%c\t%12-15R, %16-19R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x03e00000, 0x0fe00000, "mvn%20's%c\t%12-15r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01e00000, 0x0fe00010, "mvn%20's%c\t%12-15r, %o"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x01e00010, 0x0fe00090, "mvn%20's%c\t%12-15R, %o"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x06000010, 0x0e000010, UNDEFINED_INSTRUCTION},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x049d0004, 0x0fff0fff, "pop%c\t{%12-15r}\t\t; (ldr%c %12-15r, %a)"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x04500000, 0x0c500000, "ldrb%t%c\t%12-15R, %a"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x04300000, 0x0d700000, "ldrt%c\t%12-15R, %a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x04100000, 0x0c500000, "ldr%c\t%12-15r, %a"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0001, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0002, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0004, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0008, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0010, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0020, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0040, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0080, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0100, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0200, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0400, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0800, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d1000, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d2000, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d4000, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d8000, 0x0fffffff, "stmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x092d0000, 0x0fff0000, "push%c\t%m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08800000, 0x0ff00000, "stm%c\t%16-19R%21'!, %m%22'^"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08000000, 0x0e100000, "stm%23?id%24?ba%c\t%16-19R%21'!, %m%22'^"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0001, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0002, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0004, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0008, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0010, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0020, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0040, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0080, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0100, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0200, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0400, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0800, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd1000, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd2000, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd4000, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd8000, 0x0fffffff, "ldmfd%c\t%16-19R!, %m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08bd0000, 0x0fff0000, "pop%c\t%m"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08900000, 0x0f900000, "ldm%c\t%16-19R%21'!, %m%22'^"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x08100000, 0x0e100000, "ldm%23?id%24?ba%c\t%16-19R%21'!, %m%22'^"},
+
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x0a000000, 0x0e000000, "b%24'l%c\t%b"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x0f000000, 0x0f000000, "svc%c\t%0-23x"},
+
+ /* The rest. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7),
+ 0x03200000, 0x0fff00ff, "nop%c\t{%0-7d}" UNPREDICTABLE_INSTRUCTION},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00000000, 0x00000000, UNDEFINED_INSTRUCTION},
+ {ARM_FEATURE_CORE_LOW (0),
+ 0x00000000, 0x00000000, 0}
+};
+
+/* print_insn_thumb16 recognizes the following format control codes:
+
+ %S print Thumb register (bits 3..5 as high number if bit 6 set)
+ %D print Thumb register (bits 0..2 as high number if bit 7 set)
+ %<bitfield>I print bitfield as a signed decimal
+ (top bit of range being the sign bit)
+ %N print Thumb register mask (with LR)
+ %O print Thumb register mask (with PC)
+ %M print Thumb register mask
+ %b print CBZ/CBNZ's 6-bit unsigned branch destination
+ %s print Thumb right-shift immediate (6..10; 0 == 32).
+ %c print the condition code
+ %C print the condition code, or "s" if not conditional
+ %x print warning if conditional and not at end of IT block
+ %X print "\t; unpredictable <IT:code>" if conditional
+ %I print IT instruction suffix and operands
+ %W print Thumb Writeback indicator for LDMIA
+ %<bitfield>r print bitfield as an ARM register
+ %<bitfield>d print bitfield as a decimal
+ %<bitfield>H print (bitfield * 2) as a decimal
+ %<bitfield>W print (bitfield * 4) as a decimal
+ %<bitfield>a print (bitfield * 4) as a pc-rel offset + decoded symbol
+ %<bitfield>B print Thumb branch destination (signed displacement)
+ %<bitfield>c print bitfield as a condition code
+ %<bitnum>'c print specified char iff bit is one
+ %<bitnum>?ab print a if bit is one else print b. */
+
+ static const struct opcode16 thumb_opcodes[] =
+ {
+ /* 16-bit Thumb instructions, matched in table order: the first entry whose (value, mask) pair matches wins. */
+
+ /* ARMv8-M Security Extensions instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M), 0x4784, 0xff87, "blxns\t%3-6r"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M), 0x4704, 0xff87, "bxns\t%3-6r"},
+
+ /* ARM V8 instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xbf50, 0xffff, "sevl%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xba80, 0xffc0, "hlt\t%0-5x"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN), 0xb610, 0xfff7, "setpan\t#%3-3d"},
+
+ /* ARM V6K no-argument instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf00, 0xffff, "nop%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf10, 0xffff, "yield%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf20, 0xffff, "wfe%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf30, 0xffff, "wfi%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf40, 0xffff, "sev%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6K), 0xbf00, 0xff0f, "nop%c\t{%4-7d}"},
+
+ /* ARM V6T2 instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0xb900, 0xfd00, "cbnz\t%0-2r, %b%X"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0xb100, 0xfd00, "cbz\t%0-2r, %b%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xbf00, 0xff00, "it%I%X"},
+
+ /* ARM V6. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb660, 0xfff8, "cpsie\t%2'a%1'i%0'f%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb670, 0xfff8, "cpsid\t%2'a%1'i%0'f%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0x4600, 0xffc0, "mov%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xba00, 0xffc0, "rev%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xba40, 0xffc0, "rev16%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xbac0, 0xffc0, "revsh%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb650, 0xfff7, "setend\t%3?ble%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb200, 0xffc0, "sxth%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb240, 0xffc0, "sxtb%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb280, 0xffc0, "uxth%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6), 0xb2c0, 0xffc0, "uxtb%c\t%0-2r, %3-5r"},
+
+ /* ARM V5 ISA extends Thumb. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5T),
+ 0xbe00, 0xff00, "bkpt\t%0-7x"}, /* Is always unconditional. */
+ /* This is BLX(2). BLX(1) is a 32-bit instruction. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V5T),
+ 0x4780, 0xff87, "blx%c\t%3-6r%x"}, /* note: 4 bit register number. */
+ /* ARM V4T ISA (Thumb v1). */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x46C0, 0xFFFF, "nop%c\t\t\t; (mov r8, r8)"},
+ /* Format 4: two-register ALU operations. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4000, 0xFFC0, "and%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4040, 0xFFC0, "eor%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4080, 0xFFC0, "lsl%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x40C0, 0xFFC0, "lsr%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4100, 0xFFC0, "asr%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4140, 0xFFC0, "adc%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4180, 0xFFC0, "sbc%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x41C0, 0xFFC0, "ror%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4200, 0xFFC0, "tst%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4240, 0xFFC0, "neg%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4280, 0xFFC0, "cmp%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x42C0, 0xFFC0, "cmn%c\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4300, 0xFFC0, "orr%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4340, 0xFFC0, "mul%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4380, 0xFFC0, "bic%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x43C0, 0xFFC0, "mvn%C\t%0-2r, %3-5r"},
+ /* Format 13: SP-relative add/subtract immediate. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xB000, 0xFF80, "add%c\tsp, #%0-6W"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xB080, 0xFF80, "sub%c\tsp, #%0-6W"},
+ /* Format 5: hi-register operations and branch exchange. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4700, 0xFF80, "bx%c\t%S%x"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4400, 0xFF00, "add%c\t%D, %S"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4500, 0xFF00, "cmp%c\t%D, %S"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x4600, 0xFF00, "mov%c\t%D, %S"},
+ /* Format 14: push/pop registers. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xB400, 0xFE00, "push%c\t%N"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xBC00, 0xFE00, "pop%c\t%O"},
+ /* Format 2: add/subtract register or 3-bit immediate. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x1800, 0xFE00, "add%C\t%0-2r, %3-5r, %6-8r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x1A00, 0xFE00, "sub%C\t%0-2r, %3-5r, %6-8r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x1C00, 0xFE00, "add%C\t%0-2r, %3-5r, #%6-8d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x1E00, 0xFE00, "sub%C\t%0-2r, %3-5r, #%6-8d"},
+ /* Format 8: load/store halfword and sign-extended byte/halfword. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x5200, 0xFE00, "strh%c\t%0-2r, [%3-5r, %6-8r]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x5A00, 0xFE00, "ldrh%c\t%0-2r, [%3-5r, %6-8r]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x5600, 0xF600, "ldrs%11?hb%c\t%0-2r, [%3-5r, %6-8r]"},
+ /* Format 7: load/store word/byte with register offset. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x5000, 0xFA00, "str%10'b%c\t%0-2r, [%3-5r, %6-8r]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x5800, 0xFA00, "ldr%10'b%c\t%0-2r, [%3-5r, %6-8r]"},
+ /* Format 1: move shifted register. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x0000, 0xFFC0, "mov%C\t%0-2r, %3-5r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x0000, 0xF800, "lsl%C\t%0-2r, %3-5r, #%6-10d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x0800, 0xF800, "lsr%C\t%0-2r, %3-5r, %s"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x1000, 0xF800, "asr%C\t%0-2r, %3-5r, %s"},
+ /* Format 3: move/compare/add/subtract 8-bit immediate. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x2000, 0xF800, "mov%C\t%8-10r, #%0-7d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x2800, 0xF800, "cmp%c\t%8-10r, #%0-7d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x3000, 0xF800, "add%C\t%8-10r, #%0-7d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0x3800, 0xF800, "sub%C\t%8-10r, #%0-7d"},
+ /* Format 6: PC-relative load. */
+ /* TODO: Disassemble PC relative "LDR rD,=<symbolic>" */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x4800, 0xF800,
+ "ldr%c\t%8-10r, [pc, #%0-7W]\t; (%0-7a)"},
+ /* Format 9: load/store word/byte with 5-bit immediate offset. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x6000, 0xF800, "str%c\t%0-2r, [%3-5r, #%6-10W]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x6800, 0xF800, "ldr%c\t%0-2r, [%3-5r, #%6-10W]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x7000, 0xF800, "strb%c\t%0-2r, [%3-5r, #%6-10d]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x7800, 0xF800, "ldrb%c\t%0-2r, [%3-5r, #%6-10d]"},
+ /* Format 10: load/store halfword with immediate offset. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x8000, 0xF800, "strh%c\t%0-2r, [%3-5r, #%6-10H]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x8800, 0xF800, "ldrh%c\t%0-2r, [%3-5r, #%6-10H]"},
+ /* Format 11: SP-relative load/store. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x9000, 0xF800, "str%c\t%8-10r, [sp, #%0-7W]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0x9800, 0xF800, "ldr%c\t%8-10r, [sp, #%0-7W]"},
+ /* Format 12: load address (ADR / add to SP). */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0xA000, 0xF800, "add%c\t%8-10r, pc, #%0-7W\t; (adr %8-10r, %0-7a)"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0xA800, 0xF800, "add%c\t%8-10r, sp, #%0-7W"},
+ /* Format 15: multiple load/store (LDMIA/STMIA). */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xC000, 0xF800, "stmia%c\t%8-10r!, %M"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xC800, 0xF800, "ldmia%c\t%8-10r%W, %M"},
+ /* Format 17: software interrupt (SVC). */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xDF00, 0xFF00, "svc%c\t%0-7d"},
+ /* Format 16: conditional branch (0xDE00 condition encodes UDF). */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xDE00, 0xFF00, "udf%c\t#%0-7d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xDE00, 0xFE00, UNDEFINED_INSTRUCTION},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xD000, 0xF000, "b%8-11c.n\t%0-7B%X"},
+ /* Format 18: unconditional branch. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T), 0xE000, 0xF800, "b%c.n\t%0-10B%x"},
+
+ /* The E800 .. FFFF range is unconditionally redirected to the
+ 32-bit table, because even in pre-V6T2 ISAs, BL and BLX(1) pairs
+ are processed via that table. Thus, we can never encounter a
+ bare "second half of BL/BLX(1)" instruction here. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1), 0x0000, 0x0000, UNDEFINED_INSTRUCTION},
+ {ARM_FEATURE_CORE_LOW (0), 0, 0, 0}
+ };
+
+/* Thumb32 opcodes use the same table structure as the ARM opcodes.
+ We adopt the convention that hw1 is the high 16 bits of .value and
+ .mask, hw2 the low 16 bits.
+
+ print_insn_thumb32 recognizes the following format control codes:
+
+ %% %
+
+ %I print a 12-bit immediate from hw1[10],hw2[14:12,7:0]
+ %M print a modified 12-bit immediate (same location)
+ %J print a 16-bit immediate from hw1[3:0,10],hw2[14:12,7:0]
+ %K print a 16-bit immediate from hw2[3:0],hw1[3:0],hw2[11:4]
+ %H print a 16-bit immediate from hw2[3:0],hw1[11:0]
+ %S print a possibly-shifted Rm
+
+ %L print address for a ldrd/strd instruction
+ %a print the address of a plain load/store
+ %w print the width and signedness of a core load/store
+ %m print register mask for ldm/stm
+ %n print register mask for clrm
+
+ %E print the lsb and width fields of a bfc/bfi instruction
+ %F print the lsb and width fields of a sbfx/ubfx instruction
+ %G print a fallback offset for Branch Future instructions
+ %W print an offset for BF instruction
+ %Y print an offset for BFL instruction
+ %Z print an offset for BFCSEL instruction
+ %Q print an offset for Low Overhead Loop instructions
+ %P print an offset for Low Overhead Loop end instructions
+ %b print a conditional branch offset
+ %B print an unconditional branch offset
+ %s print the shift field of an SSAT instruction
+ %R print the rotation field of an SXT instruction
+ %U print barrier type.
+ %P print address for pli instruction.
+ %c print the condition code
+ %x print warning if conditional and not at end of IT block
+ %X print "\t; unpredictable <IT:code>" if conditional
+
+ %<bitfield>d print bitfield in decimal
+ %<bitfield>D print bitfield plus one in decimal
+ %<bitfield>W print bitfield*4 in decimal
+ %<bitfield>r print bitfield as an ARM register
+ %<bitfield>R as %<>r but r15 is UNPREDICTABLE
+ %<bitfield>S as %<>r but r13 and r15 is UNPREDICTABLE
+ %<bitfield>c print bitfield as a condition code
+
+ %<bitfield>'c print specified char iff bitfield is all ones
+ %<bitfield>`c print specified char iff bitfield is all zeroes
+ %<bitfield>?ab... select from array of values in big endian order
+
+ With one exception at the bottom (done because BL and BLX(1) need
+ to come dead last), this table was machine-sorted first in
+ decreasing order of number of bits set in the mask, then in
+ increasing numeric order of mask, then in increasing numeric order
+ of opcode. This order is not the clearest for a human reader, but
+ is guaranteed never to catch a special-case bit pattern with a more
+ general mask, which is important, because this instruction encoding
+ makes heavy use of special-case bit patterns. */
+static const struct opcode32 thumb32_opcodes[] =
+{
+ /* Armv8.1-M Mainline and Armv8.1-M Mainline Security Extensions
+ instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf00fe001, 0xffffffff, "lctp%c"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf02fc001, 0xfffff001, "le\t%P"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf00fc001, 0xfffff001, "le\tlr, %P"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf01fc001, 0xfffff001, "letp\tlr, %P"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf040c001, 0xfff0f001, "wls\tlr, %16-19S, %Q"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf000c001, 0xffc0f001, "wlstp.%20-21s\tlr, %16-19S, %Q"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf040e001, 0xfff0ffff, "dls\tlr, %16-19S"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf000e001, 0xffc0ffff, "dlstp.%20-21s\tlr, %16-19S"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf040e001, 0xf860f001, "bf%c\t%G, %W"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf060e001, 0xf8f0f001, "bfx%c\t%G, %16-19S"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf000c001, 0xf800f001, "bfl%c\t%G, %Y"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf070e001, 0xf8f0f001, "bflx%c\t%G, %16-19S"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf000e001, 0xf840f001, "bfcsel\t%G, %Z, %18-21c"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xe89f0000, 0xffff2000, "clrm%c\t%n"},
+
+ /* ARMv8-M and ARMv8-M Security Extensions instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M), 0xe97fe97f, 0xffffffff, "sg"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M),
+ 0xe840f000, 0xfff0f0ff, "tt\t%8-11r, %16-19r"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M),
+ 0xe840f040, 0xfff0f0ff, "ttt\t%8-11r, %16-19r"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M),
+ 0xe840f080, 0xfff0f0ff, "tta\t%8-11r, %16-19r"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M),
+ 0xe840f0c0, 0xfff0f0ff, "ttat\t%8-11r, %16-19r"},
+
+ /* ARM V8.2 RAS extension instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
+ 0xf3af8010, 0xffffffff, "esb"},
+
+ /* V8 instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xf3af8005, 0xffffffff, "sevl%c.w"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xf78f8000, 0xfffffffc, "dcps%0-1d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8c00f8f, 0xfff00fff, "stlb%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8c00f9f, 0xfff00fff, "stlh%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8c00faf, 0xfff00fff, "stl%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8c00fc0, 0xfff00ff0, "stlexb%c\t%0-3r, %12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8c00fd0, 0xfff00ff0, "stlexh%c\t%0-3r, %12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8c00fe0, 0xfff00ff0, "stlex%c\t%0-3r, %12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8c000f0, 0xfff000f0, "stlexd%c\t%0-3r, %12-15r, %8-11r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8d00f8f, 0xfff00fff, "ldab%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8d00f9f, 0xfff00fff, "ldah%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8d00faf, 0xfff00fff, "lda%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8d00fcf, 0xfff00fff, "ldaexb%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8d00fdf, 0xfff00fff, "ldaexh%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8d00fef, 0xfff00fff, "ldaex%c\t%12-15r, [%16-19R]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8),
+ 0xe8d000ff, 0xfff000ff, "ldaexd%c\t%12-15r, %8-11r, [%16-19R]"},
+
+ /* CRC32 instructions.  In every variant Rd is bits 8-11, Rn is
+ bits 16-19 and Rm is bits 0-3. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xfac0f080, 0xfff0f0f0, "crc32b\t%8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xfac0f090, 0xfff0f0f0, "crc32h\t%8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xfac0f0a0, 0xfff0f0f0, "crc32w\t%8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xfad0f080, 0xfff0f0f0, "crc32cb\t%8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xfad0f090, 0xfff0f0f0, "crc32ch\t%8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
+ 0xfad0f0a0, 0xfff0f0f0, "crc32cw\t%8-11R, %16-19R, %0-3R"},
+
+ /* Speculation Barriers. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8014, 0xffffffff, "csdb"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3bf8f40, 0xffffffff, "ssbb"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3bf8f44, 0xffffffff, "pssbb"},
+
+ /* V7 instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf910f000, 0xff70f000, "pli%c\t%a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf3af80f0, 0xfffffff0, "dbg%c\t#%0-3d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xf3bf8f51, 0xfffffff3, "dmb%c\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V8), 0xf3bf8f41, 0xfffffff3, "dsb%c\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf3bf8f50, 0xfffffff0, "dmb%c\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf3bf8f40, 0xfffffff0, "dsb%c\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V7), 0xf3bf8f60, 0xfffffff0, "isb%c\t%U"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
+ 0xfb90f0f0, 0xfff0f0f0, "sdiv%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
+ 0xfbb0f0f0, 0xfff0f0f0, "udiv%c\t%8-11r, %16-19r, %0-3r"},
+
+ /* Virtualization Extension instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT), 0xf7e08000, 0xfff0f000, "hvc%c\t%V"},
+ /* We skip ERET as that is SUBS pc, lr, #0. */
+
+ /* MP Extension instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_MP), 0xf830f000, 0xff70f000, "pldw%c\t%a"},
+
+ /* Security extension instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_SEC), 0xf7f08000, 0xfff0f000, "smc%c\t%K"},
+
+ /* ARMv8.5-A instructions. */
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB), 0xf3bf8f70, 0xffffffff, "sb"},
+
+ /* Instructions defined in the basic V6T2 set. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8000, 0xffffffff, "nop%c.w"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8001, 0xffffffff, "yield%c.w"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8002, 0xffffffff, "wfe%c.w"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8003, 0xffffffff, "wfi%c.w"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf3af8004, 0xffffffff, "sev%c.w"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3af8000, 0xffffff00, "nop%c.w\t{%0-7d}"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2), 0xf7f0a000, 0xfff0f000, "udf%c.w\t%H"},
+
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0xf3bf8f2f, 0xffffffff, "clrex%c"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3af8400, 0xffffff1f, "cpsie.w\t%7'a%6'i%5'f%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3af8600, 0xffffff1f, "cpsid.w\t%7'a%6'i%5'f%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3c08f00, 0xfff0ffff, "bxj%c\t%16-19r%x"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xe810c000, 0xffd0ffff, "rfedb%c\t%16-19r%21'!"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xe990c000, 0xffd0ffff, "rfeia%c\t%16-19r%21'!"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3e08000, 0xffe0f000, "mrs%c\t%8-11r, %D"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3af8100, 0xffffffe0, "cps\t#%0-4d%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xe8d0f000, 0xfff0fff0, "tbb%c\t[%16-19r, %0-3r]%x"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xe8d0f010, 0xfff0fff0, "tbh%c\t[%16-19r, %0-3r, lsl #1]%x"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3af8500, 0xffffff00, "cpsie\t%7'a%6'i%5'f, #%0-4d%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3af8700, 0xffffff00, "cpsid\t%7'a%6'i%5'f, #%0-4d%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3de8f00, 0xffffff00, "subs%c\tpc, lr, #%0-7d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3808000, 0xffe0f000, "msr%c\t%C, %16-19r"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0xe8500f00, 0xfff00fff, "ldrex%c\t%12-15r, [%16-19r]"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0xe8d00f4f, 0xfff00fef, "ldrex%4?hb%c\t%12-15r, [%16-19r]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xe800c000, 0xffd0ffe0, "srsdb%c\t%16-19r%21'!, #%0-4d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xe980c000, 0xffd0ffe0, "srsia%c\t%16-19r%21'!, #%0-4d"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa0ff080, 0xfffff0c0, "sxth%c.w\t%8-11r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa1ff080, 0xfffff0c0, "uxth%c.w\t%8-11r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa2ff080, 0xfffff0c0, "sxtb16%c\t%8-11r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa3ff080, 0xfffff0c0, "uxtb16%c\t%8-11r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa4ff080, 0xfffff0c0, "sxtb%c.w\t%8-11r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa5ff080, 0xfffff0c0, "uxtb%c.w\t%8-11r, %0-3r%R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0xe8400000, 0xfff000ff, "strex%c\t%8-11r, %12-15r, [%16-19r]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xe8d0007f, 0xfff000ff, "ldrexd%c\t%12-15r, %8-11r, [%16-19r]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f000, 0xfff0f0f0, "sadd8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f010, 0xfff0f0f0, "qadd8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f020, 0xfff0f0f0, "shadd8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f040, 0xfff0f0f0, "uadd8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f050, 0xfff0f0f0, "uqadd8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f060, 0xfff0f0f0, "uhadd8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f080, 0xfff0f0f0, "qadd%c\t%8-11r, %0-3r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f090, 0xfff0f0f0, "qdadd%c\t%8-11r, %0-3r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f0a0, 0xfff0f0f0, "qsub%c\t%8-11r, %0-3r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa80f0b0, 0xfff0f0f0, "qdsub%c\t%8-11r, %0-3r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f000, 0xfff0f0f0, "sadd16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f010, 0xfff0f0f0, "qadd16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f020, 0xfff0f0f0, "shadd16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f040, 0xfff0f0f0, "uadd16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f050, 0xfff0f0f0, "uqadd16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f060, 0xfff0f0f0, "uhadd16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f080, 0xfff0f0f0, "rev%c.w\t%8-11r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f090, 0xfff0f0f0, "rev16%c.w\t%8-11r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f0a0, 0xfff0f0f0, "rbit%c\t%8-11r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa90f0b0, 0xfff0f0f0, "revsh%c.w\t%8-11r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfaa0f000, 0xfff0f0f0, "sasx%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfaa0f010, 0xfff0f0f0, "qasx%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfaa0f020, 0xfff0f0f0, "shasx%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfaa0f040, 0xfff0f0f0, "uasx%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfaa0f050, 0xfff0f0f0, "uqasx%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfaa0f060, 0xfff0f0f0, "uhasx%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfaa0f080, 0xfff0f0f0, "sel%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfab0f080, 0xfff0f0f0, "clz%c\t%8-11r, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfac0f000, 0xfff0f0f0, "ssub8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfac0f010, 0xfff0f0f0, "qsub8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfac0f020, 0xfff0f0f0, "shsub8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfac0f040, 0xfff0f0f0, "usub8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfac0f050, 0xfff0f0f0, "uqsub8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfac0f060, 0xfff0f0f0, "uhsub8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfad0f000, 0xfff0f0f0, "ssub16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfad0f010, 0xfff0f0f0, "qsub16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfad0f020, 0xfff0f0f0, "shsub16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfad0f040, 0xfff0f0f0, "usub16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfad0f050, 0xfff0f0f0, "uqsub16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfad0f060, 0xfff0f0f0, "uhsub16%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfae0f000, 0xfff0f0f0, "ssax%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfae0f010, 0xfff0f0f0, "qsax%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfae0f020, 0xfff0f0f0, "shsax%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfae0f040, 0xfff0f0f0, "usax%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfae0f050, 0xfff0f0f0, "uqsax%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfae0f060, 0xfff0f0f0, "uhsax%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb00f000, 0xfff0f0f0, "mul%c.w\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb70f000, 0xfff0f0f0, "usad8%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa00f000, 0xffe0f0f0, "lsl%20's%c.w\t%8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa20f000, 0xffe0f0f0, "lsr%20's%c.w\t%8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa40f000, 0xffe0f0f0, "asr%20's%c.w\t%8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa60f000, 0xffe0f0f0, "ror%20's%c.w\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0xe8c00f40, 0xfff00fe0, "strex%4?hb%c\t%0-3r, %12-15r, [%16-19r]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3200000, 0xfff0f0e0, "ssat16%c\t%8-11r, #%0-4D, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3a00000, 0xfff0f0e0, "usat16%c\t%8-11r, #%0-4d, %16-19r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb20f000, 0xfff0f0e0, "smuad%4'x%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb30f000, 0xfff0f0e0, "smulw%4?tb%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb40f000, 0xfff0f0e0, "smusd%4'x%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb50f000, 0xfff0f0e0, "smmul%4'r%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa00f080, 0xfff0f0c0, "sxtah%c\t%8-11r, %16-19r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa10f080, 0xfff0f0c0, "uxtah%c\t%8-11r, %16-19r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa20f080, 0xfff0f0c0, "sxtab16%c\t%8-11r, %16-19r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa30f080, 0xfff0f0c0, "uxtab16%c\t%8-11r, %16-19r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa40f080, 0xfff0f0c0, "sxtab%c\t%8-11r, %16-19r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfa50f080, 0xfff0f0c0, "uxtab%c\t%8-11r, %16-19r, %0-3r%R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb10f000, 0xfff0f0c0, "smul%5?tb%4?tb%c\t%8-11r, %16-19r, %0-3r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf36f0000, 0xffff8020, "bfc%c\t%8-11r, %E"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xea100f00, 0xfff08f00, "tst%c.w\t%16-19r, %S"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xea900f00, 0xfff08f00, "teq%c\t%16-19r, %S"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xeb100f00, 0xfff08f00, "cmn%c.w\t%16-19r, %S"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xebb00f00, 0xfff08f00, "cmp%c.w\t%16-19r, %S"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf0100f00, 0xfbf08f00, "tst%c.w\t%16-19r, %M"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf0900f00, 0xfbf08f00, "teq%c\t%16-19r, %M"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf1100f00, 0xfbf08f00, "cmn%c.w\t%16-19r, %M"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf1b00f00, 0xfbf08f00, "cmp%c.w\t%16-19r, %M"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xea4f0000, 0xffef8000, "mov%20's%c.w\t%8-11r, %S"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xea6f0000, 0xffef8000, "mvn%20's%c.w\t%8-11r, %S"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xe8c00070, 0xfff000f0, "strexd%c\t%0-3r, %12-15r, %8-11r, [%16-19r]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb000000, 0xfff000f0, "mla%c\t%8-11r, %16-19r, %0-3r, %12-15r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb000010, 0xfff000f0, "mls%c\t%8-11r, %16-19r, %0-3r, %12-15r"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb700000, 0xfff000f0, "usada8%c\t%8-11R, %16-19R, %0-3R, %12-15R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfb800000, 0xfff000f0, "smull%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfba00000, 0xfff000f0, "umull%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfbc00000, 0xfff000f0, "smlal%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfbe00000, 0xfff000f0, "umlal%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xfbe00060, 0xfff000f0, "umaal%c\t%12-15R, %8-11R, %16-19R, %0-3R"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M),
+ 0xe8500f00, 0xfff00f00, "ldrex%c\t%12-15r, [%16-19r, #%0-7W]"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf04f0000, 0xfbef8000, "mov%20's%c.w\t%8-11r, %M"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf06f0000, 0xfbef8000, "mvn%20's%c.w\t%8-11r, %M"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf810f000, 0xff70f000, "pld%c\t%a"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
0xfb200000, 0xfff000e0, "smlad%4'x%c\t%8-11R, %16-19R, %0-3R, %12-15R"},
{ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
0xfb300000, 0xfff000e0, "smlaw%4?tb%c\t%8-11R, %16-19R, %0-3R, %12-15R"},
{ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
0xf8100000, 0xfe100000, "ldr%w%c.w\t%12-15r, %a"},
- /* Filter out Bcc with cond=E or F, which are used for other instructions. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3c08000, 0xfbc0d000, "undefined (bcc, cond=0xF)"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf3808000, 0xfbc0d000, "undefined (bcc, cond=0xE)"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf0008000, 0xf800d000, "b%22-25c.w\t%b%X"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
- 0xf0009000, 0xf800d000, "b%c.w\t%B%x"},
+ /* Filter out Bcc with cond=E or F, which are used for other instructions. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3c08000, 0xfbc0d000, "undefined (bcc, cond=0xF)"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf3808000, 0xfbc0d000, "undefined (bcc, cond=0xE)"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf0008000, 0xf800d000, "b%22-25c.w\t%b%X"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2),
+ 0xf0009000, 0xf800d000, "b%c.w\t%B%x"},
+
+ /* These have been 32-bit since the invention of Thumb. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0xf000c000, 0xf800d001, "blx%c\t%B%x"},
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
+ 0xf000d000, 0xf800d000, "bl%c\t%B%x"},
+
+ /* Fallback. */
+ {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
+ 0x00000000, 0x00000000, UNDEFINED_INSTRUCTION},
+ {ARM_FEATURE_CORE_LOW (0), 0, 0, 0}
+};
+
+static const char *const arm_conditional[] =
+{"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al", "<und>", ""};
+
+static const char *const arm_fp_const[] =
+{"0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0"};
+
+static const char *const arm_shift[] =
+{"lsl", "lsr", "asr", "ror"};
+
+typedef struct
+{
+ const char *name;
+ const char *description;
+ const char *reg_names[16];
+}
+arm_regname;
+
+static const arm_regname regnames[] =
+{
+ { "reg-names-raw", N_("Select raw register names"),
+ { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"}},
+ { "reg-names-gcc", N_("Select register names used by GCC"),
+ { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc" }},
+ { "reg-names-std", N_("Select register names used in ARM's ISA documentation"),
+ { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc" }},
+ { "force-thumb", N_("Assume all insns are Thumb insns"), {NULL} },
+ { "no-force-thumb", N_("Examine preceding label to determine an insn's type"), {NULL} },
+ { "reg-names-apcs", N_("Select register names used in the APCS"),
+ { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "v4", "v5", "v6", "sl", "fp", "ip", "sp", "lr", "pc" }},
+ { "reg-names-atpcs", N_("Select register names used in the ATPCS"),
+ { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "IP", "SP", "LR", "PC" }},
+ { "reg-names-special-atpcs", N_("Select special register names used in the ATPCS"),
+ { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "WR", "v5", "SB", "SL", "FP", "IP", "SP", "LR", "PC" }}
+};
+
+static const char *const iwmmxt_wwnames[] =
+{"b", "h", "w", "d"};
+
+static const char *const iwmmxt_wwssnames[] =
+{"b", "bus", "bc", "bss",
+ "h", "hus", "hc", "hss",
+ "w", "wus", "wc", "wss",
+ "d", "dus", "dc", "dss"
+};
+
+static const char *const iwmmxt_regnames[] =
+{ "wr0", "wr1", "wr2", "wr3", "wr4", "wr5", "wr6", "wr7",
+ "wr8", "wr9", "wr10", "wr11", "wr12", "wr13", "wr14", "wr15"
+};
+
+static const char *const iwmmxt_cregnames[] =
+{ "wcid", "wcon", "wcssf", "wcasf", "reserved", "reserved", "reserved", "reserved",
+ "wcgr0", "wcgr1", "wcgr2", "wcgr3", "reserved", "reserved", "reserved", "reserved"
+};
+
+static const char *const vec_condnames[] =
+{ "eq", "ne", "cs", "hi", "ge", "lt", "gt", "le"
+};
+
+static const char *const mve_predicatenames[] =
+{ "", "ttt", "tt", "tte", "t", "tee", "te", "tet", "",
+ "eee", "ee", "eet", "e", "ett", "et", "ete"
+};
+
+/* Names for 2-bit size field for mve vector instructions. */
+static const char *const mve_vec_sizename[] =
+ { "8", "16", "32", "64"};
+
+/* Indicates whether we are processing a then predicate,
+ else predicate or none at all. */
+enum vpt_pred_state
+{
+ PRED_NONE,
+ PRED_THEN,
+ PRED_ELSE
+};
+
+/* Information used to process a vpt block and subsequent instructions. */
+struct vpt_block
+{
+ /* Are we in a vpt block. */
+ bfd_boolean in_vpt_block;
+
+ /* Next predicate state if in vpt block. */
+ enum vpt_pred_state next_pred_state;
+
+ /* Mask from vpt/vpst instruction. */
+ long predicate_mask;
+
+ /* Instruction number in vpt block. */
+ long current_insn_num;
+
+ /* Number of instructions in vpt block. */
+ long num_pred_insn;
+};
+
+static struct vpt_block vpt_block_state =
+{
+ FALSE,
+ PRED_NONE,
+ 0,
+ 0,
+ 0
+};
+
+/* Default to GCC register name set. */
+static unsigned int regname_selected = 1;
+
+#define NUM_ARM_OPTIONS ARRAY_SIZE (regnames)
+#define arm_regnames regnames[regname_selected].reg_names
+
+static bfd_boolean force_thumb = FALSE;
+
+/* Current IT instruction state. This contains the same state as the IT
+ bits in the CPSR. */
+static unsigned int ifthen_state;
+/* IT state for the next instruction. */
+static unsigned int ifthen_next_state;
+/* The address of the insn for which the IT state is valid. */
+static bfd_vma ifthen_address;
+#define IFTHEN_COND ((ifthen_state >> 4) & 0xf)
+/* Indicates that the current Conditional state is unconditional or outside
+ an IT block. */
+#define COND_UNCOND 16
+
+\f
+/* Functions. */
+/* Extract the predicate mask for a VPT or VPST instruction.
+ The mask is composed of bits 13-15 (Mkl) and bit 22 (Mkh); the
+ result has Mkh in bit 3 and Mkl in bits 2-0, i.e. the 4-bit value
+ mkh:mkl. */
+
+static long
+mve_extract_pred_mask (long given)
+{
+ return ((given & 0x00400000) >> 19) | ((given & 0xe000) >> 13);
+}
+
+/* Return the number of instructions in a MVE predicate block.  The
+ block length is encoded by the position of the least significant
+ set bit of the 4-bit mask: 0b1000 -> 1 insn, 0bx100 -> 2,
+ 0bxx10 -> 3, 0bxxx1 -> 4.  An all-zero mask is invalid and yields
+ 0 (as does the unreachable fall-through). */
+static long
+num_instructions_vpt_block (long given)
+{
+ long mask = mve_extract_pred_mask (given);
+ if (mask == 0)
+ return 0;
+
+ if (mask == 8)
+ return 1;
+
+ if ((mask & 7) == 4)
+ return 2;
+
+ if ((mask & 3) == 2)
+ return 3;
+
+ if ((mask & 1) == 1)
+ return 4;
+
+ return 0;
+}
+
+/* Reset the global vpt tracking state: the disassembler is no longer
+ inside a vpt/vpst predicated block. */
+
+static void
+mark_outside_vpt_block (void)
+{
+ vpt_block_state.in_vpt_block = FALSE;
+ vpt_block_state.next_pred_state = PRED_NONE;
+ vpt_block_state.predicate_mask = 0;
+ vpt_block_state.current_insn_num = 0;
+ vpt_block_state.num_pred_insn = 0;
+}
+
+/* Record that GIVEN is a vpt/vpst instruction and initialise the
+ global tracking state for the predicated instructions that follow.
+ The first predicated instruction is always in the "then" state. */
+
+static void
+mark_inside_vpt_block (long given)
+{
+ vpt_block_state.in_vpt_block = TRUE;
+ vpt_block_state.next_pred_state = PRED_THEN;
+ vpt_block_state.predicate_mask = mve_extract_pred_mask (given);
+ vpt_block_state.current_insn_num = 0;
+ vpt_block_state.num_pred_insn = num_instructions_vpt_block (given);
+ assert (vpt_block_state.num_pred_insn >= 1);
+}
+
+/* Toggle between the "then" and "else" predicate states; PRED_NONE is
+ returned unchanged. */
+
+static enum vpt_pred_state
+invert_next_predicate_state (enum vpt_pred_state astate)
+{
+ if (astate == PRED_THEN)
+ return PRED_ELSE;
+ else if (astate == PRED_ELSE)
+ return PRED_THEN;
+ else
+ return PRED_NONE;
+}
+
+/* Compute the predicate state (then/else) of the next instruction in
+ the current vpt block.  Mask bit 3/2/1, when set, inverts the state
+ after the 1st/2nd/3rd predicated instruction respectively; after
+ the 4th there is no next instruction, so PRED_NONE is returned.
+ Called from update_vpt_block_state after current_insn_num has been
+ incremented. */
+
+static enum vpt_pred_state
+update_next_predicate_state (void)
+{
+ long pred_mask = vpt_block_state.predicate_mask;
+ long mask_for_insn = 0;
+
+ switch (vpt_block_state.current_insn_num)
+ {
+ case 1:
+ mask_for_insn = 8;
+ break;
+
+ case 2:
+ mask_for_insn = 4;
+ break;
+
+ case 3:
+ mask_for_insn = 2;
+ break;
+
+ case 4:
+ return PRED_NONE;
+ }
+
+ if (pred_mask & mask_for_insn)
+ return invert_next_predicate_state (vpt_block_state.next_pred_state);
+ else
+ return vpt_block_state.next_pred_state;
+}
+
+/* Advance the vpt block state machine by one instruction, leaving the
+ block once all of its predicated instructions have been processed. */
+
+static void
+update_vpt_block_state (void)
+{
+ vpt_block_state.current_insn_num++;
+ if (vpt_block_state.current_insn_num == vpt_block_state.num_pred_insn)
+ {
+ /* No more instructions to process in vpt block. */
+ mark_outside_vpt_block ();
+ return;
+ }
+
+ vpt_block_state.next_pred_state = update_next_predicate_state ();
+}
+
+/* Decode a bitfield of the form matching regexp (N(-N)?,)*N(-N)?.
+ Returns pointer to following character of the format string and
+ fills in *VALUEP and *WIDTHP with the extracted value and number of
+ bits extracted. WIDTHP can be NULL.  Successive comma-separated
+ fields are concatenated LSB-first into the result. */
+
+static const char *
+arm_decode_bitfield (const char *ptr,
+ unsigned long insn,
+ unsigned long *valuep,
+ int *widthp)
+{
+ unsigned long value = 0;
+ int width = 0;
+
+ do
+ {
+ int start, end;
+ int bits;
+
+ for (start = 0; *ptr >= '0' && *ptr <= '9'; ptr++)
+ start = start * 10 + *ptr - '0';
+ if (*ptr == '-')
+ for (end = 0, ptr++; *ptr >= '0' && *ptr <= '9'; ptr++)
+ end = end * 10 + *ptr - '0';
+ else
+ end = start;
+ bits = end - start;
+ if (bits < 0)
+ abort ();
+ /* (2ul << bits) - 1 is a run of (bits + 1) ones; written this way
+ rather than (1ul << (bits + 1)) - 1 so the shift count never
+ equals the word width even for a full-width field. */
+ value |= ((insn >> start) & ((2ul << bits) - 1)) << width;
+ width += bits + 1;
+ }
+ while (*ptr++ == ',');
+ *valuep = value;
+ if (widthp)
+ *widthp = width;
+ return ptr - 1;
+}
+
+/* Print the shifter operand GIVEN (the low 12 bits of an ARM
+ data-processing instruction) to STREAM via FUNC: base register in
+ bits 0-3, then an optional immediate (bit 4 clear) or register
+ (bit 4 set) shift.  When PRINT_SHIFT is false the shift-type
+ mnemonic is omitted and only the amount or register is printed. */
+
+static void
+arm_decode_shift (long given, fprintf_ftype func, void *stream,
+ bfd_boolean print_shift)
+{
+ func (stream, "%s", arm_regnames[given & 0xf]);
+
+ if ((given & 0xff0) != 0)
+ {
+ if ((given & 0x10) == 0)
+ {
+ int amount = (given & 0xf80) >> 7;
+ int shift = (given & 0x60) >> 5;
+
+ /* An immediate count of 0 encodes a shift of 32, except that
+ "ror #0" is the rotate-right-with-extend (rrx) form. */
+ if (amount == 0)
+ {
+ if (shift == 3)
+ {
+ func (stream, ", rrx");
+ return;
+ }
+
+ amount = 32;
+ }
+
+ if (print_shift)
+ func (stream, ", %s #%d", arm_shift[shift], amount);
+ else
+ func (stream, ", #%d", amount);
+ }
+ else if ((given & 0x80) == 0x80)
+ func (stream, "\t; <illegal shifter operand>");
+ else if (print_shift)
+ func (stream, ", %s %s", arm_shift[(given & 0x60) >> 5],
+ arm_regnames[(given & 0xf00) >> 8]);
+ else
+ func (stream, ", %s", arm_regnames[(given & 0xf00) >> 8]);
+ }
+}
+
+/* Return TRUE if the MATCHED_INSN can be inside an IT block.  Only the
+ encodings listed below (GP<->vector-lane moves and the scalar shift
+ group) return TRUE; every other MVE instruction returns FALSE. */
+
+static bfd_boolean
+is_mve_okay_in_it (enum mve_instructions matched_insn)
+{
+ switch (matched_insn)
+ {
+ case MVE_VMOV_GP_TO_VEC_LANE:
+ case MVE_VMOV2_VEC_LANE_TO_GP:
+ case MVE_VMOV2_GP_TO_VEC_LANE:
+ case MVE_VMOV_VEC_LANE_TO_GP:
+ case MVE_LSLL:
+ case MVE_LSLLI:
+ case MVE_LSRL:
+ case MVE_ASRL:
+ case MVE_ASRLI:
+ case MVE_SQRSHRL:
+ case MVE_SQRSHR:
+ case MVE_UQRSHL:
+ case MVE_UQRSHLL:
+ case MVE_UQSHL:
+ case MVE_UQSHLL:
+ case MVE_URSHRL:
+ case MVE_URSHR:
+ case MVE_SRSHRL:
+ case MVE_SRSHR:
+ case MVE_SQSHLL:
+ case MVE_SQSHL:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE if the feature set recorded in INFO's private data
+ includes Armv8.1-M Mainline (the architecture that carries MVE) and
+ is not the wildcard "any" CPU. */
+
+static bfd_boolean
+is_mve_architecture (struct disassemble_info *info)
+{
+ struct arm_private_data *private_data = info->private_data;
+ arm_feature_set allowed_arches = private_data->features;
+
+ arm_feature_set arm_ext_v8_1m_main
+ = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
+
+ /* Exclude ARM_CPU_IS_ANY so a generic "disassemble everything"
+ target does not get MVE-specific decoding by accident. */
+ if (ARM_CPU_HAS_FEATURE (arm_ext_v8_1m_main, allowed_arches)
+ && !ARM_CPU_IS_ANY (allowed_arches))
+ return TRUE;
+ else
+ return FALSE;
+}
+
+/* Return TRUE if GIVEN matches one of the VPT/VPST encodings, i.e. an
+ instruction that opens a new VPT predication block. */
+
+static bfd_boolean
+is_vpt_instruction (long given)
+{
+
+ /* If mkh:mkl is '0000' then its not a vpt/vpst instruction. */
+ if ((given & 0x0040e000) == 0)
+ return FALSE;
+
+ /* Match each mask/value pair for the known VPT/VPST variants. */
+ /* VPT floating point T1 variant. */
+ if (((given & 0xefb10f50) == 0xee310f00 && ((given & 0x1001) != 0x1))
+ /* VPT floating point T2 variant. */
+ || ((given & 0xefb10f50) == 0xee310f40)
+ /* VPT vector T1 variant. */
+ || ((given & 0xff811f51) == 0xfe010f00)
+ /* VPT vector T2 variant. */
+ || ((given & 0xff811f51) == 0xfe010f01
+ && ((given & 0x300000) != 0x300000))
+ /* VPT vector T3 variant. */
+ || ((given & 0xff811f50) == 0xfe011f00)
+ /* VPT vector T4 variant. */
+ || ((given & 0xff811f70) == 0xfe010f40)
+ /* VPT vector T5 variant. */
+ || ((given & 0xff811f70) == 0xfe010f60)
+ /* VPT vector T6 variant. */
+ || ((given & 0xff811f50) == 0xfe011f40)
+ /* VPST vector T variant. */
+ || ((given & 0xffbf1fff) == 0xfe310f4d))
+ return TRUE;
+ else
+ return FALSE;
+}
+
+/* Decode a bitfield from opcode GIVEN, with starting bitfield = START
+ and ending bitfield = END. END must be greater than START. */
+
+static unsigned long
+arm_decode_field (unsigned long given, unsigned long start, unsigned int end)
+{
+ int bits = end - start;
+
+ if (bits < 0)
+ abort ();
+
+ /* (2ul << bits) - 1 yields a mask of bits+1 one-bits, so the whole
+ START..END field (inclusive) is extracted. */
+ return ((given >> start) & ((2ul << bits) - 1));
+}
+
+/* Decode a bitfield from opcode GIVEN, with multiple bitfields:
+ START:END and START2:END2. END/END2 must be greater than
+ START/START2. */
+
+static unsigned long
+arm_decode_field_multiple (unsigned long given, unsigned int start,
+ unsigned int end, unsigned int start2,
+ unsigned int end2)
+{
+ int bits = end - start;
+ int bits2 = end2 - start2;
+ unsigned long value = 0;
+ int width = 0;
+
+ /* The first range is validated inside arm_decode_field below; only
+ the second range needs an explicit sanity check here. */
+ if (bits2 < 0)
+ abort ();
+
+ /* Low part: the START..END field. */
+ value = arm_decode_field (given, start, end);
+ width += bits + 1;
+
+ /* High part: the START2..END2 field, placed above the first. */
+ value |= ((given >> start2) & ((2ul << bits2) - 1)) << width;
+ return value;
+}
+
+/* Return TRUE if the GIVEN encoding should not be decoded as MATCHED_INSN.
+ This helps us decode instructions that change mnemonic depending on specific
+ operand values/encodings. */
+
+static bfd_boolean
+is_mve_encoding_conflict (unsigned long given,
+ enum mve_instructions matched_insn)
+{
+ switch (matched_insn)
+ {
+ case MVE_VPST:
+ if (arm_decode_field_multiple (given, 13, 15, 22, 22) == 0)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VPT_FP_T1:
+ if (arm_decode_field_multiple (given, 13, 15, 22, 22) == 0)
+ return TRUE;
+ if ((arm_decode_field (given, 12, 12) == 0)
+ && (arm_decode_field (given, 0, 0) == 1))
+ return TRUE;
+ return FALSE;
+
+ case MVE_VPT_FP_T2:
+ if (arm_decode_field_multiple (given, 13, 15, 22, 22) == 0)
+ return TRUE;
+ if (arm_decode_field (given, 0, 3) == 0xd)
+ return TRUE;
+ return FALSE;
+
+ case MVE_VPT_VEC_T1:
+ case MVE_VPT_VEC_T2:
+ case MVE_VPT_VEC_T3:
+ case MVE_VPT_VEC_T4:
+ case MVE_VPT_VEC_T5:
+ case MVE_VPT_VEC_T6:
+ if (arm_decode_field_multiple (given, 13, 15, 22, 22) == 0)
+ return TRUE;
+ if (arm_decode_field (given, 20, 21) == 3)
+ return TRUE;
+ return FALSE;
+
+ case MVE_VCMP_FP_T1:
+ if ((arm_decode_field (given, 12, 12) == 0)
+ && (arm_decode_field (given, 0, 0) == 1))
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VCMP_FP_T2:
+ if (arm_decode_field (given, 0, 3) == 0xd)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VQADD_T2:
+ case MVE_VQSUB_T2:
+ case MVE_VMUL_VEC_T2:
+ case MVE_VMULH:
+ case MVE_VRMULH:
+ case MVE_VMLA:
+ case MVE_VMAX:
+ case MVE_VMIN:
+ case MVE_VBRSR:
+ case MVE_VADD_VEC_T2:
+ case MVE_VSUB_VEC_T2:
+ case MVE_VABAV:
+ case MVE_VQRSHL_T1:
+ case MVE_VQSHL_T4:
+ case MVE_VRSHL_T1:
+ case MVE_VSHL_T3:
+ case MVE_VCADD_VEC:
+ case MVE_VHCADD:
+ case MVE_VDDUP:
+ case MVE_VIDUP:
+ case MVE_VQRDMLADH:
+ case MVE_VQDMLAH:
+ case MVE_VQRDMLAH:
+ case MVE_VQDMLASH:
+ case MVE_VQRDMLASH:
+ case MVE_VQDMLSDH:
+ case MVE_VQRDMLSDH:
+ case MVE_VQDMULH_T3:
+ case MVE_VQRDMULH_T4:
+ case MVE_VQDMLADH:
+ case MVE_VMLAS:
+ case MVE_VMULL_INT:
+ case MVE_VHADD_T2:
+ case MVE_VHSUB_T2:
+ case MVE_VCMP_VEC_T1:
+ case MVE_VCMP_VEC_T2:
+ case MVE_VCMP_VEC_T3:
+ case MVE_VCMP_VEC_T4:
+ case MVE_VCMP_VEC_T5:
+ case MVE_VCMP_VEC_T6:
+ if (arm_decode_field (given, 20, 21) == 3)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VLD2:
+ case MVE_VLD4:
+ case MVE_VST2:
+ case MVE_VST4:
+ if (arm_decode_field (given, 7, 8) == 3)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VSTRB_T1:
+ case MVE_VSTRH_T2:
+ if ((arm_decode_field (given, 24, 24) == 0)
+ && (arm_decode_field (given, 21, 21) == 0))
+ {
+ return TRUE;
+ }
+ else if ((arm_decode_field (given, 7, 8) == 3))
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VSTRB_T5:
+ case MVE_VSTRH_T6:
+ case MVE_VSTRW_T7:
+ if ((arm_decode_field (given, 24, 24) == 0)
+ && (arm_decode_field (given, 21, 21) == 0))
+ {
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VCVT_FP_FIX_VEC:
+ return (arm_decode_field (given, 16, 21) & 0x38) == 0;
+
+ case MVE_VBIC_IMM:
+ case MVE_VORR_IMM:
+ {
+ unsigned long cmode = arm_decode_field (given, 8, 11);
+
+ if ((cmode & 1) == 0)
+ return TRUE;
+ else if ((cmode & 0xc) == 0xc)
+ return TRUE;
+ else
+ return FALSE;
+ }
+
+ case MVE_VMVN_IMM:
+ {
+ unsigned long cmode = arm_decode_field (given, 8, 11);
- /* These have been 32-bit since the invention of Thumb. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0xf000c000, 0xf800d001, "blx%c\t%B%x"},
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V4T),
- 0xf000d000, 0xf800d000, "bl%c\t%B%x"},
+ if (cmode == 0xe)
+ return TRUE;
+ else if ((cmode & 0x9) == 1)
+ return TRUE;
+ else if ((cmode & 0xd) == 9)
+ return TRUE;
+ else
+ return FALSE;
+ }
- /* Fallback. */
- {ARM_FEATURE_CORE_LOW (ARM_EXT_V1),
- 0x00000000, 0x00000000, UNDEFINED_INSTRUCTION},
- {ARM_FEATURE_CORE_LOW (0), 0, 0, 0}
-};
+ case MVE_VMOV_IMM_TO_VEC:
+ if ((arm_decode_field (given, 5, 5) == 1)
+ && (arm_decode_field (given, 8, 11) != 0xe))
+ return TRUE;
+ else
+ return FALSE;
-static const char *const arm_conditional[] =
-{"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
- "hi", "ls", "ge", "lt", "gt", "le", "al", "<und>", ""};
+ case MVE_VMOVL:
+ {
+ unsigned long size = arm_decode_field (given, 19, 20);
+ if ((size == 0) || (size == 3))
+ return TRUE;
+ else
+ return FALSE;
+ }
-static const char *const arm_fp_const[] =
-{"0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0"};
+ case MVE_VMAXA:
+ case MVE_VMINA:
+ case MVE_VMAXV:
+ case MVE_VMAXAV:
+ case MVE_VMINV:
+ case MVE_VMINAV:
+ case MVE_VQRSHL_T2:
+ case MVE_VQSHL_T1:
+ case MVE_VRSHL_T2:
+ case MVE_VSHL_T2:
+ case MVE_VSHLL_T2:
+ case MVE_VADDV:
+ case MVE_VMOVN:
+ case MVE_VQMOVUN:
+ case MVE_VQMOVN:
+ if (arm_decode_field (given, 18, 19) == 3)
+ return TRUE;
+ else
+ return FALSE;
-static const char *const arm_shift[] =
-{"lsl", "lsr", "asr", "ror"};
+ case MVE_VMLSLDAV:
+ case MVE_VRMLSLDAVH:
+ case MVE_VMLALDAV:
+ case MVE_VADDLV:
+ if (arm_decode_field (given, 20, 22) == 7)
+ return TRUE;
+ else
+ return FALSE;
-typedef struct
-{
- const char *name;
- const char *description;
- const char *reg_names[16];
+ case MVE_VRMLALDAVH:
+ if ((arm_decode_field (given, 20, 22) & 6) == 6)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VDWDUP:
+ case MVE_VIWDUP:
+ if ((arm_decode_field (given, 20, 21) == 3)
+ || (arm_decode_field (given, 1, 3) == 7))
+ return TRUE;
+ else
+ return FALSE;
+
+
+ case MVE_VSHLL_T1:
+ if (arm_decode_field (given, 16, 18) == 0)
+ {
+ unsigned long sz = arm_decode_field (given, 19, 20);
+
+ if ((sz == 1) || (sz == 2))
+ return TRUE;
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VQSHL_T2:
+ case MVE_VQSHLU_T3:
+ case MVE_VRSHR:
+ case MVE_VSHL_T1:
+ case MVE_VSHR:
+ case MVE_VSLI:
+ case MVE_VSRI:
+ if (arm_decode_field (given, 19, 21) == 0)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VCTP:
+ if (arm_decode_field (given, 16, 19) == 0xf)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_ASRLI:
+ case MVE_ASRL:
+ case MVE_LSLLI:
+ case MVE_LSLL:
+ case MVE_LSRL:
+ case MVE_SQRSHRL:
+ case MVE_SQSHLL:
+ case MVE_SRSHRL:
+ case MVE_UQRSHLL:
+ case MVE_UQSHLL:
+ case MVE_URSHRL:
+ if (arm_decode_field (given, 9, 11) == 0x7)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_CSINC:
+ case MVE_CSINV:
+ {
+ unsigned long rm, rn;
+ rm = arm_decode_field (given, 0, 3);
+ rn = arm_decode_field (given, 16, 19);
+ /* CSET/CSETM. */
+ if (rm == 0xf && rn == 0xf)
+ return TRUE;
+ /* CINC/CINV. */
+ else if (rn == rm && rn != 0xf)
+ return TRUE;
+ }
+ /* Fall through. */
+ case MVE_CSEL:
+ case MVE_CSNEG:
+ if (arm_decode_field (given, 0, 3) == 0xd)
+ return TRUE;
+ /* CNEG. */
+ else if (matched_insn == MVE_CSNEG)
+ if (arm_decode_field (given, 0, 3) == arm_decode_field (given, 16, 19))
+ return TRUE;
+ return FALSE;
+
+ default:
+ case MVE_VADD_FP_T1:
+ case MVE_VADD_FP_T2:
+ case MVE_VADD_VEC_T1:
+ return FALSE;
+
+ }
}
-arm_regname;
-static const arm_regname regnames[] =
+static void
+print_mve_vld_str_addr (struct disassemble_info *info,
+ unsigned long given,
+ enum mve_instructions matched_insn)
{
- { "reg-names-raw", N_("Select raw register names"),
- { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"}},
- { "reg-names-gcc", N_("Select register names used by GCC"),
- { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc" }},
- { "reg-names-std", N_("Select register names used in ARM's ISA documentation"),
- { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc" }},
- { "force-thumb", N_("Assume all insns are Thumb insns"), {NULL} },
- { "no-force-thumb", N_("Examine preceding label to determine an insn's type"), {NULL} },
- { "reg-names-apcs", N_("Select register names used in the APCS"),
- { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "v4", "v5", "v6", "sl", "fp", "ip", "sp", "lr", "pc" }},
- { "reg-names-atpcs", N_("Select register names used in the ATPCS"),
- { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "IP", "SP", "LR", "PC" }},
- { "reg-names-special-atpcs", N_("Select special register names used in the ATPCS"),
- { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "WR", "v5", "SB", "SL", "FP", "IP", "SP", "LR", "PC" }}
-};
+ void *stream = info->stream;
+ fprintf_ftype func = info->fprintf_func;
-static const char *const iwmmxt_wwnames[] =
-{"b", "h", "w", "d"};
+ unsigned long p, w, gpr, imm, add, mod_imm;
-static const char *const iwmmxt_wwssnames[] =
-{"b", "bus", "bc", "bss",
- "h", "hus", "hc", "hss",
- "w", "wus", "wc", "wss",
- "d", "dus", "dc", "dss"
-};
+ imm = arm_decode_field (given, 0, 6);
+ mod_imm = imm;
-static const char *const iwmmxt_regnames[] =
-{ "wr0", "wr1", "wr2", "wr3", "wr4", "wr5", "wr6", "wr7",
- "wr8", "wr9", "wr10", "wr11", "wr12", "wr13", "wr14", "wr15"
-};
+ switch (matched_insn)
+ {
+ case MVE_VLDRB_T1:
+ case MVE_VSTRB_T1:
+ gpr = arm_decode_field (given, 16, 18);
+ break;
-static const char *const iwmmxt_cregnames[] =
-{ "wcid", "wcon", "wcssf", "wcasf", "reserved", "reserved", "reserved", "reserved",
- "wcgr0", "wcgr1", "wcgr2", "wcgr3", "reserved", "reserved", "reserved", "reserved"
-};
+ case MVE_VLDRH_T2:
+ case MVE_VSTRH_T2:
+ gpr = arm_decode_field (given, 16, 18);
+ mod_imm = imm << 1;
+ break;
-static const char *const vec_condnames[] =
-{ "eq", "ne", "cs", "hi", "ge", "lt", "gt", "le"
-};
+ case MVE_VLDRH_T6:
+ case MVE_VSTRH_T6:
+ gpr = arm_decode_field (given, 16, 19);
+ mod_imm = imm << 1;
+ break;
-static const char *const mve_predicatenames[] =
-{ "", "ttt", "tt", "tte", "t", "tee", "te", "tet", "",
- "eee", "ee", "eet", "e", "ett", "et", "ete"
-};
+ case MVE_VLDRW_T7:
+ case MVE_VSTRW_T7:
+ gpr = arm_decode_field (given, 16, 19);
+ mod_imm = imm << 2;
+ break;
-/* Names for 2-bit size field for mve vector isntructions. */
-static const char *const mve_vec_sizename[] =
- { "8", "16", "32", "64"};
+ case MVE_VLDRB_T5:
+ case MVE_VSTRB_T5:
+ gpr = arm_decode_field (given, 16, 19);
+ break;
-/* Indicates whether we are processing a then predicate,
- else predicate or none at all. */
-enum vpt_pred_state
-{
- PRED_NONE,
- PRED_THEN,
- PRED_ELSE
-};
+ default:
+ return;
+ }
-/* Information used to process a vpt block and subsequent instructions. */
-struct vpt_block
-{
- /* Are we in a vpt block. */
- bfd_boolean in_vpt_block;
+ p = arm_decode_field (given, 24, 24);
+ w = arm_decode_field (given, 21, 21);
- /* Next predicate state if in vpt block. */
- enum vpt_pred_state next_pred_state;
+ add = arm_decode_field (given, 23, 23);
- /* Mask from vpt/vpst instruction. */
- long predicate_mask;
+ char * add_sub;
- /* Instruction number in vpt block. */
- long current_insn_num;
+ /* Don't print anything for '+' as it is implied. */
+ if (add == 1)
+ add_sub = "";
+ else
+ add_sub = "-";
- /* Number of instructions in vpt block.. */
- long num_pred_insn;
-};
+ if (p == 1)
+ {
+ /* Offset mode. */
+ if (w == 0)
+ func (stream, "[%s, #%s%lu]", arm_regnames[gpr], add_sub, mod_imm);
+ /* Pre-indexed mode. */
+ else
+ func (stream, "[%s, #%s%lu]!", arm_regnames[gpr], add_sub, mod_imm);
+ }
+ else if ((p == 0) && (w == 1))
+ /* Post-index mode. */
+ func (stream, "[%s], #%s%lu", arm_regnames[gpr], add_sub, mod_imm);
+}
-static struct vpt_block vpt_block_state =
+/* Return FALSE if GIVEN is not an undefined encoding for MATCHED_INSN.
+ Otherwise, return TRUE and set UNDEFINED_CODE to give a reason as to why
+ this encoding is undefined. */
+
+static bfd_boolean
+is_mve_undefined (unsigned long given, enum mve_instructions matched_insn,
+ enum mve_undefined *undefined_code)
{
- FALSE,
- PRED_NONE,
- 0,
- 0,
- 0
-};
+ *undefined_code = UNDEF_NONE;
-/* Default to GCC register name set. */
-static unsigned int regname_selected = 1;
+ switch (matched_insn)
+ {
+ case MVE_VDUP:
+ if (arm_decode_field_multiple (given, 5, 5, 22, 22) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
-#define NUM_ARM_OPTIONS ARRAY_SIZE (regnames)
-#define arm_regnames regnames[regname_selected].reg_names
+ case MVE_VQADD_T1:
+ case MVE_VQSUB_T1:
+ case MVE_VMUL_VEC_T1:
+ case MVE_VABD_VEC:
+ case MVE_VADD_VEC_T1:
+ case MVE_VSUB_VEC_T1:
+ case MVE_VQDMULH_T1:
+ case MVE_VQRDMULH_T2:
+ case MVE_VRHADD:
+ case MVE_VHADD_T1:
+ case MVE_VHSUB_T1:
+ if (arm_decode_field (given, 20, 21) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VLDRB_T1:
+ if (arm_decode_field (given, 7, 8) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VLDRH_T2:
+ if (arm_decode_field (given, 7, 8) <= 1)
+ {
+ *undefined_code = UNDEF_SIZE_LE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VSTRB_T1:
+ if ((arm_decode_field (given, 7, 8) == 0))
+ {
+ *undefined_code = UNDEF_SIZE_0;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VSTRH_T2:
+ if ((arm_decode_field (given, 7, 8) <= 1))
+ {
+ *undefined_code = UNDEF_SIZE_LE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VLDRB_GATHER_T1:
+ if (arm_decode_field (given, 7, 8) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else if ((arm_decode_field (given, 28, 28) == 0)
+ && (arm_decode_field (given, 7, 8) == 0))
+ {
+ *undefined_code = UNDEF_NOT_UNS_SIZE_0;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VLDRH_GATHER_T2:
+ if (arm_decode_field (given, 7, 8) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else if ((arm_decode_field (given, 28, 28) == 0)
+ && (arm_decode_field (given, 7, 8) == 1))
+ {
+ *undefined_code = UNDEF_NOT_UNS_SIZE_1;
+ return TRUE;
+ }
+ else if (arm_decode_field (given, 7, 8) == 0)
+ {
+ *undefined_code = UNDEF_SIZE_0;
+ return TRUE;
+ }
+ else
+ return FALSE;
-static bfd_boolean force_thumb = FALSE;
+ case MVE_VLDRW_GATHER_T3:
+ if (arm_decode_field (given, 7, 8) != 2)
+ {
+ *undefined_code = UNDEF_SIZE_NOT_2;
+ return TRUE;
+ }
+ else if (arm_decode_field (given, 28, 28) == 0)
+ {
+ *undefined_code = UNDEF_NOT_UNSIGNED;
+ return TRUE;
+ }
+ else
+ return FALSE;
-/* Current IT instruction state. This contains the same state as the IT
- bits in the CPSR. */
-static unsigned int ifthen_state;
-/* IT state for the next instruction. */
-static unsigned int ifthen_next_state;
-/* The address of the insn for which the IT state is valid. */
-static bfd_vma ifthen_address;
-#define IFTHEN_COND ((ifthen_state >> 4) & 0xf)
-/* Indicates that the current Conditional state is unconditional or outside
- an IT block. */
-#define COND_UNCOND 16
+ case MVE_VLDRD_GATHER_T4:
+ if (arm_decode_field (given, 7, 8) != 3)
+ {
+ *undefined_code = UNDEF_SIZE_NOT_3;
+ return TRUE;
+ }
+ else if (arm_decode_field (given, 28, 28) == 0)
+ {
+ *undefined_code = UNDEF_NOT_UNSIGNED;
+ return TRUE;
+ }
+ else
+ return FALSE;
-\f
-/* Functions. */
-/* Extract the predicate mask for a VPT or VPST instruction.
- The mask is composed of bits 13-15 (Mkl) and bit 22 (Mkh). */
+ case MVE_VSTRB_SCATTER_T1:
+ if (arm_decode_field (given, 7, 8) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
-static long
-mve_extract_pred_mask (long given)
-{
- return ((given & 0x00400000) >> 19) | ((given & 0xe000) >> 13);
-}
+ case MVE_VSTRH_SCATTER_T2:
+ {
+ unsigned long size = arm_decode_field (given, 7, 8);
+ if (size == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else if (size == 0)
+ {
+ *undefined_code = UNDEF_SIZE_0;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
-/* Return the number of instructions in a MVE predicate block. */
-static long
-num_instructions_vpt_block (long given)
-{
- long mask = mve_extract_pred_mask (given);
- if (mask == 0)
- return 0;
+ case MVE_VSTRW_SCATTER_T3:
+ if (arm_decode_field (given, 7, 8) != 2)
+ {
+ *undefined_code = UNDEF_SIZE_NOT_2;
+ return TRUE;
+ }
+ else
+ return FALSE;
- if (mask == 8)
- return 1;
+ case MVE_VSTRD_SCATTER_T4:
+ if (arm_decode_field (given, 7, 8) != 3)
+ {
+ *undefined_code = UNDEF_SIZE_NOT_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
- if ((mask & 7) == 4)
- return 2;
+ case MVE_VCVT_FP_FIX_VEC:
+ {
+ unsigned long imm6 = arm_decode_field (given, 16, 21);
+ if ((imm6 & 0x20) == 0)
+ {
+ *undefined_code = UNDEF_VCVT_IMM6;
+ return TRUE;
+ }
- if ((mask & 3) == 2)
- return 3;
+ if ((arm_decode_field (given, 9, 9) == 0)
+ && ((imm6 & 0x30) == 0x20))
+ {
+ *undefined_code = UNDEF_VCVT_FSI_IMM6;
+ return TRUE;
+ }
- if ((mask & 1) == 1)
- return 4;
+ return FALSE;
+ }
- return 0;
-}
+ case MVE_VNEG_FP:
+ case MVE_VABS_FP:
+ case MVE_VCVT_BETWEEN_FP_INT:
+ case MVE_VCVT_FROM_FP_TO_INT:
+ {
+ unsigned long size = arm_decode_field (given, 18, 19);
+ if (size == 0)
+ {
+ *undefined_code = UNDEF_SIZE_0;
+ return TRUE;
+ }
+ else if (size == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
-static void
-mark_outside_vpt_block (void)
-{
- vpt_block_state.in_vpt_block = FALSE;
- vpt_block_state.next_pred_state = PRED_NONE;
- vpt_block_state.predicate_mask = 0;
- vpt_block_state.current_insn_num = 0;
- vpt_block_state.num_pred_insn = 0;
-}
+ case MVE_VMOV_VEC_LANE_TO_GP:
+ {
+ unsigned long op1 = arm_decode_field (given, 21, 22);
+ unsigned long op2 = arm_decode_field (given, 5, 6);
+ unsigned long u = arm_decode_field (given, 23, 23);
-static void
-mark_inside_vpt_block (long given)
-{
- vpt_block_state.in_vpt_block = TRUE;
- vpt_block_state.next_pred_state = PRED_THEN;
- vpt_block_state.predicate_mask = mve_extract_pred_mask (given);
- vpt_block_state.current_insn_num = 0;
- vpt_block_state.num_pred_insn = num_instructions_vpt_block (given);
- assert (vpt_block_state.num_pred_insn >= 1);
-}
+ if ((op2 == 0) && (u == 1))
+ {
+ if ((op1 == 0) || (op1 == 1))
+ {
+ *undefined_code = UNDEF_BAD_U_OP1_OP2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else if (op2 == 2)
+ {
+ if ((op1 == 0) || (op1 == 1))
+ {
+ *undefined_code = UNDEF_BAD_OP1_OP2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
-static enum vpt_pred_state
-invert_next_predicate_state (enum vpt_pred_state astate)
-{
- if (astate == PRED_THEN)
- return PRED_ELSE;
- else if (astate == PRED_ELSE)
- return PRED_THEN;
- else
- return PRED_NONE;
-}
+ return FALSE;
+ }
-static enum vpt_pred_state
-update_next_predicate_state (void)
-{
- long pred_mask = vpt_block_state.predicate_mask;
- long mask_for_insn = 0;
+ case MVE_VMOV_GP_TO_VEC_LANE:
+ if (arm_decode_field (given, 5, 6) == 2)
+ {
+ unsigned long op1 = arm_decode_field (given, 21, 22);
+ if ((op1 == 0) || (op1 == 1))
+ {
+ *undefined_code = UNDEF_BAD_OP1_OP2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
- switch (vpt_block_state.current_insn_num)
- {
- case 1:
- mask_for_insn = 8;
- break;
+ case MVE_VMOV_VEC_TO_VEC:
+ if ((arm_decode_field (given, 5, 5) == 1)
+ || (arm_decode_field (given, 22, 22) == 1))
+ return TRUE;
+ return FALSE;
- case 2:
- mask_for_insn = 4;
- break;
+ case MVE_VMOV_IMM_TO_VEC:
+ if (arm_decode_field (given, 5, 5) == 0)
+ {
+ unsigned long cmode = arm_decode_field (given, 8, 11);
- case 3:
- mask_for_insn = 2;
- break;
+ if (((cmode & 9) == 1) || ((cmode & 5) == 1))
+ {
+ *undefined_code = UNDEF_OP_0_BAD_CMODE;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
- case 4:
- return PRED_NONE;
- }
+ case MVE_VSHLL_T2:
+ case MVE_VMOVN:
+ if (arm_decode_field (given, 18, 19) == 2)
+ {
+ *undefined_code = UNDEF_SIZE_2;
+ return TRUE;
+ }
+ else
+ return FALSE;
- if (pred_mask & mask_for_insn)
- return invert_next_predicate_state (vpt_block_state.next_pred_state);
- else
- return vpt_block_state.next_pred_state;
-}
+ case MVE_VRMLALDAVH:
+ case MVE_VMLADAV_T1:
+ case MVE_VMLADAV_T2:
+ case MVE_VMLALDAV:
+ if ((arm_decode_field (given, 28, 28) == 1)
+ && (arm_decode_field (given, 12, 12) == 1))
+ {
+ *undefined_code = UNDEF_XCHG_UNS;
+ return TRUE;
+ }
+ else
+ return FALSE;
-static void
-update_vpt_block_state (void)
-{
- vpt_block_state.current_insn_num++;
- if (vpt_block_state.current_insn_num == vpt_block_state.num_pred_insn)
- {
- /* No more instructions to process in vpt block. */
- mark_outside_vpt_block ();
- return;
- }
+ case MVE_VQSHRN:
+ case MVE_VQSHRUN:
+ case MVE_VSHLL_T1:
+ case MVE_VSHRN:
+ {
+ unsigned long sz = arm_decode_field (given, 19, 20);
+ if (sz == 1)
+ return FALSE;
+ else if ((sz & 2) == 2)
+ return FALSE;
+ else
+ {
+ *undefined_code = UNDEF_SIZE;
+ return TRUE;
+ }
+ }
+ break;
- vpt_block_state.next_pred_state = update_next_predicate_state ();
-}
+ case MVE_VQSHL_T2:
+ case MVE_VQSHLU_T3:
+ case MVE_VRSHR:
+ case MVE_VSHL_T1:
+ case MVE_VSHR:
+ case MVE_VSLI:
+ case MVE_VSRI:
+ {
+ unsigned long sz = arm_decode_field (given, 19, 21);
+ if ((sz & 7) == 1)
+ return FALSE;
+ else if ((sz & 6) == 2)
+ return FALSE;
+ else if ((sz & 4) == 4)
+ return FALSE;
+ else
+ {
+ *undefined_code = UNDEF_SIZE;
+ return TRUE;
+ }
+ }
-/* Decode a bitfield of the form matching regexp (N(-N)?,)*N(-N)?.
- Returns pointer to following character of the format string and
- fills in *VALUEP and *WIDTHP with the extracted value and number of
- bits extracted. WIDTHP can be NULL. */
+ case MVE_VQRSHRN:
+ case MVE_VQRSHRUN:
+ if (arm_decode_field (given, 19, 20) == 0)
+ {
+ *undefined_code = UNDEF_SIZE_0;
+ return TRUE;
+ }
+ else
+ return FALSE;
-static const char *
-arm_decode_bitfield (const char *ptr,
- unsigned long insn,
- unsigned long *valuep,
- int *widthp)
-{
- unsigned long value = 0;
- int width = 0;
+ case MVE_VABS_VEC:
+ if (arm_decode_field (given, 18, 19) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VQNEG:
+ case MVE_VQABS:
+ case MVE_VNEG_VEC:
+ case MVE_VCLS:
+ case MVE_VCLZ:
+ if (arm_decode_field (given, 18, 19) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
- do
- {
- int start, end;
- int bits;
+ case MVE_VREV16:
+ if (arm_decode_field (given, 18, 19) == 0)
+ return FALSE;
+ else
+ {
+ *undefined_code = UNDEF_SIZE_NOT_0;
+ return TRUE;
+ }
- for (start = 0; *ptr >= '0' && *ptr <= '9'; ptr++)
- start = start * 10 + *ptr - '0';
- if (*ptr == '-')
- for (end = 0, ptr++; *ptr >= '0' && *ptr <= '9'; ptr++)
- end = end * 10 + *ptr - '0';
+ case MVE_VREV32:
+ {
+ unsigned long size = arm_decode_field (given, 18, 19);
+ if ((size & 2) == 2)
+ {
+ *undefined_code = UNDEF_SIZE_2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+
+ case MVE_VREV64:
+ if (arm_decode_field (given, 18, 19) != 3)
+ return FALSE;
else
- end = start;
- bits = end - start;
- if (bits < 0)
- abort ();
- value |= ((insn >> start) & ((2ul << bits) - 1)) << width;
- width += bits + 1;
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+
+ default:
+ return FALSE;
}
- while (*ptr++ == ',');
- *valuep = value;
- if (widthp)
- *widthp = width;
- return ptr - 1;
}
-static void
-arm_decode_shift (long given, fprintf_ftype func, void *stream,
- bfd_boolean print_shift)
+/* Return FALSE if GIVEN is not an unpredictable encoding for MATCHED_INSN.
+ Otherwise, return TRUE and set UNPREDICTABLE_CODE to give a reason as to
+ why this encoding is unpredictable. */
+
+static bfd_boolean
+is_mve_unpredictable (unsigned long given, enum mve_instructions matched_insn,
+ enum mve_unpredictable *unpredictable_code)
{
- func (stream, "%s", arm_regnames[given & 0xf]);
+ *unpredictable_code = UNPRED_NONE;
- if ((given & 0xff0) != 0)
+ switch (matched_insn)
{
- if ((given & 0x10) == 0)
+ case MVE_VCMP_FP_T2:
+ case MVE_VPT_FP_T2:
+ if ((arm_decode_field (given, 12, 12) == 0)
+ && (arm_decode_field (given, 5, 5) == 1))
{
- int amount = (given & 0xf80) >> 7;
- int shift = (given & 0x60) >> 5;
-
- if (amount == 0)
- {
- if (shift == 3)
- {
- func (stream, ", rrx");
- return;
- }
-
- amount = 32;
- }
+ *unpredictable_code = UNPRED_FCA_0_FCB_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
- if (print_shift)
- func (stream, ", %s #%d", arm_shift[shift], amount);
- else
- func (stream, ", #%d", amount);
+ case MVE_VPT_VEC_T4:
+ case MVE_VPT_VEC_T5:
+ case MVE_VPT_VEC_T6:
+ case MVE_VCMP_VEC_T4:
+ case MVE_VCMP_VEC_T5:
+ case MVE_VCMP_VEC_T6:
+ if (arm_decode_field (given, 0, 3) == 0xd)
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
}
- else if ((given & 0x80) == 0x80)
- func (stream, "\t; <illegal shifter operand>");
- else if (print_shift)
- func (stream, ", %s %s", arm_shift[(given & 0x60) >> 5],
- arm_regnames[(given & 0xf00) >> 8]);
else
- func (stream, ", %s", arm_regnames[(given & 0xf00) >> 8]);
- }
-}
+ return FALSE;
-/* Return TRUE if the MATCHED_INSN can be inside an IT block. */
+ case MVE_VDUP:
+ {
+ unsigned long gpr = arm_decode_field (given, 12, 15);
+ if (gpr == 0xd)
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
+ }
+ else if (gpr == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
-static bfd_boolean
-is_mve_okay_in_it (enum mve_instructions matched_insn)
-{
- return FALSE;
-}
+ return FALSE;
+ }
-static bfd_boolean
-is_mve_architecture (struct disassemble_info *info)
-{
- struct arm_private_data *private_data = info->private_data;
- arm_feature_set allowed_arches = private_data->features;
+ case MVE_VQADD_T2:
+ case MVE_VQSUB_T2:
+ case MVE_VMUL_FP_T2:
+ case MVE_VMUL_VEC_T2:
+ case MVE_VMLA:
+ case MVE_VBRSR:
+ case MVE_VADD_FP_T2:
+ case MVE_VSUB_FP_T2:
+ case MVE_VADD_VEC_T2:
+ case MVE_VSUB_VEC_T2:
+ case MVE_VQRSHL_T2:
+ case MVE_VQSHL_T1:
+ case MVE_VRSHL_T2:
+ case MVE_VSHL_T2:
+ case MVE_VSHLC:
+ case MVE_VQDMLAH:
+ case MVE_VQRDMLAH:
+ case MVE_VQDMLASH:
+ case MVE_VQRDMLASH:
+ case MVE_VQDMULH_T3:
+ case MVE_VQRDMULH_T4:
+ case MVE_VMLAS:
+ case MVE_VFMA_FP_SCALAR:
+ case MVE_VFMAS_FP_SCALAR:
+ case MVE_VHADD_T2:
+ case MVE_VHSUB_T2:
+ {
+ unsigned long gpr = arm_decode_field (given, 0, 3);
+ if (gpr == 0xd)
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
+ }
+ else if (gpr == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
- arm_feature_set arm_ext_v8_1m_main
- = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
+ return FALSE;
+ }
- if (ARM_CPU_HAS_FEATURE (arm_ext_v8_1m_main, allowed_arches)
- && !ARM_CPU_IS_ANY (allowed_arches))
- return TRUE;
- else
- return FALSE;
-}
+ case MVE_VLD2:
+ case MVE_VST2:
+ {
+ unsigned long rn = arm_decode_field (given, 16, 19);
-static bfd_boolean
-is_vpt_instruction (long given)
-{
+ if ((rn == 0xd) && (arm_decode_field (given, 21, 21) == 1))
+ {
+ *unpredictable_code = UNPRED_R13_AND_WB;
+ return TRUE;
+ }
- /* If mkh:mkl is '0000' then its not a vpt/vpst instruction. */
- if ((given & 0x0040e000) == 0)
- return FALSE;
+ if (rn == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
- /* VPT floating point T1 variant. */
- if (((given & 0xefb10f50) == 0xee310f00 && ((given & 0x1001) != 0x1))
- /* VPT floating point T2 variant. */
- || ((given & 0xefb10f50) == 0xee310f40)
- /* VPT vector T1 variant. */
- || ((given & 0xff811f51) == 0xfe010f00)
- /* VPT vector T2 variant. */
- || ((given & 0xff811f51) == 0xfe010f01
- && ((given & 0x300000) != 0x300000))
- /* VPT vector T3 variant. */
- || ((given & 0xff811f50) == 0xfe011f00)
- /* VPT vector T4 variant. */
- || ((given & 0xff811f70) == 0xfe010f40)
- /* VPT vector T5 variant. */
- || ((given & 0xff811f70) == 0xfe010f60)
- /* VPT vector T6 variant. */
- || ((given & 0xff811f50) == 0xfe011f40)
- /* VPST vector T variant. */
- || ((given & 0xffbf1fff) == 0xfe310f4d))
- return TRUE;
- else
- return FALSE;
-}
+ if (arm_decode_field_multiple (given, 13, 15, 22, 22) > 6)
+ {
+ *unpredictable_code = UNPRED_Q_GT_6;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
-/* Decode a bitfield from opcode GIVEN, with starting bitfield = START
- and ending bitfield = END. END must be greater than START. */
+ case MVE_VLD4:
+ case MVE_VST4:
+ {
+ unsigned long rn = arm_decode_field (given, 16, 19);
-static unsigned long
-arm_decode_field (unsigned long given, unsigned int start, unsigned int end)
-{
- int bits = end - start;
+ if ((rn == 0xd) && (arm_decode_field (given, 21, 21) == 1))
+ {
+ *unpredictable_code = UNPRED_R13_AND_WB;
+ return TRUE;
+ }
- if (bits < 0)
- abort ();
+ if (rn == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
- return ((given >> start) & ((2ul << bits) - 1));
-}
+ if (arm_decode_field_multiple (given, 13, 15, 22, 22) > 4)
+ {
+ *unpredictable_code = UNPRED_Q_GT_4;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
-/* Decode a bitfield from opcode GIVEN, with multiple bitfields:
- START:END and START2:END2. END/END2 must be greater than
- START/START2. */
+ case MVE_VLDRB_T5:
+ case MVE_VLDRH_T6:
+ case MVE_VLDRW_T7:
+ case MVE_VSTRB_T5:
+ case MVE_VSTRH_T6:
+ case MVE_VSTRW_T7:
+ {
+ unsigned long rn = arm_decode_field (given, 16, 19);
-static unsigned long
-arm_decode_field_multiple (unsigned long given, unsigned int start,
- unsigned int end, unsigned int start2,
- unsigned int end2)
-{
- int bits = end - start;
- int bits2 = end2 - start2;
- unsigned long value = 0;
- int width = 0;
+ if ((rn == 0xd) && (arm_decode_field (given, 21, 21) == 1))
+ {
+ *unpredictable_code = UNPRED_R13_AND_WB;
+ return TRUE;
+ }
+ else if (rn == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+
+ case MVE_VLDRB_GATHER_T1:
+ if (arm_decode_field (given, 0, 0) == 1)
+ {
+ *unpredictable_code = UNPRED_OS;
+ return TRUE;
+ }
+
+ /* fall through. */
+ /* To handle common code with T2-T4 variants. */
+ case MVE_VLDRH_GATHER_T2:
+ case MVE_VLDRW_GATHER_T3:
+ case MVE_VLDRD_GATHER_T4:
+ {
+ unsigned long qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+
+ if (qd == qm)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQUAL;
+ return TRUE;
+ }
+
+ if (arm_decode_field (given, 16, 19) == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
+
+ return FALSE;
+ }
+
+ case MVE_VLDRW_GATHER_T5:
+ case MVE_VLDRD_GATHER_T6:
+ {
+ unsigned long qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long qm = arm_decode_field_multiple (given, 17, 19, 7, 7);
+
+ if (qd == qm)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQUAL;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+
+ case MVE_VSTRB_SCATTER_T1:
+ if (arm_decode_field (given, 16, 19) == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
+ else if (arm_decode_field (given, 0, 0) == 1)
+ {
+ *unpredictable_code = UNPRED_OS;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VSTRH_SCATTER_T2:
+ case MVE_VSTRW_SCATTER_T3:
+ case MVE_VSTRD_SCATTER_T4:
+ if (arm_decode_field (given, 16, 19) == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
+ else
+ return FALSE;
- if (bits2 < 0)
- abort ();
+ case MVE_VMOV2_VEC_LANE_TO_GP:
+ case MVE_VMOV2_GP_TO_VEC_LANE:
+ case MVE_VCVT_BETWEEN_FP_INT:
+ case MVE_VCVT_FROM_FP_TO_INT:
+ {
+ unsigned long rt = arm_decode_field (given, 0, 3);
+ unsigned long rt2 = arm_decode_field (given, 16, 19);
- value = arm_decode_field (given, start, end);
- width += bits + 1;
+ if ((rt == 0xd) || (rt2 == 0xd))
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
+ }
+ else if ((rt == 0xf) || (rt2 == 0xf))
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
+ else if (rt == rt2)
+ {
+ *unpredictable_code = UNPRED_GP_REGS_EQUAL;
+ return TRUE;
+ }
- value |= ((given >> start2) & ((2ul << bits2) - 1)) << width;
- return value;
-}
+ return FALSE;
+ }
-/* Return TRUE if the GIVEN encoding should not be decoded as MATCHED_INSN.
- This helps us decode instructions that change mnemonic depending on specific
- operand values/encodings. */
+ case MVE_VMAXV:
+ case MVE_VMAXAV:
+ case MVE_VMAXNMV_FP:
+ case MVE_VMAXNMAV_FP:
+ case MVE_VMINNMV_FP:
+ case MVE_VMINNMAV_FP:
+ case MVE_VMINV:
+ case MVE_VMINAV:
+ case MVE_VABAV:
+ case MVE_VMOV_HFP_TO_GP:
+ case MVE_VMOV_GP_TO_VEC_LANE:
+ case MVE_VMOV_VEC_LANE_TO_GP:
+ {
+ unsigned long rda = arm_decode_field (given, 12, 15);
+ if (rda == 0xd)
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
+ }
+ else if (rda == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
-static bfd_boolean
-is_mve_encoding_conflict (unsigned long given,
- enum mve_instructions matched_insn)
-{
- switch (matched_insn)
- {
- case MVE_VPST:
- if (arm_decode_field_multiple (given, 13, 15, 22, 22) == 0)
- return TRUE;
- else
return FALSE;
+ }
- case MVE_VPT_FP_T1:
- if (arm_decode_field_multiple (given, 13, 15, 22, 22) == 0)
- return TRUE;
- if ((arm_decode_field (given, 12, 12) == 0)
- && (arm_decode_field (given, 0, 0) == 1))
- return TRUE;
- return FALSE;
+ case MVE_VMULL_INT:
+ {
+ unsigned long Qd;
+ unsigned long Qm;
+ unsigned long Qn;
- case MVE_VPT_FP_T2:
- if (arm_decode_field_multiple (given, 13, 15, 22, 22) == 0)
- return TRUE;
- if (arm_decode_field (given, 0, 3) == 0xd)
- return TRUE;
- return FALSE;
+ if (arm_decode_field (given, 20, 21) == 2)
+ {
+ Qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ Qn = arm_decode_field_multiple (given, 17, 19, 7, 7);
- case MVE_VPT_VEC_T1:
- case MVE_VPT_VEC_T2:
- case MVE_VPT_VEC_T3:
- case MVE_VPT_VEC_T4:
- case MVE_VPT_VEC_T5:
- case MVE_VPT_VEC_T6:
- if (arm_decode_field_multiple (given, 13, 15, 22, 22) == 0)
- return TRUE;
- if (arm_decode_field (given, 20, 21) == 3)
- return TRUE;
- return FALSE;
+ if ((Qd == Qn) || (Qd == Qm))
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
+ }
- case MVE_VCMP_FP_T1:
- if ((arm_decode_field (given, 12, 12) == 0)
- && (arm_decode_field (given, 0, 0) == 1))
- return TRUE;
- else
- return FALSE;
+ case MVE_VCMUL_FP:
+ case MVE_VQDMULL_T1:
+ {
+ unsigned long Qd;
+ unsigned long Qm;
+ unsigned long Qn;
- case MVE_VCMP_FP_T2:
- if (arm_decode_field (given, 0, 3) == 0xd)
- return TRUE;
- else
- return FALSE;
+ if (arm_decode_field (given, 28, 28) == 1)
+ {
+ Qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ Qn = arm_decode_field_multiple (given, 17, 19, 7, 7);
- case MVE_VHADD_T2:
- case MVE_VHSUB_T2:
- case MVE_VCMP_VEC_T1:
- case MVE_VCMP_VEC_T2:
- case MVE_VCMP_VEC_T3:
- case MVE_VCMP_VEC_T4:
- case MVE_VCMP_VEC_T5:
- case MVE_VCMP_VEC_T6:
- if (arm_decode_field (given, 20, 21) == 3)
- return TRUE;
- else
- return FALSE;
+ if ((Qd == Qn) || (Qd == Qm))
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
+ }
- default:
- return FALSE;
+ case MVE_VQDMULL_T2:
+ {
+ unsigned long gpr = arm_decode_field (given, 0, 3);
+ if (gpr == 0xd)
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
+ }
+ else if (gpr == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
- }
-}
+ if (arm_decode_field (given, 28, 28) == 1)
+ {
+ unsigned long Qd
+ = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long Qn = arm_decode_field_multiple (given, 17, 19, 7, 7);
-/* Return FALSE if GIVEN is not an undefined encoding for MATCHED_INSN.
- Otherwise, return TRUE and set UNDEFINED_CODE to give a reason as to why
- this encoding is undefined. */
+ if (Qd == Qn)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
-static bfd_boolean
-is_mve_undefined (unsigned long given, enum mve_instructions matched_insn,
- enum mve_undefined *undefined_code)
-{
- *undefined_code = UNDEF_NONE;
+ return FALSE;
+ }
- switch (matched_insn)
- {
- case MVE_VDUP:
- if (arm_decode_field_multiple (given, 5, 5, 22, 22) == 3)
+ case MVE_VMLSLDAV:
+ case MVE_VRMLSLDAVH:
+ case MVE_VMLALDAV:
+ case MVE_VADDLV:
+ if (arm_decode_field (given, 20, 22) == 6)
{
- *undefined_code = UNDEF_SIZE_3;
+ *unpredictable_code = UNPRED_R13;
return TRUE;
}
else
return FALSE;
- case MVE_VRHADD:
- case MVE_VHADD_T1:
- case MVE_VHSUB_T1:
- if (arm_decode_field (given, 20, 21) == 3)
+ case MVE_VDWDUP:
+ case MVE_VIWDUP:
+ if (arm_decode_field (given, 1, 3) == 6)
{
- *undefined_code = UNDEF_SIZE_3;
+ *unpredictable_code = UNPRED_R13;
return TRUE;
}
else
return FALSE;
- default:
- return FALSE;
- }
-}
+ case MVE_VCADD_VEC:
+ case MVE_VHCADD:
+ {
+ unsigned long Qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ if ((Qd == Qm) && arm_decode_field (given, 20, 21) == 2)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
-/* Return FALSE if GIVEN is not an unpredictable encoding for MATCHED_INSN.
- Otherwise, return TRUE and set UNPREDICTABLE_CODE to give a reason as to
- why this encoding is unpredictable. */
+ case MVE_VCADD_FP:
+ {
+ unsigned long Qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ if ((Qd == Qm) && arm_decode_field (given, 20, 20) == 1)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
-static bfd_boolean
-is_mve_unpredictable (unsigned long given, enum mve_instructions matched_insn,
- enum mve_unpredictable *unpredictable_code)
-{
- *unpredictable_code = UNPRED_NONE;
+ case MVE_VCMLA_FP:
+ {
+ unsigned long Qda;
+ unsigned long Qm;
+ unsigned long Qn;
- switch (matched_insn)
- {
- case MVE_VCMP_FP_T2:
- case MVE_VPT_FP_T2:
- if ((arm_decode_field (given, 12, 12) == 0)
- && (arm_decode_field (given, 5, 5) == 1))
- {
- *unpredictable_code = UNPRED_FCA_0_FCB_1;
- return TRUE;
- }
- else
- return FALSE;
+ if (arm_decode_field (given, 20, 20) == 1)
+ {
+ Qda = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ Qn = arm_decode_field_multiple (given, 17, 19, 7, 7);
- case MVE_VPT_VEC_T4:
- case MVE_VPT_VEC_T5:
- case MVE_VPT_VEC_T6:
- case MVE_VCMP_VEC_T4:
- case MVE_VCMP_VEC_T5:
- case MVE_VCMP_VEC_T6:
- if (arm_decode_field (given, 0, 3) == 0xd)
+ if ((Qda == Qn) || (Qda == Qm))
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
+
+ }
+
+ case MVE_VCTP:
+ if (arm_decode_field (given, 16, 19) == 0xd)
{
*unpredictable_code = UNPRED_R13;
	  return TRUE;
	}
      else
return FALSE;
- case MVE_VDUP:
+ case MVE_VREV64:
{
- unsigned long gpr = arm_decode_field (given, 12, 15);
+ unsigned long qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long qm = arm_decode_field_multiple (given, 1, 3, 6, 6);
+
+ if (qd == qm)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQUAL;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+
+ case MVE_LSLL:
+ case MVE_LSLLI:
+ case MVE_LSRL:
+ case MVE_ASRL:
+ case MVE_ASRLI:
+ case MVE_UQSHLL:
+ case MVE_UQRSHLL:
+ case MVE_URSHRL:
+ case MVE_SRSHRL:
+ case MVE_SQSHLL:
+ case MVE_SQRSHRL:
+ {
+ unsigned long gpr = arm_decode_field (given, 9, 11);
+ gpr = ((gpr << 1) | 1);
if (gpr == 0xd)
{
*unpredictable_code = UNPRED_R13;
return TRUE;
}
- return FALSE;
- }
+ return FALSE;
+ }
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Print the lane index operand for an MVE VMOV between a general-purpose
+   register and a vector lane.  The element size (8, 16 or 32 bits) and the
+   index within a 32-bit beat are decoded from the op1 (bits 21-22),
+   op2 (bits 5-6) and H (bit 16) fields of GIVEN; an encoding matching no
+   valid element size prints "<undefined index>".  */
+
+static void
+print_mve_vmov_index (struct disassemble_info *info, unsigned long given)
+{
+  unsigned long op1 = arm_decode_field (given, 21, 22);
+  unsigned long op2 = arm_decode_field (given, 5, 6);
+  unsigned long h = arm_decode_field (given, 16, 16);
+  unsigned long index_operand, esize, targetBeat, idx;
+  void *stream = info->stream;
+  fprintf_ftype func = info->fprintf_func;
+
+  /* op1<1> set selects 8-bit elements; op2 is the byte index in the beat.  */
+  if ((op1 & 0x2) == 0x2)
+    {
+      index_operand = op2;
+      esize = 8;
+    }
+  /* op1<1> clear with op2<0> set selects 16-bit elements.  */
+  else if (((op1 & 0x2) == 0x0) && ((op2 & 0x1) == 0x1))
+    {
+      index_operand = op2 >> 1;
+      esize = 16;
+    }
+  /* op1<1> clear with op2 == 0 selects 32-bit elements.  */
+  else if (((op1 & 0x2) == 0) && ((op2 & 0x3) == 0))
+    {
+      index_operand = 0;
+      esize = 32;
+    }
+  else
+    {
+      func (stream, "<undefined index>");
+      return;
+    }
+
+  /* The 32-bit beat of the vector is selected by H:op1<0>.  */
+  targetBeat = (op1 & 0x1) | (h << 1);
+  idx = index_operand + targetBeat * (32/esize);
+
+  func (stream, "%lu", idx);
+}
+
+/* Print neon and mve 8-bit immediate that can be a 8, 16, 32, or 64-bits
+ in length and integer of floating-point type. */
+static void
+print_simd_imm8 (struct disassemble_info *info, unsigned long given,
+ unsigned int ibit_loc, const struct mopcode32 *insn)
+{
+ int bits = 0;
+ int cmode = (given >> 8) & 0xf;
+ int op = (given >> 5) & 0x1;
+ unsigned long value = 0, hival = 0;
+ unsigned shift;
+ int size = 0;
+ int isfloat = 0;
+ void *stream = info->stream;
+ fprintf_ftype func = info->fprintf_func;
+
+ /* On Neon the 'i' bit is at bit 24, on mve it is
+ at bit 28. */
+ bits |= ((given >> ibit_loc) & 1) << 7;
+ bits |= ((given >> 16) & 7) << 4;
+ bits |= ((given >> 0) & 15) << 0;
+
+ if (cmode < 8)
+ {
+ shift = (cmode >> 1) & 3;
+ value = (unsigned long) bits << (8 * shift);
+ size = 32;
+ }
+ else if (cmode < 12)
+ {
+ shift = (cmode >> 1) & 1;
+ value = (unsigned long) bits << (8 * shift);
+ size = 16;
+ }
+ else if (cmode < 14)
+ {
+ shift = (cmode & 1) + 1;
+ value = (unsigned long) bits << (8 * shift);
+ value |= (1ul << (8 * shift)) - 1;
+ size = 32;
+ }
+ else if (cmode == 14)
+ {
+ if (op)
+ {
+ /* Bit replication into bytes. */
+ int ix;
+ unsigned long mask;
+
+ value = 0;
+ hival = 0;
+ for (ix = 7; ix >= 0; ix--)
+ {
+ mask = ((bits >> ix) & 1) ? 0xff : 0;
+ if (ix <= 3)
+ value = (value << 8) | mask;
+ else
+ hival = (hival << 8) | mask;
+ }
+ size = 64;
+ }
+ else
+ {
+ /* Byte replication. */
+ value = (unsigned long) bits;
+ size = 8;
+ }
+ }
+ else if (!op)
+ {
+ /* Floating point encoding. */
+ int tmp;
+
+ value = (unsigned long) (bits & 0x7f) << 19;
+ value |= (unsigned long) (bits & 0x80) << 24;
+ tmp = bits & 0x40 ? 0x3c : 0x40;
+ value |= (unsigned long) tmp << 24;
+ size = 32;
+ isfloat = 1;
+ }
+ else
+ {
+ func (stream, "<illegal constant %.8x:%x:%x>",
+ bits, cmode, op);
+ size = 32;
+ return;
+ }
+
+  /* printU determines whether the immediate value should be printed as
+     unsigned.  */
+ unsigned printU = 0;
+ switch (insn->mve_op)
+ {
+ default:
+ break;
+      /* We want this for instructions that don't have a 'signed' type.  */
+ case MVE_VBIC_IMM:
+ case MVE_VORR_IMM:
+ case MVE_VMVN_IMM:
+ case MVE_VMOV_IMM_TO_VEC:
+ printU = 1;
+ break;
+ }
+ switch (size)
+ {
+ case 8:
+ func (stream, "#%ld\t; 0x%.2lx", value, value);
+ break;
- case MVE_VFMA_FP_SCALAR:
- case MVE_VFMAS_FP_SCALAR:
- case MVE_VHADD_T2:
- case MVE_VHSUB_T2:
- {
- unsigned long gpr = arm_decode_field (given, 0, 3);
- if (gpr == 0xd)
- {
- *unpredictable_code = UNPRED_R13;
- return TRUE;
- }
- else if (gpr == 0xf)
- {
- *unpredictable_code = UNPRED_R15;
- return TRUE;
- }
+ case 16:
+ func (stream,
+ printU
+ ? "#%lu\t; 0x%.4lx"
+ : "#%ld\t; 0x%.4lx", value, value);
+ break;
- return FALSE;
- }
+ case 32:
+ if (isfloat)
+ {
+ unsigned char valbytes[4];
+ double fvalue;
+
+ /* Do this a byte at a time so we don't have to
+ worry about the host's endianness. */
+ valbytes[0] = value & 0xff;
+ valbytes[1] = (value >> 8) & 0xff;
+ valbytes[2] = (value >> 16) & 0xff;
+ valbytes[3] = (value >> 24) & 0xff;
+
+ floatformat_to_double
+ (& floatformat_ieee_single_little, valbytes,
+ & fvalue);
+
+ func (stream, "#%.7g\t; 0x%.8lx", fvalue,
+ value);
+ }
+ else
+ func (stream,
+ printU
+ ? "#%lu\t; 0x%.8lx"
+ : "#%ld\t; 0x%.8lx",
+ (long) (((value & 0x80000000L) != 0)
+ && !printU
+ ? value | ~0xffffffffL : value),
+ value);
+ break;
+
+ case 64:
+ func (stream, "#0x%.8lx%.8lx", hival, value);
+ break;
default:
- return FALSE;
+ abort ();
}
+
}
static void
switch (undefined_code)
{
+ case UNDEF_SIZE:
+ func (stream, "illegal size");
+ break;
+
+ case UNDEF_SIZE_0:
+ func (stream, "size equals zero");
+ break;
+
+ case UNDEF_SIZE_2:
+ func (stream, "size equals two");
+ break;
+
case UNDEF_SIZE_3:
func (stream, "size equals three");
break;
+ case UNDEF_SIZE_LE_1:
+ func (stream, "size <= 1");
+ break;
+
+ case UNDEF_SIZE_NOT_0:
+ func (stream, "size not equal to 0");
+ break;
+
+ case UNDEF_SIZE_NOT_2:
+ func (stream, "size not equal to 2");
+ break;
+
+ case UNDEF_SIZE_NOT_3:
+ func (stream, "size not equal to 3");
+ break;
+
+ case UNDEF_NOT_UNS_SIZE_0:
+ func (stream, "not unsigned and size = zero");
+ break;
+
+ case UNDEF_NOT_UNS_SIZE_1:
+ func (stream, "not unsigned and size = one");
+ break;
+
+ case UNDEF_NOT_UNSIGNED:
+ func (stream, "not unsigned");
+ break;
+
+ case UNDEF_VCVT_IMM6:
+ func (stream, "invalid imm6");
+ break;
+
+ case UNDEF_VCVT_FSI_IMM6:
+ func (stream, "fsi = 0 and invalid imm6");
+ break;
+
+ case UNDEF_BAD_OP1_OP2:
+ func (stream, "bad size with op2 = 2 and op1 = 0 or 1");
+ break;
+
+ case UNDEF_BAD_U_OP1_OP2:
+ func (stream, "unsigned with op2 = 0 and op1 = 0 or 1");
+ break;
+
+ case UNDEF_OP_0_BAD_CMODE:
+ func (stream, "op field equal 0 and bad cmode");
+ break;
+
+ case UNDEF_XCHG_UNS:
+ func (stream, "exchange and unsigned together");
+ break;
+
case UNDEF_NONE:
break;
}
func (stream, "use of r15 (pc)");
break;
+ case UNPRED_Q_GT_4:
+ func (stream, "start register block > r4");
+ break;
+
+ case UNPRED_Q_GT_6:
+ func (stream, "start register block > r6");
+ break;
+
+ case UNPRED_R13_AND_WB:
+ func (stream, "use of r13 and write back");
+ break;
+
+ case UNPRED_Q_REGS_EQUAL:
+ func (stream,
+ "same vector register used for destination and other operand");
+ break;
+
+ case UNPRED_OS:
+ func (stream, "use of offset scaled");
+ break;
+
+ case UNPRED_GP_REGS_EQUAL:
+ func (stream, "same general-purpose register used for both operands");
+ break;
+
+ case UNPRED_Q_REGS_EQ_AND_SIZE_1:
+ func (stream, "use of identical q registers and size = 1");
+ break;
+
+    case UNPRED_Q_REGS_EQ_AND_SIZE_2:
+      func (stream, "use of identical q registers and size = 2");
+      break;
+
case UNPRED_NONE:
break;
}
}
+/* Print register block operand for mve vld2/vld4/vst2/vst4.  */
+
+static void
+print_mve_register_blocks (struct disassemble_info *info,
+			   unsigned long given,
+			   enum mve_instructions matched_insn)
+{
+  void *stream = info->stream;
+  fprintf_ftype func = info->fprintf_func;
+
+  /* First register of the block: Qd, from bits 13-15 and 22.  */
+  unsigned long q_reg_start = arm_decode_field_multiple (given,
+							 13, 15,
+							 22, 22);
+  switch (matched_insn)
+    {
+    case MVE_VLD2:
+    case MVE_VST2:
+      /* Two consecutive registers: the block must fit below q8,
+	 so the start register can be at most q6.  */
+      if (q_reg_start <= 6)
+	func (stream, "{q%ld, q%ld}", q_reg_start, q_reg_start + 1);
+      else
+	func (stream, "<illegal reg q%ld>", q_reg_start);
+      break;
+
+    case MVE_VLD4:
+    case MVE_VST4:
+      /* Four consecutive registers: start register at most q4.  */
+      if (q_reg_start <= 4)
+	func (stream, "{q%ld, q%ld, q%ld, q%ld}", q_reg_start,
+	      q_reg_start + 1, q_reg_start + 2,
+	      q_reg_start + 3);
+      else
+	func (stream, "<illegal reg q%ld>", q_reg_start);
+      break;
+
+    default:
+      break;
+    }
+}
+
+static void
+print_mve_rounding_mode (struct disassemble_info *info,
+ unsigned long given,
+ enum mve_instructions matched_insn)
+{
+ void *stream = info->stream;
+ fprintf_ftype func = info->fprintf_func;
+
+ switch (matched_insn)
+ {
+ case MVE_VCVT_FROM_FP_TO_INT:
+ {
+ switch (arm_decode_field (given, 8, 9))
+ {
+ case 0:
+ func (stream, "a");
+ break;
+
+ case 1:
+ func (stream, "n");
+ break;
+
+ case 2:
+ func (stream, "p");
+ break;
+
+ case 3:
+ func (stream, "m");
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ case MVE_VRINT_FP:
+ {
+ switch (arm_decode_field (given, 7, 9))
+ {
+ case 0:
+ func (stream, "n");
+ break;
+
+ case 1:
+ func (stream, "x");
+ break;
+
+ case 2:
+ func (stream, "a");
+ break;
+
+ case 3:
+ func (stream, "z");
+ break;
+
+ case 5:
+ func (stream, "m");
+ break;
+
+	  case 7:
+	    func (stream, "p");
+	    break;
+
+ case 4:
+ case 6:
+ default:
+ break;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Print the data-type suffix (e.g. "f32.s32") for an MVE VCVT
+   instruction.  Which fields of GIVEN encode the source/destination
+   types depends on which VCVT variant MATCHED_INSN is; unrecognised
+   field values print nothing.  */
+
+static void
+print_mve_vcvt_size (struct disassemble_info *info,
+		     unsigned long given,
+		     enum mve_instructions matched_insn)
+{
+  unsigned long mode = 0;
+  void *stream = info->stream;
+  fprintf_ftype func = info->fprintf_func;
+
+  switch (matched_insn)
+    {
+    case MVE_VCVT_FP_FIX_VEC:
+      {
+	/* mode is op<1>:U:op<0> gathered from bits 9, 28 and 8.  */
+	mode = (((given & 0x200) >> 7)
+		| ((given & 0x10000000) >> 27)
+		| ((given & 0x100) >> 8));
+
+	switch (mode)
+	  {
+	  case 0:
+	    func (stream, "f16.s16");
+	    break;
+
+	  case 1:
+	    func (stream, "s16.f16");
+	    break;
+
+	  case 2:
+	    func (stream, "f16.u16");
+	    break;
+
+	  case 3:
+	    func (stream, "u16.f16");
+	    break;
+
+	  case 4:
+	    func (stream, "f32.s32");
+	    break;
+
+	  case 5:
+	    func (stream, "s32.f32");
+	    break;
+
+	  case 6:
+	    func (stream, "f32.u32");
+	    break;
+
+	  case 7:
+	    func (stream, "u32.f32");
+	    break;
+
+	  default:
+	    break;
+	  }
+	break;
+      }
+    case MVE_VCVT_BETWEEN_FP_INT:
+      {
+	unsigned long size = arm_decode_field (given, 18, 19);
+	unsigned long op = arm_decode_field (given, 7, 8);
+
+	if (size == 1)
+	  {
+	    switch (op)
+	      {
+	      case 0:
+		func (stream, "f16.s16");
+		break;
+
+	      case 1:
+		func (stream, "f16.u16");
+		break;
+
+	      case 2:
+		func (stream, "s16.f16");
+		break;
+
+	      case 3:
+		func (stream, "u16.f16");
+		break;
+
+	      default:
+		break;
+	      }
+	  }
+	else if (size == 2)
+	  {
+	    switch (op)
+	      {
+	      case 0:
+		func (stream, "f32.s32");
+		break;
+
+	      case 1:
+		func (stream, "f32.u32");
+		break;
+
+	      case 2:
+		func (stream, "s32.f32");
+		break;
+
+	      case 3:
+		func (stream, "u32.f32");
+		break;
+	      }
+	  }
+      }
+      break;
+
+    case MVE_VCVT_FP_HALF_FP:
+      {
+	/* Bit 28 selects the direction of the half/single conversion.  */
+	unsigned long op = arm_decode_field (given, 28, 28);
+	if (op == 0)
+	  func (stream, "f16.f32");
+	else if (op == 1)
+	  func (stream, "f32.f16");
+      }
+      break;
+
+    case MVE_VCVT_FROM_FP_TO_INT:
+      {
+	/* size is U:size gathered from bit 7 and bits 18-19.  */
+	unsigned long size = arm_decode_field_multiple (given, 7, 7, 18, 19);
+
+	switch (size)
+	  {
+	  case 2:
+	    func (stream, "s16.f16");
+	    break;
+
+	  case 3:
+	    func (stream, "u16.f16");
+	    break;
+
+	  case 4:
+	    func (stream, "s32.f32");
+	    break;
+
+	  case 5:
+	    func (stream, "u32.f32");
+	    break;
+
+	  default:
+	    break;
+	  }
+      }
+      break;
+
+    default:
+      break;
+    }
+}
+
+/* Print the rotation immediate (in degrees) for MVE complex arithmetic
+   instructions.  ROT is the raw rot field and ROT_WIDTH its width in
+   bits: a 1-bit field encodes 90/270, a 2-bit field encodes
+   0/90/180/270.  Other widths print nothing.  */
+
+static void
+print_mve_rotate (struct disassemble_info *info, unsigned long rot,
+		  unsigned long rot_width)
+{
+  void *stream = info->stream;
+  fprintf_ftype func = info->fprintf_func;
+
+  if (rot_width == 1)
+    {
+      switch (rot)
+	{
+	case 0:
+	  func (stream, "90");
+	  break;
+	case 1:
+	  func (stream, "270");
+	  break;
+	default:
+	  break;
+	}
+    }
+  else if (rot_width == 2)
+    {
+      switch (rot)
+	{
+	case 0:
+	  func (stream, "0");
+	  break;
+	case 1:
+	  func (stream, "90");
+	  break;
+	case 2:
+	  func (stream, "180");
+	  break;
+	case 3:
+	  func (stream, "270");
+	  break;
+	default:
+	  break;
+	}
+    }
+}
+
static void
print_instruction_predicate (struct disassemble_info *info)
{
switch (matched_insn)
{
+ case MVE_VABAV:
+ case MVE_VABD_VEC:
+ case MVE_VABS_FP:
+ case MVE_VABS_VEC:
+ case MVE_VADD_VEC_T1:
+ case MVE_VADD_VEC_T2:
+ case MVE_VADDV:
+ case MVE_VBRSR:
+ case MVE_VCADD_VEC:
+ case MVE_VCLS:
+ case MVE_VCLZ:
case MVE_VCMP_VEC_T1:
case MVE_VCMP_VEC_T2:
case MVE_VCMP_VEC_T3:
case MVE_VCMP_VEC_T4:
case MVE_VCMP_VEC_T5:
case MVE_VCMP_VEC_T6:
+ case MVE_VCTP:
+ case MVE_VDDUP:
+ case MVE_VDWDUP:
case MVE_VHADD_T1:
case MVE_VHADD_T2:
+ case MVE_VHCADD:
case MVE_VHSUB_T1:
case MVE_VHSUB_T2:
+ case MVE_VIDUP:
+ case MVE_VIWDUP:
+ case MVE_VLD2:
+ case MVE_VLD4:
+ case MVE_VLDRB_GATHER_T1:
+ case MVE_VLDRH_GATHER_T2:
+ case MVE_VLDRW_GATHER_T3:
+ case MVE_VLDRD_GATHER_T4:
+ case MVE_VLDRB_T1:
+ case MVE_VLDRH_T2:
+ case MVE_VMAX:
+ case MVE_VMAXA:
+ case MVE_VMAXV:
+ case MVE_VMAXAV:
+ case MVE_VMIN:
+ case MVE_VMINA:
+ case MVE_VMINV:
+ case MVE_VMINAV:
+ case MVE_VMLA:
+ case MVE_VMLAS:
+ case MVE_VMUL_VEC_T1:
+ case MVE_VMUL_VEC_T2:
+ case MVE_VMULH:
+ case MVE_VRMULH:
+ case MVE_VMULL_INT:
+ case MVE_VNEG_FP:
+ case MVE_VNEG_VEC:
case MVE_VPT_VEC_T1:
case MVE_VPT_VEC_T2:
case MVE_VPT_VEC_T3:
case MVE_VPT_VEC_T4:
case MVE_VPT_VEC_T5:
case MVE_VPT_VEC_T6:
+ case MVE_VQABS:
+ case MVE_VQADD_T1:
+ case MVE_VQADD_T2:
+ case MVE_VQDMLADH:
+ case MVE_VQRDMLADH:
+ case MVE_VQDMLAH:
+ case MVE_VQRDMLAH:
+ case MVE_VQDMLASH:
+ case MVE_VQRDMLASH:
+ case MVE_VQDMLSDH:
+ case MVE_VQRDMLSDH:
+ case MVE_VQDMULH_T1:
+ case MVE_VQRDMULH_T2:
+ case MVE_VQDMULH_T3:
+ case MVE_VQRDMULH_T4:
+ case MVE_VQNEG:
+ case MVE_VQRSHL_T1:
+ case MVE_VQRSHL_T2:
+ case MVE_VQSHL_T1:
+ case MVE_VQSHL_T4:
+ case MVE_VQSUB_T1:
+ case MVE_VQSUB_T2:
+ case MVE_VREV32:
+ case MVE_VREV64:
case MVE_VRHADD:
+ case MVE_VRINT_FP:
+ case MVE_VRSHL_T1:
+ case MVE_VRSHL_T2:
+ case MVE_VSHL_T2:
+ case MVE_VSHL_T3:
+ case MVE_VSHLL_T2:
+ case MVE_VST2:
+ case MVE_VST4:
+ case MVE_VSTRB_SCATTER_T1:
+ case MVE_VSTRH_SCATTER_T2:
+ case MVE_VSTRW_SCATTER_T3:
+ case MVE_VSTRB_T1:
+ case MVE_VSTRH_T2:
+ case MVE_VSUB_VEC_T1:
+ case MVE_VSUB_VEC_T2:
if (size <= 3)
func (stream, "%s", mve_vec_sizename[size]);
else
func (stream, "<undef size>");
break;
+ case MVE_VABD_FP:
+ case MVE_VADD_FP_T1:
+ case MVE_VADD_FP_T2:
+ case MVE_VSUB_FP_T1:
+ case MVE_VSUB_FP_T2:
case MVE_VCMP_FP_T1:
case MVE_VCMP_FP_T2:
case MVE_VFMA_FP_SCALAR:
case MVE_VFMA_FP:
case MVE_VFMS_FP:
case MVE_VFMAS_FP_SCALAR:
+ case MVE_VMAXNM_FP:
+ case MVE_VMAXNMA_FP:
+ case MVE_VMAXNMV_FP:
+ case MVE_VMAXNMAV_FP:
+ case MVE_VMINNM_FP:
+ case MVE_VMINNMA_FP:
+ case MVE_VMINNMV_FP:
+ case MVE_VMINNMAV_FP:
+ case MVE_VMUL_FP_T1:
+ case MVE_VMUL_FP_T2:
case MVE_VPT_FP_T1:
case MVE_VPT_FP_T2:
if (size == 0)
func (stream, "16");
break;
+ case MVE_VCADD_FP:
+ case MVE_VCMLA_FP:
+ case MVE_VCMUL_FP:
+ case MVE_VMLADAV_T1:
+ case MVE_VMLALDAV:
+ case MVE_VMLSDAV_T1:
+ case MVE_VMLSLDAV:
+ case MVE_VMOVN:
+ case MVE_VQDMULL_T1:
+ case MVE_VQDMULL_T2:
+ case MVE_VQMOVN:
+ case MVE_VQMOVUN:
+ if (size == 0)
+ func (stream, "16");
+ else if (size == 1)
+ func (stream, "32");
+ break;
+
+ case MVE_VMOVL:
+ if (size == 1)
+ func (stream, "8");
+ else if (size == 2)
+ func (stream, "16");
+ break;
+
case MVE_VDUP:
switch (size)
{
}
break;
+ case MVE_VMOV_GP_TO_VEC_LANE:
+ case MVE_VMOV_VEC_LANE_TO_GP:
+ switch (size)
+ {
+ case 0: case 4:
+ func (stream, "32");
+ break;
+
+ case 1: case 3:
+ case 5: case 7:
+ func (stream, "16");
+ break;
+
+ case 8: case 9: case 10: case 11:
+ case 12: case 13: case 14: case 15:
+ func (stream, "8");
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case MVE_VMOV_IMM_TO_VEC:
+ switch (size)
+ {
+ case 0: case 4: case 8:
+ case 12: case 24: case 26:
+ func (stream, "i32");
+ break;
+ case 16: case 20:
+ func (stream, "i16");
+ break;
+ case 28:
+ func (stream, "i8");
+ break;
+ case 29:
+ func (stream, "i64");
+ break;
+ case 30:
+ func (stream, "f32");
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case MVE_VMULL_POLY:
+ if (size == 0)
+ func (stream, "p8");
+ else if (size == 1)
+ func (stream, "p16");
+ break;
+
+ case MVE_VMVN_IMM:
+ switch (size)
+ {
+ case 0: case 2: case 4:
+ case 6: case 12: case 13:
+ func (stream, "32");
+ break;
+
+ case 8: case 10:
+ func (stream, "16");
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case MVE_VBIC_IMM:
+ case MVE_VORR_IMM:
+ switch (size)
+ {
+ case 1: case 3:
+ case 5: case 7:
+ func (stream, "32");
+ break;
+
+ case 9: case 11:
+ func (stream, "16");
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case MVE_VQSHRN:
+ case MVE_VQSHRUN:
+ case MVE_VQRSHRN:
+ case MVE_VQRSHRUN:
+ case MVE_VRSHRN:
+ case MVE_VSHRN:
+ {
+ switch (size)
+ {
+ case 1:
+ func (stream, "16");
+ break;
+
+ case 2: case 3:
+ func (stream, "32");
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ case MVE_VQSHL_T2:
+ case MVE_VQSHLU_T3:
+ case MVE_VRSHR:
+ case MVE_VSHL_T1:
+ case MVE_VSHLL_T1:
+ case MVE_VSHR:
+ case MVE_VSLI:
+ case MVE_VSRI:
+ {
+ switch (size)
+ {
+ case 1:
+ func (stream, "8");
+ break;
+
+ case 2: case 3:
+ func (stream, "16");
+ break;
+
+ case 4: case 5: case 6: case 7:
+ func (stream, "32");
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
default:
break;
}
}
+/* Print the shift amount for an MVE shift-by-immediate instruction.
+   The imm6 field (bits 16-21; only bits 16-20 for VSHLL T1) encodes
+   both the element size and the shift count.  For the instructions in
+   startAt0 the count is imm6 minus the element size; for the rest it
+   is twice the element size minus imm6.  An imm6 with no size bit set
+   is an undefined encoding.  */
+
+static void
+print_mve_shift_n (struct disassemble_info *info, long given,
+		   enum mve_instructions matched_insn)
+{
+  void *stream = info->stream;
+  fprintf_ftype func = info->fprintf_func;
+
+  int startAt0
+    = matched_insn == MVE_VQSHL_T2
+      || matched_insn == MVE_VQSHLU_T3
+      || matched_insn == MVE_VSHL_T1
+      || matched_insn == MVE_VSHLL_T1
+      || matched_insn == MVE_VSLI;
+
+  unsigned imm6 = (given & 0x3f0000) >> 16;
+
+  /* VSHLL T1 only has a 5-bit immediate field.  */
+  if (matched_insn == MVE_VSHLL_T1)
+    imm6 &= 0x1f;
+
+  unsigned shiftAmount = 0;
+  if ((imm6 & 0x20) != 0)
+    shiftAmount = startAt0 ? imm6 - 32 : 64 - imm6;
+  else if ((imm6 & 0x10) != 0)
+    shiftAmount = startAt0 ? imm6 - 16 : 32 - imm6;
+  else if ((imm6 & 0x08) != 0)
+    shiftAmount = startAt0 ? imm6 - 8 : 16 - imm6;
+  else
+    /* NOTE(review): after reporting the undefined size this still falls
+       through and prints a shift amount of 0.  */
+    print_mve_undefined (info, UNDEF_SIZE_0);
+
+  func (stream, "%u", shiftAmount);
+}
+
static void
print_vec_condition (struct disassemble_info *info, long given,
enum mve_instructions matched_insn)
recognised coprocessor instruction. */
static bfd_boolean
-print_insn_coprocessor (bfd_vma pc,
- struct disassemble_info *info,
- long given,
- bfd_boolean thumb)
+print_insn_coprocessor_1 (const struct sopcode32 *opcodes,
+ bfd_vma pc,
+ struct disassemble_info *info,
+ long given,
+ bfd_boolean thumb)
{
const struct sopcode32 *insn;
void *stream = info->stream;
allowed_arches = private_data->features;
- for (insn = coprocessor_opcodes; insn->assembler; insn++)
+ for (insn = opcodes; insn->assembler; insn++)
{
unsigned long u_reg = 16;
bfd_boolean is_unpredictable = FALSE;
&& (cp_num == 8 || cp_num == 14 || cp_num == 15))
continue;
}
+ else if ((insn->value == 0xec100f80 /* vldr (system register) */
+ || insn->value == 0xec000f80) /* vstr (system register) */
+ && arm_decode_field (given, 24, 24) == 0
+ && arm_decode_field (given, 21, 21) == 0)
+ /* If the P and W bits are both 0 then these encodings match the MVE
+ VLDR and VSTR instructions, these are in a different table, so we
+ don't let it match here. */
+ continue;
for (c = insn->assembler; *c; c++)
{
if (cond != COND_UNCOND && cp_num == 9)
is_unpredictable = TRUE;
+ /* Fall through. */
+ case 'b':
func (stream, "%s", arm_conditional[cond]);
break;
return FALSE;
}
+/* Disassemble GIVEN as a coprocessor instruction using the main
+   coprocessor opcode table.  */
+
+static bfd_boolean
+print_insn_coprocessor (bfd_vma pc,
+			struct disassemble_info *info,
+			long given,
+			bfd_boolean thumb)
+{
+  return print_insn_coprocessor_1 (coprocessor_opcodes,
+				   pc, info, given, thumb);
+}
+
+/* Disassemble GIVEN as a coprocessor instruction using the generic
+   coprocessor opcode table.  */
+
+static bfd_boolean
+print_insn_generic_coprocessor (bfd_vma pc,
+				struct disassemble_info *info,
+				long given,
+				bfd_boolean thumb)
+{
+  return print_insn_coprocessor_1 (generic_coprocessor_opcodes,
+				   pc, info, given, thumb);
+}
+
/* Decodes and prints ARM addressing modes. Returns the offset
used in the address, if any, if it is worthwhile printing the
offset as a hexadecimal value in a comment at the end of the
}
else if ((given & 0xff000000) == 0xf9000000)
given ^= 0xf9000000 ^ 0xf4000000;
+ /* BFloat16 neon instructions without special top byte handling. */
+ else if ((given & 0xff000000) == 0xfe000000
+ || (given & 0xff000000) == 0xfc000000)
+ ;
/* vdup is also a valid neon instruction. */
else if ((given & 0xff910f5f) != 0xee800b10)
return FALSE;
if (is_mve_undefined (given, insn->mve_op, &undefined_cond))
is_undefined = TRUE;
+ /* In "VORR Qd, Qm, Qn", if Qm==Qn, VORR is nothing but VMOV,
+ i.e "VMOV Qd, Qm". */
+ if ((insn->mve_op == MVE_VORR_REG)
+ && (arm_decode_field (given, 1, 3)
+ == arm_decode_field (given, 17, 19)))
+ continue;
+
for (c = insn->assembler; *c; c++)
{
if (*c == '%')
func (stream, "%%");
break;
+ case 'a':
+ /* Don't print anything for '+' as it is implied. */
+ if (arm_decode_field (given, 23, 23) == 0)
+ func (stream, "-");
+ break;
+
case 'c':
if (ifthen_state)
func (stream, "%s", arm_conditional[IFTHEN_COND]);
break;
+ case 'd':
+ print_mve_vld_str_addr (info, given, insn->mve_op);
+ break;
+
case 'i':
{
long mve_mask = mve_extract_pred_mask (given);
}
break;
+ case 'j':
+ {
+ unsigned int imm5 = 0;
+ imm5 |= arm_decode_field (given, 6, 7);
+ imm5 |= (arm_decode_field (given, 12, 14) << 2);
+ func (stream, "#%u", (imm5 == 0) ? 32 : imm5);
+ }
+ break;
+
+ case 'k':
+ func (stream, "#%u",
+ (arm_decode_field (given, 7, 7) == 0) ? 64 : 48);
+ break;
+
case 'n':
print_vec_condition (info, given, insn->mve_op);
break;
+ case 'o':
+ if (arm_decode_field (given, 0, 0) == 1)
+ {
+ unsigned long size
+ = arm_decode_field (given, 4, 4)
+ | (arm_decode_field (given, 6, 6) << 1);
+
+ func (stream, ", uxtw #%lu", size);
+ }
+ break;
+
+ case 'm':
+ print_mve_rounding_mode (info, given, insn->mve_op);
+ break;
+
+ case 's':
+ print_mve_vcvt_size (info, given, insn->mve_op);
+ break;
+
+ case 'u':
+ {
+ unsigned long op1 = arm_decode_field (given, 21, 22);
+
+ if ((insn->mve_op == MVE_VMOV_VEC_LANE_TO_GP))
+ {
+ /* Check for signed. */
+ if (arm_decode_field (given, 23, 23) == 0)
+ {
+ /* We don't print 's' for S32. */
+ if ((arm_decode_field (given, 5, 6) == 0)
+ && ((op1 == 0) || (op1 == 1)))
+ ;
+ else
+ func (stream, "s");
+ }
+ else
+ func (stream, "u");
+ }
+ else
+ {
+ if (arm_decode_field (given, 28, 28) == 0)
+ func (stream, "s");
+ else
+ func (stream, "u");
+ }
+ }
+ break;
+
case 'v':
print_instruction_predicate (info);
break;
+ case 'w':
+ if (arm_decode_field (given, 21, 21) == 1)
+ func (stream, "!");
+ break;
+
+ case 'B':
+ print_mve_register_blocks (info, given, insn->mve_op);
+ break;
+
+ case 'E':
+ /* SIMD encoded constant for mov, mvn, vorr, vbic. */
+
+ print_simd_imm8 (info, given, 28, insn);
+ break;
+
+ case 'N':
+ print_mve_vmov_index (info, given);
+ break;
+
+ case 'T':
+ if (arm_decode_field (given, 12, 12) == 0)
+ func (stream, "b");
+ else
+ func (stream, "t");
+ break;
+
+ case 'X':
+ if (arm_decode_field (given, 12, 12) == 1)
+ func (stream, "x");
+ break;
+
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
{
else
func (stream, "%s", arm_regnames[value]);
break;
+
+ case 'c':
+ func (stream, "%s", arm_conditional[value]);
+ break;
+
+ case 'C':
+ value ^= 1;
+ func (stream, "%s", arm_conditional[value]);
+ break;
+
+ case 'S':
+ if (value == 13 || value == 15)
+ is_unpredictable = TRUE;
+ else
+ func (stream, "%s", arm_regnames[value]);
+ break;
+
case 's':
print_mve_size (info,
value,
insn->mve_op);
break;
+ case 'I':
+ if (value == 1)
+ func (stream, "i");
+ break;
+ case 'A':
+ if (value == 1)
+ func (stream, "a");
+ break;
+ case 'h':
+ {
+ unsigned int odd_reg = (value << 1) | 1;
+ func (stream, "%s", arm_regnames[odd_reg]);
+ }
+ break;
+ case 'i':
+ {
+ unsigned long imm
+ = arm_decode_field (given, 0, 6);
+ unsigned long mod_imm = imm;
+
+ switch (insn->mve_op)
+ {
+ case MVE_VLDRW_GATHER_T5:
+ case MVE_VSTRW_SCATTER_T5:
+ mod_imm = mod_imm << 2;
+ break;
+ case MVE_VSTRD_SCATTER_T6:
+ case MVE_VLDRD_GATHER_T6:
+ mod_imm = mod_imm << 3;
+ break;
+
+ default:
+ break;
+ }
+
+ func (stream, "%lu", mod_imm);
+ }
+ break;
+ case 'k':
+ func (stream, "%lu", 64 - value);
+ break;
+ case 'l':
+ {
+ unsigned int even_reg = value << 1;
+ func (stream, "%s", arm_regnames[even_reg]);
+ }
+ break;
+ case 'u':
+ switch (value)
+ {
+ case 0:
+ func (stream, "1");
+ break;
+ case 1:
+ func (stream, "2");
+ break;
+ case 2:
+ func (stream, "4");
+ break;
+ case 3:
+ func (stream, "8");
+ break;
+ default:
+ break;
+ }
+ break;
+ case 'o':
+ print_mve_rotate (info, value, width);
+ break;
case 'r':
func (stream, "%s", arm_regnames[value]);
break;
+ case 'd':
+ if (insn->mve_op == MVE_VQSHL_T2
+ || insn->mve_op == MVE_VQSHLU_T3
+ || insn->mve_op == MVE_VRSHR
+ || insn->mve_op == MVE_VRSHRN
+ || insn->mve_op == MVE_VSHL_T1
+ || insn->mve_op == MVE_VSHLL_T1
+ || insn->mve_op == MVE_VSHR
+ || insn->mve_op == MVE_VSHRN
+ || insn->mve_op == MVE_VSLI
+ || insn->mve_op == MVE_VSRI)
+ print_mve_shift_n (info, given, insn->mve_op);
+ else if (insn->mve_op == MVE_VSHLL_T2)
+ {
+ switch (value)
+ {
+ case 0x00:
+ func (stream, "8");
+ break;
+ case 0x01:
+ func (stream, "16");
+ break;
+ case 0x10:
+ print_mve_undefined (info, UNDEF_SIZE_0);
+ break;
+ default:
+ assert (0);
+ break;
+ }
+ }
+ else
+ {
+ if (insn->mve_op == MVE_VSHLC && value == 0)
+ value = 32;
+ func (stream, "%ld", value);
+ value_in_comment = value;
+ }
+ break;
+ case 'F':
+ func (stream, "s%ld", value);
+ break;
case 'Q':
if (value & 0x8)
func (stream, "<illegal reg q%ld.5>", value);
else
func (stream, "q%ld", value);
break;
+ case 'x':
+ func (stream, "0x%08lx", value);
+ break;
default:
abort ();
}
if (print_insn_neon (info, given, FALSE))
return;
+ if (print_insn_generic_coprocessor (pc, info, given, FALSE))
+ return;
+
for (insn = arm_opcodes; insn->assembler; insn++)
{
if ((given & insn->mask) != insn->value)
unsigned int immed = (given & 0xff);
unsigned int a, i;
- a = (((immed << (32 - rotate))
- | (immed >> rotate)) & 0xffffffff);
+ a = (immed << ((32 - rotate) & 31)
+ | immed >> rotate) & 0xffffffff;
/* If there is another encoding with smaller rotate,
the rotate should be specified directly. */
for (i = 0; i < 32; i += 2)
- if ((a << i | a >> (32 - i)) <= 0xff)
+ if ((a << i | a >> ((32 - i) & 31)) <= 0xff)
break;
if (i != rotate)
if (is_mve && print_insn_mve (info, given))
return;
+ if (print_insn_generic_coprocessor (pc, info, given, TRUE))
+ return;
+
for (insn = thumb32_opcodes; insn->assembler; insn++)
if ((given & insn->mask) == insn->value)
{
switch (*c)
{
+ case 's':
+ if (val <= 3)
+ func (stream, "%s", mve_vec_sizename[val]);
+ else
+ func (stream, "<undef size>");
+ break;
+
case 'd':
func (stream, "%lu", val);
value_in_comment = val;
case bfd_mach_arm_7EM: ARM_SET_FEATURES (ARM_ARCH_V7EM); break;
case bfd_mach_arm_8:
{
- /* Add bits for extensions that Armv8.5-A recognizes. */
- arm_feature_set armv8_5_ext_fset
+ /* Add bits for extensions that Armv8.6-A recognizes. */
+ arm_feature_set armv8_6_ext_fset
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
- ARM_SET_FEATURES (ARM_ARCH_V8_5A);
- ARM_MERGE_FEATURE_SETS (arch_fset, arch_fset, armv8_5_ext_fset);
+ ARM_SET_FEATURES (ARM_ARCH_V8_6A);
+ ARM_MERGE_FEATURE_SETS (arch_fset, arch_fset, armv8_6_ext_fset);
break;
}
case bfd_mach_arm_8R: ARM_SET_FEATURES (ARM_ARCH_V8R); break;
print_insn (bfd_vma pc, struct disassemble_info *info, bfd_boolean little)
{
unsigned char b[4];
- long given;
+ unsigned long given;
int status;
int is_thumb = FALSE;
int is_data = FALSE;
status = info->read_memory_func (pc, (bfd_byte *) b, 4, info);
if (little_code)
- given = (b[0]) | (b[1] << 8) | (b[2] << 16) | (b[3] << 24);
+ given = (b[0]) | (b[1] << 8) | (b[2] << 16) | ((unsigned) b[3] << 24);
else
- given = (b[3]) | (b[2] << 8) | (b[1] << 16) | (b[0] << 24);
+ given = (b[3]) | (b[2] << 8) | (b[1] << 16) | ((unsigned) b[0] << 24);
}
else
{