MVE_VBIC_IMM,
MVE_VBIC_REG,
MVE_VMOVX,
+ MVE_VMOVL,
+ MVE_VMOVN,
+ MVE_VMULL_INT,
+ MVE_VMULL_POLY,
+ MVE_VQDMULL_T1,
+ MVE_VQDMULL_T2,
+ MVE_VQMOVN,
+ MVE_VQMOVUN,
+ MVE_VADDV,
+ MVE_VMLADAV_T1,
+ MVE_VMLADAV_T2,
+ MVE_VMLALDAV,
+ MVE_VMLAS,
+ MVE_VADDLV,
+ MVE_VMLSDAV_T1,
+ MVE_VMLSDAV_T2,
+ MVE_VMLSLDAV,
+ MVE_VRMLALDAVH,
+ MVE_VRMLSLDAVH,
+ MVE_VQDMLADH,
+ MVE_VQRDMLADH,
+ MVE_VQDMLAH,
+ MVE_VQRDMLAH,
+ MVE_VQDMLASH,
+ MVE_VQRDMLASH,
+ MVE_VQDMLSDH,
+ MVE_VQRDMLSDH,
+ MVE_VQDMULH_T1,
+ MVE_VQRDMULH_T2,
+ MVE_VQDMULH_T3,
+ MVE_VQRDMULH_T4,
+ MVE_VDDUP,
+ MVE_VDWDUP,
+ MVE_VIWDUP,
+ MVE_VIDUP,
+ MVE_VCADD_FP,
+ MVE_VCADD_VEC,
+ MVE_VHCADD,
+ MVE_VCMLA_FP,
+ MVE_VCMUL_FP,
+ MVE_VQRSHL_T1,
+ MVE_VQRSHL_T2,
+ MVE_VQRSHRN,
+ MVE_VQRSHRUN,
+ MVE_VQSHL_T1,
+ MVE_VQSHL_T2,
+ MVE_VQSHLU_T3,
+ MVE_VQSHL_T4,
+ MVE_VQSHRN,
+ MVE_VQSHRUN,
+ MVE_VRSHL_T1,
+ MVE_VRSHL_T2,
+ MVE_VRSHR,
+ MVE_VRSHRN,
+ MVE_VSHL_T1,
+ MVE_VSHL_T2,
+ MVE_VSHL_T3,
+ MVE_VSHLC,
+ MVE_VSHLL_T1,
+ MVE_VSHLL_T2,
+ MVE_VSHR,
+ MVE_VSHRN,
+ MVE_VSLI,
+ MVE_VSRI,
+ MVE_VADC,
+ MVE_VABAV,
+ MVE_VABD_FP,
+ MVE_VABD_VEC,
+ MVE_VABS_FP,
+ MVE_VABS_VEC,
+ MVE_VADD_FP_T1,
+ MVE_VADD_FP_T2,
+ MVE_VADD_VEC_T1,
+ MVE_VADD_VEC_T2,
+ MVE_VSBC,
+ MVE_VSUB_FP_T1,
+ MVE_VSUB_FP_T2,
+ MVE_VSUB_VEC_T1,
+ MVE_VSUB_VEC_T2,
MVE_NONE
};
enum mve_undefined
{
+ UNDEF_SIZE, /* undefined size. */
UNDEF_SIZE_0, /* undefined because size == 0. */
UNDEF_SIZE_2, /* undefined because size == 2. */
UNDEF_SIZE_3, /* undefined because size == 3. */
op2 == 0 and op1 == (0 or 1). */
UNDEF_OP_0_BAD_CMODE, /* undefined because op == 0 and cmode
in {0xx1, x0x1}. */
+ UNDEF_XCHG_UNS, /* undefined because X == 1 and U == 1. */
UNDEF_NONE /* no undefined behavior. */
};
%B print any one of the v{st,ld}[24] operands
%E print vmov, vmvn, vorr, vbic encoded constant
%N print generic index for vmov
+ %T print bottom ('b') or top ('t') of source register
+ %X print exchange field in vmla* instructions
%<bitfield>r print as an ARM register
%<bitfield>d print the bitfield in decimal
+ %<bitfield>A print 'a' (accumulate variant) if the bit is set
%<bitfield>Q print as a MVE Q register
%<bitfield>F print as a MVE S register
%<bitfield>Z as %<>r but r15 is ZR instead of PC and r13 is
UNPREDICTABLE
%<bitfield>s print size for vector predicate & non VMOV instructions
+ %<bitfield>I print 'i' (carry-in variant of vadc/vsbc) if the bit is set
%<bitfield>i print immediate for vstr/vldr reg +/- imm
+ %<bitfield>h print high half of 64-bit destination reg
%<bitfield>k print immediate for vector conversion instruction
+ %<bitfield>l print low half of 64-bit destination reg
+ %<bitfield>o print rotate amount for vcadd/vhcadd/vcmla/vcmul
+ %<bitfield>u print immediate step for vddup/vdwdup/vidup/viwdup
%<bitfield>x print the bitfield in hex.
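+
+ For example, in the VABAV entry of the opcode table below, the
+ template "vabav%v.%u%20-21s\t%12-15r, %17-19,7Q, %1-3,5Q" prints the
+ vector predicate (%v), the signedness letter (%u), the element size
+ held in bits 20-21 (%20-21s), the ARM register held in bits 12-15
+ (%12-15r) and the two MVE Q register operands.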
- */
+ */
static const struct mopcode32 mve_opcodes[] =
{
0xef100150, 0xffb11f51,
"vbic%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+ /* Vector VABAV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VABAV,
+ 0xee800f01, 0xefc10f51,
+ "vabav%v.%u%20-21s\t%12-15r, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VABD floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VABD_FP,
+ 0xff200d40, 0xffa11f51,
+ "vabd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VABD. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VABD_VEC,
+ 0xef000740, 0xef811f51,
+ "vabd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VABS floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VABS_FP,
+ 0xffb10740, 0xffb31fd1,
+ "vabs%v.f%18-19s\t%13-15,22Q, %1-3,5Q"},
+ /* Vector VABS. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VABS_VEC,
+ 0xffb10340, 0xffb31fd1,
+ "vabs%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VADD floating point T1. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VADD_FP_T1,
+ 0xef000d40, 0xffa11f51,
+ "vadd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+ /* Vector VADD floating point T2. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VADD_FP_T2,
+ 0xee300f40, 0xefb11f70,
+ "vadd%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+ /* Vector VADD T1. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADD_VEC_T1,
+ 0xef000840, 0xff811f51,
+ "vadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+ /* Vector VADD T2. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADD_VEC_T2,
+ 0xee010f40, 0xff811f70,
+ "vadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VADDLV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADDLV,
+ 0xee890f00, 0xef8f1fd1,
+ "vaddlv%5A%v.%u32\t%13-15l, %20-22h, %1-3Q"},
+
+ /* Vector VADDV. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADDV,
+ 0xeef10f00, 0xeff31fd1,
+ "vaddv%5A%v.%u%18-19s\t%13-15l, %1-3Q"},
+
+ /* Vector VADC. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VADC,
+ 0xee300f00, 0xffb10f51,
+ "vadc%12I%v.i32\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VCADD floating point. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCADD_FP,
+ 0xfc800840, 0xfea11f51,
+ "vcadd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%24o"},
+
+ /* Vector VCADD. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VCADD_VEC,
+ 0xfe000f00, 0xff810f51,
+ "vcadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%12o"},
+
+ /* Vector VCMLA. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCMLA_FP,
+ 0xfc200840, 0xfe211f51,
+ "vcmla%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%23-24o"},
+
/* Vector VCMP floating point T1. */
{ARM_FEATURE_COPROC (FPU_MVE_FP),
MVE_VCMP_FP_T1,
0xee001f40, 0xef811f70,
"vhsub%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+ /* Vector VCMUL. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VCMUL_FP,
+ 0xee300e00, 0xefb10f50,
+ "vcmul%v.f%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%0,12o"},
+
/* Vector VDUP. */
{ARM_FEATURE_COPROC (FPU_MVE),
MVE_VDUP,
0xffb30040, 0xffb31c51,
"vcvt%m%v.%s\t%13-15,22Q, %1-3,5Q"},
+ /* Vector VDDUP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VDDUP,
+ 0xee011f6e, 0xff811f7e,
+ "vddup%v.u%20-21s\t%13-15,22Q, %17-19l, #%0,7u"},
+
+ /* Vector VDWDUP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VDWDUP,
+ 0xee011f60, 0xff811f70,
+ "vdwdup%v.u%20-21s\t%13-15,22Q, %17-19l, %1-3h, #%0,7u"},
+
+ /* Vector VHCADD. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VHCADD,
+ 0xee000f00, 0xff810f51,
+ "vhcadd%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%12o"},
+
+ /* Vector VIWDUP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VIWDUP,
+ 0xee010f60, 0xff811f70,
+ "viwdup%v.u%20-21s\t%13-15,22Q, %17-19l, %1-3h, #%0,7u"},
+
+ /* Vector VIDUP. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VIDUP,
+ 0xee010f6e, 0xff811f7e,
+ "vidup%v.u%20-21s\t%13-15,22Q, %17-19l, #%0,7u"},
+
/* Vector VLD2. */
{ARM_FEATURE_COPROC (FPU_MVE),
MVE_VLD2,
0xec101f00, 0xfe101f80,
"vldrw%v.u32\t%13-15,22Q, %d"},
+ /* Vector VMLALDAV. Note must appear before VMLADAV due to instruction
+ opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLALDAV,
+ 0xee801e00, 0xef801f51,
+ "vmlaldav%5Ax%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
+
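+ /* Vector VMLALV, same as VMLALDAV but with X == 0. */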
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLALDAV,
+ 0xee800e00, 0xef801f51,
+ "vmlalv%5A%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
+
+ /* Vector VMLAV T1 variant, same as VMLADAV but with X == 0. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLADAV_T1,
+ 0xeef00e00, 0xeff01f51,
+ "vmlav%5A%v.%u%16s\t%13-15l, %17-19,7Q, %1-3Q"},
+
+ /* Vector VMLAV T2 variant, same as VMLADAV but with X == 0. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLADAV_T2,
+ 0xeef00f00, 0xeff11f51,
+ "vmlav%5A%v.%u8\t%13-15l, %17-19,7Q, %1-3Q"},
+
+ /* Vector VMLADAV T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLADAV_T1,
+ 0xeef01e00, 0xeff01f51,
+ "vmladav%5Ax%v.%u%16s\t%13-15l, %17-19,7Q, %1-3Q"},
+
+ /* Vector VMLADAV T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLADAV_T2,
+ 0xeef01f00, 0xeff11f51,
+ "vmladav%5Ax%v.%u8\t%13-15l, %17-19,7Q, %1-3Q"},
+
+ /* Vector VMLAS. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLAS,
+ 0xee011e40, 0xef811f70,
+ "vmlas%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VRMLSLDAVH. Note must appear before VMLSDAV due to instruction
+ opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRMLSLDAVH,
+ 0xfe800e01, 0xff810f51,
+ "vrmlsldavh%5A%X%v.s32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
+
+ /* Vector VMLSLDAV. Note must appear before VMLSDAV due to instruction
+ opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLSLDAV,
+ 0xee800e01, 0xff800f51,
+ "vmlsldav%5A%X%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
+
+ /* Vector VMLSDAV T1 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLSDAV_T1,
+ 0xeef00e01, 0xfff00f51,
+ "vmlsdav%5A%X%v.s%16s\t%13-15l, %17-19,7Q, %1-3Q"},
+
+ /* Vector VMLSDAV T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMLSDAV_T2,
+ 0xfef00e01, 0xfff10f51,
+ "vmlsdav%5A%X%v.s8\t%13-15l, %17-19,7Q, %1-3Q"},
+
/* Vector VMOV between gpr and half precision register, op == 0. */
{ARM_FEATURE_COPROC (FPU_MVE_FP),
MVE_VMOV_HFP_TO_GP,
0xef800050, 0xefb810f0,
"vorr%v.i%8-11s\t%13-15,22Q, %E"},
+ /* Vector VQSHL T2 Variant.
+ NOTE: MVE_VQSHL_T2 must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHL_T2,
+ 0xef800750, 0xef801fd1,
+ "vqshl%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VQSHLU T3 Variant.
+ NOTE: MVE_VQSHLU_T3 must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHLU_T3,
+ 0xff800650, 0xff801fd1,
+ "vqshlu%v.s%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VRSHR.
+ NOTE: MVE_VRSHR must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRSHR,
+ 0xef800250, 0xef801fd1,
+ "vrshr%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VSHL T1 Variant.
+ NOTE: MVE_VSHL_T1 must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHL_T1,
+ 0xef800550, 0xff801fd1,
+ "vshl%v.i%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VSHR.
+ NOTE: MVE_VSHR must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHR,
+ 0xef800050, 0xef801fd1,
+ "vshr%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VSLI.
+ NOTE: MVE_VSLI must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSLI,
+ 0xff800550, 0xff801fd1,
+ "vsli%v.%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VSRI.
+ NOTE: MVE_VSRI must appear in the table before
+ MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSRI,
+ 0xff800450, 0xff801fd1,
+ "vsri%v.%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
/* Vector VMOV immediate to vector,
cmode == 11x1 -> VMVN which is UNDEFINED
for such a cmode. */
0xee100b10, 0xff100f1f,
"vmov%c.%u%5-6,21-22s\t%12-15r, %17-19,7Q[%N]"},
+ /* Vector VSHLL T1 Variant. Note: VSHLL T1 must appear before MVE_VMOVL due
+ to instruction opcode aliasing. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHLL_T1,
+ 0xeea00f40, 0xefa00fd1,
+ "vshll%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VMOVL long. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOVL,
+ 0xeea00f40, 0xefa70fd1,
+ "vmovl%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VMOV and narrow. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMOVN,
+ 0xfe310e81, 0xffb30fd1,
+ "vmovn%T%v.i%18-19s\t%13-15,22Q, %1-3,5Q"},
+
/* Floating point move extract. */
{ARM_FEATURE_COPROC (FPU_MVE_FP),
MVE_VMOVX,
0xfeb00a40, 0xffbf0fd0,
"vmovx.f16\t%22,12-15F, %5,0-3F"},
+ /* Vector VMULL integer. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMULL_INT,
+ 0xee010e00, 0xef810f51,
+ "vmull%T%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VMULL polynomial. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VMULL_POLY,
+ 0xee310e00, 0xefb10f51,
+ "vmull%T%v.%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
/* Vector VMVN immediate to vector. */
{ARM_FEATURE_COPROC (FPU_MVE),
MVE_VMVN_IMM,
0xef200150, 0xffb11f51,
"vorr%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+ /* Vector VQDMULL T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMULL_T1,
+ 0xee300f01, 0xefb10f51,
+ "vqdmull%T%v.s%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQDMULL T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMULL_T2,
+ 0xee300f60, 0xefb10f70,
+ "vqdmull%T%v.s%28s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQMOVN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQMOVN,
+ 0xee330e01, 0xefb30fd1,
+ "vqmovn%T%v.%u%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VQMOVUN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQMOVUN,
+ 0xee310e81, 0xffb30fd1,
+ "vqmovun%T%v.s%18-19s\t%13-15,22Q, %1-3,5Q"},
+
+ /* Vector VQDMLADH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMLADH,
+ 0xee000e00, 0xff810f51,
+ "vqdmladh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQRDMLADH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMLADH,
+ 0xee000e01, 0xff810f51,
+ "vqrdmladh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQDMLAH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMLAH,
+ 0xee000e60, 0xef811f70,
+ "vqdmlah%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQRDMLAH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMLAH,
+ 0xee000e40, 0xef811f70,
+ "vqrdmlah%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQDMLASH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMLASH,
+ 0xee001e60, 0xef811f70,
+ "vqdmlash%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQRDMLASH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMLASH,
+ 0xee001e40, 0xef811f70,
+ "vqrdmlash%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQDMLSDH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMLSDH,
+ 0xfe000e00, 0xff810f51,
+ "vqdmlsdh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQRDMLSDH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMLSDH,
+ 0xfe000e01, 0xff810f51,
+ "vqrdmlsdh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQDMULH T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMULH_T1,
+ 0xef000b40, 0xff811f51,
+ "vqdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQRDMULH T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMULH_T2,
+ 0xff000b40, 0xff811f51,
+ "vqrdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VQDMULH T3 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQDMULH_T3,
+ 0xee010e60, 0xff811f70,
+ "vqdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQRDMULH T4 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRDMULH_T4,
+ 0xfe010e60, 0xff811f70,
+ "vqrdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VQRSHL T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRSHL_T1,
+ 0xef000550, 0xef811f51,
+ "vqrshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"},
+
+ /* Vector VQRSHL T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRSHL_T2,
+ 0xee331ee0, 0xefb31ff0,
+ "vqrshl%v.%u%18-19s\t%13-15,22Q, %0-3r"},
+
+ /* Vector VQRSHRN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRSHRN,
+ 0xee800f41, 0xefa00fd1,
+ "vqrshrn%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VQRSHRUN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQRSHRUN,
+ 0xfe800fc0, 0xffa00fd1,
+ "vqrshrun%T%v.s%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VQSHL T1 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHL_T1,
+ 0xee311ee0, 0xefb31ff0,
+ "vqshl%v.%u%18-19s\t%13-15,22Q, %0-3r"},
+
+ /* Vector VQSHL T4 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHL_T4,
+ 0xef000450, 0xef811f51,
+ "vqshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"},
+
+ /* Vector VQSHRN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHRN,
+ 0xee800f40, 0xefa00fd1,
+ "vqshrn%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VQSHRUN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VQSHRUN,
+ 0xee800fc0, 0xffa00fd1,
+ "vqshrun%T%v.s%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
/* Vector VRINT floating point. */
{ARM_FEATURE_COPROC (FPU_MVE_FP),
MVE_VRINT_FP,
0xffb20440, 0xffb31c51,
"vrint%m%v.f%18-19s\t%13-15,22Q, %1-3,5Q"},
+ /* Vector VRMLALVH, same as VRMLALDAVH but with X == 0. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRMLALDAVH,
+ 0xee800f00, 0xef811f51,
+ "vrmlalvh%5A%v.%u32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
+
+ /* Vector VRMLALDAVH. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRMLALDAVH,
+ 0xee801f00, 0xef811f51,
+ "vrmlaldavh%5Ax%v.%u32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"},
+
+ /* Vector VRSHL T1 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRSHL_T1,
+ 0xef000540, 0xef811f51,
+ "vrshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"},
+
+ /* Vector VRSHL T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRSHL_T2,
+ 0xee331e60, 0xefb31ff0,
+ "vrshl%v.%u%18-19s\t%13-15,22Q, %0-3r"},
+
+ /* Vector VRSHRN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VRSHRN,
+ 0xfe800fc1, 0xffa00fd1,
+ "vrshrn%T%v.i%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
+ /* Vector VSBC. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSBC,
+ 0xfe300f00, 0xffb10f51,
+ "vsbc%12I%v.i32\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VSHL T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHL_T2,
+ 0xee311e60, 0xefb31ff0,
+ "vshl%v.%u%18-19s\t%13-15,22Q, %0-3r"},
+
+ /* Vector VSHL T3 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHL_T3,
+ 0xef000440, 0xef811f51,
+ "vshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"},
+
+ /* Vector VSHLC. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHLC,
+ 0xeea00fc0, 0xffa01ff0,
+ "vshlc%v\t%13-15,22Q, %0-3r, #%16-20d"},
+
+ /* Vector VSHLL T2 Variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHLL_T2,
+ 0xee310e01, 0xefb30fd1,
+ "vshll%T%v.%u%18-19s\t%13-15,22Q, %1-3,5Q, #%18-19d"},
+
+ /* Vector VSHRN. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSHRN,
+ 0xee800fc1, 0xffa00fd1,
+ "vshrn%T%v.i%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"},
+
/* Vector VST2 no writeback. */
{ARM_FEATURE_COPROC (FPU_MVE),
MVE_VST2,
0xec001f00, 0xfe101f80,
"vstrw%v.32\t%13-15,22Q, %d"},
+ /* Vector VSUB floating point T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VSUB_FP_T1,
+ 0xef200d40, 0xffa11f51,
+ "vsub%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VSUB floating point T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE_FP),
+ MVE_VSUB_FP_T2,
+ 0xee301f40, 0xefb11f70,
+ "vsub%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
+ /* Vector VSUB T1 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSUB_VEC_T1,
+ 0xff000840, 0xff811f51,
+ "vsub%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"},
+
+ /* Vector VSUB T2 variant. */
+ {ARM_FEATURE_COPROC (FPU_MVE),
+ MVE_VSUB_VEC_T2,
+ 0xee011f40, 0xff811f70,
+ "vsub%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"},
+
{ARM_FEATURE_CORE_LOW (0),
MVE_NONE,
0x00000000, 0x00000000, 0}
/* Armv8.1-M Mainline and Armv8.1-M Mainline Security Extensions
instructions. */
{ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf040c001, 0xfff0f001, "wls\tlr, %16-19S, %Q"},
- {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
- 0xf040e001, 0xfff0ffff, "dls\tlr, %16-19S"},
+ 0xf00fe001, 0xffffffff, "lctp%c"},
{ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
0xf02fc001, 0xfffff001, "le\t%P"},
{ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
0xf00fc001, 0xfffff001, "le\tlr, %P"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf01fc001, 0xfffff001, "letp\tlr, %P"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf040c001, 0xfff0f001, "wls\tlr, %16-19S, %Q"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf000c001, 0xffc0f001, "wlstp.%20-21s\tlr, %16-19S, %Q"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf040e001, 0xfff0ffff, "dls\tlr, %16-19S"},
+ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
+ 0xf000e001, 0xffc0ffff, "dlstp.%20-21s\tlr, %16-19S"},
{ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN),
0xf040e001, 0xf860f001, "bf%c\t%G, %W"},
else
return FALSE;
+ case MVE_VADD_VEC_T2:
+ case MVE_VSUB_VEC_T2:
+ case MVE_VABAV:
+ case MVE_VQRSHL_T1:
+ case MVE_VQSHL_T4:
+ case MVE_VRSHL_T1:
+ case MVE_VSHL_T3:
+ case MVE_VCADD_VEC:
+ case MVE_VHCADD:
+ case MVE_VDDUP:
+ case MVE_VIDUP:
+ case MVE_VQRDMLADH:
+ case MVE_VQDMLAH:
+ case MVE_VQRDMLAH:
+ case MVE_VQDMLASH:
+ case MVE_VQRDMLASH:
+ case MVE_VQDMLSDH:
+ case MVE_VQRDMLSDH:
+ case MVE_VQDMULH_T3:
+ case MVE_VQRDMULH_T4:
+ case MVE_VQDMLADH:
+ case MVE_VMLAS:
+ case MVE_VMULL_INT:
case MVE_VHADD_T2:
case MVE_VHSUB_T2:
case MVE_VCMP_VEC_T1:
else
return FALSE;
+ case MVE_VMOVL:
+ {
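+ /* Sizes 0 and 3 belong to other encodings; see the VSHLL T1
+ aliasing note in the opcode table. */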
+ unsigned long size = arm_decode_field (given, 19, 20);
+ if ((size == 0) || (size == 3))
+ return TRUE;
+ else
+ return FALSE;
+ }
+
+ case MVE_VQRSHL_T2:
+ case MVE_VQSHL_T1:
+ case MVE_VRSHL_T2:
+ case MVE_VSHL_T2:
+ case MVE_VSHLL_T2:
+ case MVE_VADDV:
+ case MVE_VMOVN:
+ case MVE_VQMOVUN:
+ case MVE_VQMOVN:
+ if (arm_decode_field (given, 18, 19) == 3)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VMLSLDAV:
+ case MVE_VRMLSLDAVH:
+ case MVE_VMLALDAV:
+ case MVE_VADDLV:
+ if (arm_decode_field (given, 20, 22) == 7)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VRMLALDAVH:
+ if ((arm_decode_field (given, 20, 22) & 6) == 6)
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VDWDUP:
+ case MVE_VIWDUP:
+ if ((arm_decode_field (given, 20, 21) == 3)
+ || (arm_decode_field (given, 1, 3) == 7))
+ return TRUE;
+ else
+ return FALSE;
+
+ case MVE_VSHLL_T1:
+ if (arm_decode_field (given, 16, 18) == 0)
+ {
+ unsigned long sz = arm_decode_field (given, 19, 20);
+
+ if ((sz == 1) || (sz == 2))
+ return TRUE;
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VQSHL_T2:
+ case MVE_VQSHLU_T3:
+ case MVE_VRSHR:
+ case MVE_VSHL_T1:
+ case MVE_VSHR:
+ case MVE_VSLI:
+ case MVE_VSRI:
+ if (arm_decode_field (given, 19, 21) == 0)
+ return TRUE;
+ else
+ return FALSE;
+
default:
+ case MVE_VADD_FP_T1:
+ case MVE_VADD_FP_T2:
+ case MVE_VADD_VEC_T1:
return FALSE;
}
else
return FALSE;
+ case MVE_VABD_VEC:
+ case MVE_VADD_VEC_T1:
+ case MVE_VSUB_VEC_T1:
+ case MVE_VQDMULH_T1:
+ case MVE_VQRDMULH_T2:
case MVE_VRHADD:
case MVE_VHADD_T1:
case MVE_VHSUB_T1:
return FALSE;
}
+ case MVE_VABS_FP:
case MVE_VCVT_BETWEEN_FP_INT:
case MVE_VCVT_FROM_FP_TO_INT:
{
else
return FALSE;
+ case MVE_VSHLL_T2:
+ case MVE_VMOVN:
+ if (arm_decode_field (given, 18, 19) == 2)
+ {
+ *undefined_code = UNDEF_SIZE_2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VRMLALDAVH:
+ case MVE_VMLADAV_T1:
+ case MVE_VMLADAV_T2:
+ case MVE_VMLALDAV:
+ if ((arm_decode_field (given, 28, 28) == 1)
+ && (arm_decode_field (given, 12, 12) == 1))
+ {
+ *undefined_code = UNDEF_XCHG_UNS;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VQSHRN:
+ case MVE_VQSHRUN:
+ case MVE_VSHLL_T1:
+ case MVE_VSHRN:
+ {
+ unsigned long sz = arm_decode_field (given, 19, 20);
+ if (sz == 1)
+ return FALSE;
+ else if ((sz & 2) == 2)
+ return FALSE;
+ else
+ {
+ *undefined_code = UNDEF_SIZE;
+ return TRUE;
+ }
+ }
+ break;
+
+ case MVE_VQSHL_T2:
+ case MVE_VQSHLU_T3:
+ case MVE_VRSHR:
+ case MVE_VSHL_T1:
+ case MVE_VSHR:
+ case MVE_VSLI:
+ case MVE_VSRI:
+ {
+ unsigned long sz = arm_decode_field (given, 19, 21);
+ if ((sz & 7) == 1)
+ return FALSE;
+ else if ((sz & 6) == 2)
+ return FALSE;
+ else if ((sz & 4) == 4)
+ return FALSE;
+ else
+ {
+ *undefined_code = UNDEF_SIZE;
+ return TRUE;
+ }
+ }
+
+ case MVE_VQRSHRN:
+ case MVE_VQRSHRUN:
+ if (arm_decode_field (given, 19, 20) == 0)
+ {
+ *undefined_code = UNDEF_SIZE_0;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VABS_VEC:
+ if (arm_decode_field (given, 18, 19) == 3)
+ {
+ *undefined_code = UNDEF_SIZE_3;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
default:
return FALSE;
}
return FALSE;
}
+ case MVE_VADD_FP_T2:
+ case MVE_VSUB_FP_T2:
+ case MVE_VADD_VEC_T2:
+ case MVE_VSUB_VEC_T2:
+ case MVE_VQRSHL_T2:
+ case MVE_VQSHL_T1:
+ case MVE_VRSHL_T2:
+ case MVE_VSHL_T2:
+ case MVE_VSHLC:
+ case MVE_VQDMLAH:
+ case MVE_VQRDMLAH:
+ case MVE_VQDMLASH:
+ case MVE_VQRDMLASH:
+ case MVE_VQDMULH_T3:
+ case MVE_VQRDMULH_T4:
+ case MVE_VMLAS:
case MVE_VFMA_FP_SCALAR:
case MVE_VFMAS_FP_SCALAR:
case MVE_VHADD_T2:
return FALSE;
}
+ case MVE_VABAV:
case MVE_VMOV_HFP_TO_GP:
case MVE_VMOV_GP_TO_VEC_LANE:
case MVE_VMOV_VEC_LANE_TO_GP:
return FALSE;
}
+ case MVE_VQRDMLADH:
+ case MVE_VQDMLSDH:
+ case MVE_VQRDMLSDH:
+ case MVE_VQDMLADH:
+ case MVE_VMULL_INT:
+ {
+ unsigned long Qd;
+ unsigned long Qm;
+ unsigned long Qn;
+
+ if (arm_decode_field (given, 20, 21) == 2)
+ {
+ Qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ Qn = arm_decode_field_multiple (given, 17, 19, 7, 7);
+
+ if ((Qd == Qn) || (Qd == Qm))
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
+ }
+
+ case MVE_VCMUL_FP:
+ case MVE_VQDMULL_T1:
+ {
+ unsigned long Qd;
+ unsigned long Qm;
+ unsigned long Qn;
+
+ if (arm_decode_field (given, 28, 28) == 1)
+ {
+ Qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ Qn = arm_decode_field_multiple (given, 17, 19, 7, 7);
+
+ if ((Qd == Qn) || (Qd == Qm))
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
+ }
+
+ case MVE_VQDMULL_T2:
+ {
+ unsigned long gpr = arm_decode_field (given, 0, 3);
+ if (gpr == 0xd)
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
+ }
+ else if (gpr == 0xf)
+ {
+ *unpredictable_code = UNPRED_R15;
+ return TRUE;
+ }
+
+ if (arm_decode_field (given, 28, 28) == 1)
+ {
+ unsigned long Qd
+ = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long Qn = arm_decode_field_multiple (given, 17, 19, 7, 7);
+
+ if (Qd == Qn)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+
+ return FALSE;
+ }
+
+ case MVE_VMLSLDAV:
+ case MVE_VRMLSLDAVH:
+ case MVE_VMLALDAV:
+ case MVE_VADDLV:
+ if (arm_decode_field (given, 20, 22) == 6)
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VDWDUP:
+ case MVE_VIWDUP:
+ if (arm_decode_field (given, 1, 3) == 6)
+ {
+ *unpredictable_code = UNPRED_R13;
+ return TRUE;
+ }
+ else
+ return FALSE;
+
+ case MVE_VCADD_VEC:
+ case MVE_VHCADD:
+ {
+ unsigned long Qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ if ((Qd == Qm) && arm_decode_field (given, 20, 21) == 2)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_2;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+
+ case MVE_VCADD_FP:
+ {
+ unsigned long Qd = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ unsigned long Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ if ((Qd == Qm) && arm_decode_field (given, 20, 20) == 1)
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+
+ case MVE_VCMLA_FP:
+ {
+ unsigned long Qda;
+ unsigned long Qm;
+ unsigned long Qn;
+
+ if (arm_decode_field (given, 20, 20) == 1)
+ {
+ Qda = arm_decode_field_multiple (given, 13, 15, 22, 22);
+ Qm = arm_decode_field_multiple (given, 1, 3, 5, 5);
+ Qn = arm_decode_field_multiple (given, 17, 19, 7, 7);
+
+ if ((Qda == Qn) || (Qda == Qm))
+ {
+ *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1;
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+ else
+ return FALSE;
+
+ }
+
default:
return FALSE;
}
switch (undefined_code)
{
+ case UNDEF_SIZE:
+ func (stream, "illegal size");
+ break;
+
case UNDEF_SIZE_0:
func (stream, "size equals zero");
break;
func (stream, "op field equal 0 and bad cmode");
break;
+ case UNDEF_XCHG_UNS:
+ func (stream, "exchange and unsigned together");
+ break;
+
case UNDEF_NONE:
break;
}
}
}
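+
+/* Print the rotate amount of an MVE complex arithmetic instruction.
+ A one-bit rot field encodes a rotation of 90 or 270 degrees (vcadd,
+ vhcadd); a two-bit field encodes 0, 90, 180 or 270 (vcmla, vcmul). */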
+static void
+print_mve_rotate (struct disassemble_info *info, unsigned long rot,
+ unsigned long rot_width)
+{
+ void *stream = info->stream;
+ fprintf_ftype func = info->fprintf_func;
+
+ if (rot_width == 1)
+ {
+ switch (rot)
+ {
+ case 0:
+ func (stream, "90");
+ break;
+ case 1:
+ func (stream, "270");
+ break;
+ default:
+ break;
+ }
+ }
+ else if (rot_width == 2)
+ {
+ switch (rot)
+ {
+ case 0:
+ func (stream, "0");
+ break;
+ case 1:
+ func (stream, "90");
+ break;
+ case 2:
+ func (stream, "180");
+ break;
+ case 3:
+ func (stream, "270");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static void
print_instruction_predicate (struct disassemble_info *info)
{
switch (matched_insn)
{
+ case MVE_VABAV:
+ case MVE_VABD_VEC:
+ case MVE_VABS_FP:
+ case MVE_VABS_VEC:
+ case MVE_VADD_VEC_T1:
+ case MVE_VADD_VEC_T2:
+ case MVE_VADDV:
+ case MVE_VCADD_VEC:
case MVE_VCMP_VEC_T1:
case MVE_VCMP_VEC_T2:
case MVE_VCMP_VEC_T3:
case MVE_VCMP_VEC_T4:
case MVE_VCMP_VEC_T5:
case MVE_VCMP_VEC_T6:
+ case MVE_VDDUP:
+ case MVE_VDWDUP:
case MVE_VHADD_T1:
case MVE_VHADD_T2:
+ case MVE_VHCADD:
case MVE_VHSUB_T1:
case MVE_VHSUB_T2:
+ case MVE_VIDUP:
+ case MVE_VIWDUP:
case MVE_VLD2:
case MVE_VLD4:
case MVE_VLDRB_GATHER_T1:
case MVE_VLDRD_GATHER_T4:
case MVE_VLDRB_T1:
case MVE_VLDRH_T2:
+ case MVE_VMLAS:
case MVE_VPT_VEC_T1:
case MVE_VPT_VEC_T2:
case MVE_VPT_VEC_T3:
case MVE_VPT_VEC_T4:
case MVE_VPT_VEC_T5:
case MVE_VPT_VEC_T6:
+ case MVE_VQDMLADH:
+ case MVE_VQRDMLADH:
+ case MVE_VQDMLAH:
+ case MVE_VQRDMLAH:
+ case MVE_VQDMLASH:
+ case MVE_VQRDMLASH:
+ case MVE_VQDMLSDH:
+ case MVE_VQRDMLSDH:
+ case MVE_VQDMULH_T1:
+ case MVE_VQRDMULH_T2:
+ case MVE_VQDMULH_T3:
+ case MVE_VQRDMULH_T4:
+ case MVE_VQRSHL_T1:
+ case MVE_VQRSHL_T2:
+ case MVE_VQSHL_T1:
+ case MVE_VQSHL_T4:
case MVE_VRHADD:
case MVE_VRINT_FP:
+ case MVE_VRSHL_T1:
+ case MVE_VRSHL_T2:
+ case MVE_VSHL_T2:
+ case MVE_VSHL_T3:
+ case MVE_VSHLL_T2:
case MVE_VST2:
case MVE_VST4:
case MVE_VSTRB_SCATTER_T1:
case MVE_VSTRW_SCATTER_T3:
case MVE_VSTRB_T1:
case MVE_VSTRH_T2:
+ case MVE_VSUB_VEC_T1:
+ case MVE_VSUB_VEC_T2:
if (size <= 3)
func (stream, "%s", mve_vec_sizename[size]);
else
func (stream, "<undef size>");
break;
+ case MVE_VABD_FP:
+ case MVE_VADD_FP_T1:
+ case MVE_VADD_FP_T2:
+ case MVE_VSUB_FP_T1:
+ case MVE_VSUB_FP_T2:
case MVE_VCMP_FP_T1:
case MVE_VCMP_FP_T2:
case MVE_VFMA_FP_SCALAR:
func (stream, "16");
break;
+ case MVE_VCADD_FP:
+ case MVE_VCMLA_FP:
+ case MVE_VCMUL_FP:
+ case MVE_VMLADAV_T1:
+ case MVE_VMLALDAV:
+ case MVE_VMLSDAV_T1:
+ case MVE_VMLSLDAV:
+ case MVE_VMOVN:
+ case MVE_VQDMULL_T1:
+ case MVE_VQDMULL_T2:
+ case MVE_VQMOVN:
+ case MVE_VQMOVUN:
+ if (size == 0)
+ func (stream, "16");
+ else if (size == 1)
+ func (stream, "32");
+ break;
+
+ case MVE_VMOVL:
+ if (size == 1)
+ func (stream, "8");
+ else if (size == 2)
+ func (stream, "16");
+ break;
+
case MVE_VDUP:
switch (size)
{
}
break;
+ case MVE_VMULL_POLY:
+ if (size == 0)
+ func (stream, "p8");
+ else if (size == 1)
+ func (stream, "p16");
+ break;
+
case MVE_VMVN_IMM:
switch (size)
{
}
break;
+ case MVE_VQSHRN:
+ case MVE_VQSHRUN:
+ case MVE_VQRSHRN:
+ case MVE_VQRSHRUN:
+ case MVE_VRSHRN:
+ case MVE_VSHRN:
+ {
+ switch (size)
+ {
+ case 1:
+ func (stream, "16");
+ break;
+
+ case 2: case 3:
+ func (stream, "32");
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ case MVE_VQSHL_T2:
+ case MVE_VQSHLU_T3:
+ case MVE_VRSHR:
+ case MVE_VSHL_T1:
+ case MVE_VSHLL_T1:
+ case MVE_VSHR:
+ case MVE_VSLI:
+ case MVE_VSRI:
+ {
+ switch (size)
+ {
+ case 1:
+ func (stream, "8");
+ break;
+
+ case 2: case 3:
+ func (stream, "16");
+ break;
+
+ case 4: case 5: case 6: case 7:
+ func (stream, "32");
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
default:
break;
}
}
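+
+/* Print the immediate shift count of an MVE shift-by-immediate
+ encoding.  For the encodings matched by startAt0 below the shift
+ count is imm6 minus the element size; for the others it is twice the
+ element size minus imm6. */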
+static void
+print_mve_shift_n (struct disassemble_info *info, long given,
+ enum mve_instructions matched_insn)
+{
+ void *stream = info->stream;
+ fprintf_ftype func = info->fprintf_func;
+
+ int startAt0
+ = matched_insn == MVE_VQSHL_T2
+ || matched_insn == MVE_VQSHLU_T3
+ || matched_insn == MVE_VSHL_T1
+ || matched_insn == MVE_VSHLL_T1
+ || matched_insn == MVE_VSLI;
+
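+ /* imm6 (bits 21:16) encodes both the element size and the shift
+ count: the highest set bit among bits 5:3 of imm6 selects the element
+ size (8, 16 or 32) and the bits below it hold the count. */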
+ unsigned imm6 = (given & 0x3f0000) >> 16;
+
+ if (matched_insn == MVE_VSHLL_T1)
+ imm6 &= 0x1f;
+
+ unsigned shiftAmount = 0;
+ if ((imm6 & 0x20) != 0)
+ shiftAmount = startAt0 ? imm6 - 32 : 64 - imm6;
+ else if ((imm6 & 0x10) != 0)
+ shiftAmount = startAt0 ? imm6 - 16 : 32 - imm6;
+ else if ((imm6 & 0x08) != 0)
+ shiftAmount = startAt0 ? imm6 - 8 : 16 - imm6;
+ else
+ print_mve_undefined (info, UNDEF_SIZE_0);
+
+ func (stream, "%u", shiftAmount);
+}
+
static void
print_vec_condition (struct disassemble_info *info, long given,
enum mve_instructions matched_insn)
print_mve_vmov_index (info, given);
break;
+ case 'T':
+ if (arm_decode_field (given, 12, 12) == 0)
+ func (stream, "b");
+ else
+ func (stream, "t");
+ break;
+
+ case 'X':
+ if (arm_decode_field (given, 12, 12) == 1)
+ func (stream, "x");
+ break;
+
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
{
value,
insn->mve_op);
break;
+ case 'I':
+ if (value == 1)
+ func (stream, "i");
+ break;
+ case 'A':
+ if (value == 1)
+ func (stream, "a");
+ break;
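+ /* A 64-bit scalar result is held in a GPR pair: 'l' prints the even
+ (low half) register and 'h' the odd (high half) one. */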
+ case 'h':
+ {
+ unsigned int odd_reg = (value << 1) | 1;
+ func (stream, "%s", arm_regnames[odd_reg]);
+ }
+ break;
case 'i':
{
unsigned long imm
case 'k':
func (stream, "%lu", 64 - value);
break;
+ case 'l':
+ {
+ unsigned int even_reg = value << 1;
+ func (stream, "%s", arm_regnames[even_reg]);
+ }
+ break;
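+ /* vddup/vdwdup/vidup/viwdup encode their immediate step as a power
+ of two: the two-bit field selects 1, 2, 4 or 8. */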
+ case 'u':
+ switch (value)
+ {
+ case 0:
+ func (stream, "1");
+ break;
+ case 1:
+ func (stream, "2");
+ break;
+ case 2:
+ func (stream, "4");
+ break;
+ case 3:
+ func (stream, "8");
+ break;
+ default:
+ break;
+ }
+ break;
+ case 'o':
+ print_mve_rotate (info, value, width);
+ break;
case 'r':
func (stream, "%s", arm_regnames[value]);
break;
case 'd':
- func (stream, "%ld", value);
- value_in_comment = value;
+ if (insn->mve_op == MVE_VQSHL_T2
+ || insn->mve_op == MVE_VQSHLU_T3
+ || insn->mve_op == MVE_VRSHR
+ || insn->mve_op == MVE_VRSHRN
+ || insn->mve_op == MVE_VSHL_T1
+ || insn->mve_op == MVE_VSHLL_T1
+ || insn->mve_op == MVE_VSHR
+ || insn->mve_op == MVE_VSHRN
+ || insn->mve_op == MVE_VSLI
+ || insn->mve_op == MVE_VSRI)
+ print_mve_shift_n (info, given, insn->mve_op);
+ else if (insn->mve_op == MVE_VSHLL_T2)
+ {
+ switch (value)
+ {
+ case 0x00:
+ func (stream, "8");
+ break;
+ case 0x01:
+ func (stream, "16");
+ break;
+ case 0x02:
+ print_mve_undefined (info, UNDEF_SIZE_2);
+ break;
+ default:
+ assert (0);
+ break;
+ }
+ }
+ else
+ {
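+ /* VSHLC encodes a shift count of 32 as 0. */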
+ if (insn->mve_op == MVE_VSHLC && value == 0)
+ value = 32;
+ func (stream, "%ld", value);
+ value_in_comment = value;
+ }
break;
case 'F':
func (stream, "s%ld", value);
switch (*c)
{
+ case 's':
+ if (val <= 3)
+ func (stream, "%s", mve_vec_sizename[val]);
+ else
+ func (stream, "<undef size>");
+ break;
+
case 'd':
func (stream, "%lu", val);
value_in_comment = val;