AARCH64_FEATURE (AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_simd =
AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0);
-static const aarch64_feature_set aarch64_feature_crypto =
- AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_AES
- | AARCH64_FEATURE_SHA2 | AARCH64_FEATURE_SIMD | AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_crc =
AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0);
static const aarch64_feature_set aarch64_feature_lse =
AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0);
static const aarch64_feature_set aarch64_feature_v8_4 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_4, 0);
-static const aarch64_feature_set aarch64_feature_crypto_v8_2 =
- AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_CRYPTO
- | AARCH64_FEATURE_SIMD | AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_sm4 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_SM4
| AARCH64_FEATURE_SIMD | AARCH64_FEATURE_FP, 0);
#define CORE &aarch64_feature_v8
#define FP &aarch64_feature_fp
#define SIMD &aarch64_feature_simd
-#define CRYPTO &aarch64_feature_crypto
#define CRC &aarch64_feature_crc
#define LSE &aarch64_feature_lse
#define LOR &aarch64_feature_lor
#define ARMV8_4 &aarch64_feature_v8_4
#define SHA3 &aarch64_feature_sha3
#define SM4 &aarch64_feature_sm4
-#define CRYPTO_V8_2 &aarch64_feature_crypto_v8_2
#define FP_F16_V8_2 &aarch64_feature_fp_16_v8_2
#define DOTPROD &aarch64_feature_dotprod
#define ARMV8_5 &aarch64_feature_v8_5
#define SIMD_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, OP, SIMD, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _SIMD_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,VERIFIER) \
{ NAME, OPCODE, MASK, CLASS, OP, SIMD, OPS, QUALS, FLAGS, 0, 0, VERIFIER }
-#define CRYP_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
- { NAME, OPCODE, MASK, CLASS, 0, CRYPTO, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _CRC_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, CRC, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _LSE_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, LSE, OPS, QUALS, FLAGS, 0, 0, NULL }
#define V8_4_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, ARMV8_4, OPS, QUALS, FLAGS, 0, 0, NULL }
-#define CRYPTO_V8_2_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
- { NAME, OPCODE, MASK, CLASS, 0, CRYPTO_V8_2, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SHA3_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, SHA3, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SM4_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, SM4, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SVE2BITPERM_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE2_BITPERM, OPS, QUALS, \
FLAGS | F_STRICT, 0, TIED, NULL }
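+/* Armv8.6-A table entries.  ARMV8_6 is assumed to be defined alongside the
+   other feature-set macros above, as &aarch64_feature_v8_6. */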
+#define V8_6_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
+ { NAME, OPCODE, MASK, CLASS, 0, ARMV8_6, OPS, QUALS, FLAGS, 0, 0, NULL }
#define BFLOAT16_SVE_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, BFLOAT16_SVE, OPS, QUALS, FLAGS, 0, 0, NULL }
#define BFLOAT16_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
{ NAME, OPCODE, MASK, CLASS, 0, BFLOAT16_SVE, OPS, QUALS, \
FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
SVE2_INSNC ("shsub", 0x44128000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
SVE2_INSNC ("shsubr", 0x44168000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
SVE2_INSN ("sli", 0x4500f400, 0xff20fc00, sve_shift_tsz_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHLIMM_UNPRED), OP_SVE_VVU_BHSD, 0, 0),
- SVE2_INSNC ("smaxp", 0x4414a000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
- SVE2_INSNC ("sminp", 0x4416a000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+ SVE2_INSNC ("smaxp", 0x4414a000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+ SVE2_INSNC ("sminp", 0x4416a000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
SVE2_INSNC ("smlalb", 0x44a08000, 0xffe0f400, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
SVE2_INSNC ("smlalb", 0x44e08000, 0xffe0f400, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
SVE2_INSNC ("smlalb", 0x44004000, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
V8_4_INSN ("stlur", 0xd9000000, 0xffe00c00, ldst_unscaled, OP2 (Rt, ADDR_OFFSET), QL_STLX, 0),
V8_4_INSN ("ldapur", 0xd9400000, 0xffe00c00, ldst_unscaled, OP2 (Rt, ADDR_OFFSET), QL_STLX, 0),
+ /* V8.6 instructions. */
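+ /* DGH (Data Gathering Hint) is encoded in the HINT space and takes no
+    operands, hence OP0 () and the empty qualifier list. */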
+ V8_6_INSN("dgh", 0xd50320df, 0xffffffff, aarch64_misc, OP0 (), {}, 0),
+
/* Matrix Multiply instructions. */
INT8MATMUL_SVE_INSNC ("smmla", 0x45009800, 0xffe0fc00, sve_misc, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_SBB, 0, C_SCAN_MOVPRFX, 0),
INT8MATMUL_SVE_INSNC ("ummla", 0x45c09800, 0xffe0fc00, sve_misc, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_SBB, 0, C_SCAN_MOVPRFX, 0),