/* AArch64 assembler/disassembler support.
- Copyright (C) 2009-2015 Free Software Foundation, Inc.
+ Copyright (C) 2009-2018 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GNU Binutils.
typedef uint32_t aarch64_insn;
/* The following bitmasks control CPU features. */
+#define AARCH64_FEATURE_SHA2 0x200000000ULL /* SHA2 instructions. */
+#define AARCH64_FEATURE_AES 0x800000000ULL /* AES instructions. */
+#define AARCH64_FEATURE_V8_4 0x000000800ULL /* ARMv8.4 processors. */
+#define AARCH64_FEATURE_SM4 0x100000000ULL /* SM3 & SM4 instructions. */
+#define AARCH64_FEATURE_SHA3 0x400000000ULL /* SHA3 instructions. */
#define AARCH64_FEATURE_V8 0x00000001 /* All processors. */
#define AARCH64_FEATURE_V8_2 0x00000020 /* ARMv8.2 processors. */
+#define AARCH64_FEATURE_V8_3 0x00000040 /* ARMv8.3 processors. */
#define AARCH64_FEATURE_CRYPTO 0x00010000 /* Crypto instructions. */
#define AARCH64_FEATURE_FP 0x00020000 /* FP instructions. */
#define AARCH64_FEATURE_SIMD 0x00040000 /* SIMD instructions. */
#define AARCH64_FEATURE_V8_1 0x01000000 /* v8.1 features. */
#define AARCH64_FEATURE_F16 0x02000000 /* v8.2 FP16 instructions. */
#define AARCH64_FEATURE_RAS 0x04000000 /* RAS Extensions. */
+#define AARCH64_FEATURE_PROFILE 0x08000000 /* Statistical Profiling. */
+#define AARCH64_FEATURE_SVE 0x10000000 /* SVE instructions. */
+#define AARCH64_FEATURE_RCPC 0x20000000 /* RCPC instructions. */
+#define AARCH64_FEATURE_COMPNUM 0x40000000 /* Complex # instructions. */
+#define AARCH64_FEATURE_DOTPROD 0x080000000 /* Dot Product instructions. */
+#define AARCH64_FEATURE_F16_FML 0x1000000000ULL /* v8.2 FP16FML ins. */
/* Architectures are the sum of the base and extensions. */
#define AARCH64_ARCH_V8 AARCH64_FEATURE (AARCH64_FEATURE_V8, \
AARCH64_FEATURE_FP \
| AARCH64_FEATURE_SIMD)
-#define AARCH64_ARCH_V8_1 AARCH64_FEATURE (AARCH64_FEATURE_V8, \
- AARCH64_FEATURE_FP \
- | AARCH64_FEATURE_SIMD \
- | AARCH64_FEATURE_CRC \
+#define AARCH64_ARCH_V8_1 AARCH64_FEATURE (AARCH64_ARCH_V8, \
+ AARCH64_FEATURE_CRC \
| AARCH64_FEATURE_V8_1 \
| AARCH64_FEATURE_LSE \
| AARCH64_FEATURE_PAN \
| AARCH64_FEATURE_LOR \
| AARCH64_FEATURE_RDMA)
-#define AARCH64_ARCH_V8_2 AARCH64_FEATURE (AARCH64_FEATURE_V8, \
+#define AARCH64_ARCH_V8_2 AARCH64_FEATURE (AARCH64_ARCH_V8_1, \
AARCH64_FEATURE_V8_2 \
- | AARCH64_FEATURE_F16 \
- | AARCH64_FEATURE_RAS \
- | AARCH64_FEATURE_FP \
- | AARCH64_FEATURE_SIMD \
- | AARCH64_FEATURE_CRC \
- | AARCH64_FEATURE_V8_1 \
- | AARCH64_FEATURE_LSE \
- | AARCH64_FEATURE_PAN \
- | AARCH64_FEATURE_LOR \
- | AARCH64_FEATURE_RDMA)
+ | AARCH64_FEATURE_RAS)
+#define AARCH64_ARCH_V8_3 AARCH64_FEATURE (AARCH64_ARCH_V8_2, \
+ AARCH64_FEATURE_V8_3 \
+ | AARCH64_FEATURE_RCPC \
+ | AARCH64_FEATURE_COMPNUM)
+#define AARCH64_ARCH_V8_4 AARCH64_FEATURE (AARCH64_ARCH_V8_3, \
+ AARCH64_FEATURE_V8_4 \
+ | AARCH64_FEATURE_DOTPROD \
+ | AARCH64_FEATURE_F16_FML)
#define AARCH64_ARCH_NONE AARCH64_FEATURE (0, 0)
#define AARCH64_ANY AARCH64_FEATURE (-1, 0) /* Any basic core. */
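Because each architecture level is now defined in terms of the previous one, the feature bits accumulate: ARMv8.3 carries every ARMv8.2 bit, ARMv8.4 every ARMv8.3 bit, and so on. A minimal sketch that restates this nesting using only the macros above:

#include <assert.h>
#include "bfd.h"
#include "opcode/aarch64.h"

/* Illustrative only: each AARCH64_ARCH_V8_x expands to the previous
   level plus its new feature bits, so later levels are supersets.  */
static void
check_arch_nesting (void)
{
  aarch64_feature_set v8_2 = AARCH64_ARCH_V8_2;
  aarch64_feature_set v8_3 = AARCH64_ARCH_V8_3;
  aarch64_feature_set v8_4 = AARCH64_ARCH_V8_4;

  assert ((v8_3 & v8_2) == v8_2);   /* v8.3 keeps all v8.2 bits.  */
  assert ((v8_4 & v8_3) == v8_3);   /* v8.4 keeps all v8.3 bits.  */
}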
/* CPU-specific features. */
-typedef unsigned long aarch64_feature_set;
+typedef unsigned long long aarch64_feature_set;
-#define AARCH64_CPU_HAS_FEATURE(CPU,FEAT) \
+#define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT) \
+ ((~(CPU) & (FEAT)) == 0)
+
+#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT) \
(((CPU) & (FEAT)) != 0)
+#define AARCH64_CPU_HAS_FEATURE(CPU,FEAT) \
+ AARCH64_CPU_HAS_ALL_FEATURES (CPU,FEAT)
+
#define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2) \
do \
{ \
#define AARCH64_FEATURE(core,coproc) ((core) | (coproc))
-#define AARCH64_OPCODE_HAS_FEATURE(OPC,FEAT) \
- (((OPC) & (FEAT)) != 0)
-
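The two query forms differ when the requested mask has more than one bit set: the ALL form requires every bit, the ANY form only one. A hedged usage sketch built from the definitions above; the expected results follow directly from the macro expansions:

#include <stdio.h>
#include "bfd.h"
#include "opcode/aarch64.h"

/* Illustrative only: exercise the feature-test macros.  */
int
main (void)
{
  aarch64_feature_set cpu = AARCH64_ARCH_V8_2;	/* FP/SIMD yes, SVE no.  */

  /* ALL: every requested bit must be present -> prints 1, then 0.  */
  printf ("%d\n", AARCH64_CPU_HAS_ALL_FEATURES (cpu, AARCH64_FEATURE_FP
						 | AARCH64_FEATURE_SIMD));
  printf ("%d\n", AARCH64_CPU_HAS_ALL_FEATURES (cpu, AARCH64_FEATURE_SVE));

  /* ANY: at least one requested bit must be present -> prints 1.  */
  printf ("%d\n", AARCH64_CPU_HAS_ANY_FEATURES (cpu, AARCH64_FEATURE_FP
						 | AARCH64_FEATURE_SVE));
  return 0;
}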
enum aarch64_operand_class
{
AARCH64_OPND_CLASS_NIL,
AARCH64_OPND_CLASS_SIMD_ELEMENT,
AARCH64_OPND_CLASS_SISD_REG,
AARCH64_OPND_CLASS_SIMD_REGLIST,
- AARCH64_OPND_CLASS_CP_REG,
+ AARCH64_OPND_CLASS_SVE_REG,
+ AARCH64_OPND_CLASS_PRED_REG,
AARCH64_OPND_CLASS_ADDRESS,
AARCH64_OPND_CLASS_IMMEDIATE,
AARCH64_OPND_CLASS_SYSTEM,
AARCH64_OPND_Rd_SP, /* Integer Rd or SP. */
AARCH64_OPND_Rn_SP, /* Integer Rn or SP. */
+ AARCH64_OPND_Rm_SP, /* Integer Rm or SP. */
AARCH64_OPND_PAIRREG, /* Paired register operand. */
AARCH64_OPND_Rm_EXT, /* Integer Rm extended. */
AARCH64_OPND_Rm_SFT, /* Integer Rm shifted. */
AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn. */
AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm. */
+ AARCH64_OPND_Va, /* AdvSIMD Vector Va. */
AARCH64_OPND_Vd, /* AdvSIMD Vector Vd. */
AARCH64_OPND_Vn, /* AdvSIMD Vector Vn. */
AARCH64_OPND_Vm, /* AdvSIMD Vector Vm. */
AARCH64_OPND_Ed, /* AdvSIMD Vector Element Vd. */
AARCH64_OPND_En, /* AdvSIMD Vector Element Vn. */
AARCH64_OPND_Em, /* AdvSIMD Vector Element Vm. */
+ AARCH64_OPND_Em16, /* AdvSIMD Vector Element Vm restricted to V0 - V15 when
+ qualifier is S_H. */
AARCH64_OPND_LVn, /* AdvSIMD Vector register list used in e.g. TBL. */
AARCH64_OPND_LVt, /* AdvSIMD Vector register list used in ld/st. */
AARCH64_OPND_LVt_AL, /* AdvSIMD Vector register list for loading single
structure to all lanes. */
AARCH64_OPND_LEt, /* AdvSIMD Vector Element list. */
- AARCH64_OPND_Cn, /* Co-processor register in CRn field. */
- AARCH64_OPND_Cm, /* Co-processor register in CRm field. */
+ AARCH64_OPND_CRn, /* Co-processor register in CRn field. */
+ AARCH64_OPND_CRm, /* Co-processor register in CRm field. */
AARCH64_OPND_IDX, /* AdvSIMD EXT index operand. */
+ AARCH64_OPND_MASK, /* AdvSIMD EXT index operand. */
AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left. */
AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right. */
AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift. */
AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM. */
AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI. */
AARCH64_OPND_IMM, /* Immediate. */
+ AARCH64_OPND_IMM_2, /* Immediate. */
AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field. */
AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field. */
AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field. */
AARCH64_OPND_BIT_NUM, /* Immediate. */
AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions. */
AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions. */
+ AARCH64_OPND_SIMM5, /* 5-bit signed immediate in the imm5 field. */
AARCH64_OPND_NZCV, /* Flag bit specifier giving an alternative value for
each condition flag. */
AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide. */
AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */
AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias. */
+ AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA. */
+ AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA. */
+ AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD. */
AARCH64_OPND_COND, /* Standard condition as the last operand. */
AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV. */
friendly feature of using LDR/STR as the
mnemonic name for LDUR/STUR instructions
wherever there is no ambiguity. */
+ AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate. */
AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate. */
AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures. */
+ AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate. */
AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed. */
AARCH64_OPND_SYSREG, /* System register operand. */
AARCH64_OPND_BARRIER, /* Barrier operand. */
AARCH64_OPND_BARRIER_ISB, /* Barrier operand for ISB. */
AARCH64_OPND_PRFOP, /* Prefetch operation. */
+ AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */
+
+ AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */
+ AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */
+ AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */
+ AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */
+ AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL]. */
+ AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL]. */
+ AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL]. */
+ AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */
+ AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */
+ AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */
+ AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8]. */
+ AARCH64_OPND_SVE_ADDR_R, /* SVE [<Xn|SP>]. */
+ AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>, <Xm|XZR>]. */
+ AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1]. */
+ AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2]. */
+ AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3]. */
+ AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>]. */
+ AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1]. */
+ AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2]. */
+ AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3]. */
+ AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D]. */
+ AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1]. */
+ AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2]. */
+ AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3]. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
+ Bit 14 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
+ Bit 22 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
+ Bit 14 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
+ Bit 22 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
+ Bit 14 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
+ Bit 22 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
+ Bit 14 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
+ Bit 22 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>]. */
+ AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2]. */
+ AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4]. */
+ AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8]. */
+ AARCH64_OPND_SVE_ADDR_ZZ_LSL, /* SVE [Zn.<T>, Zm.<T>, LSL #<msz>]. */
+ AARCH64_OPND_SVE_ADDR_ZZ_SXTW, /* SVE [Zn.<T>, Zm.<T>, SXTW #<msz>]. */
+ AARCH64_OPND_SVE_ADDR_ZZ_UXTW, /* SVE [Zn.<T>, Zm.<T>, UXTW #<msz>]. */
+ AARCH64_OPND_SVE_AIMM, /* SVE unsigned arithmetic immediate. */
+ AARCH64_OPND_SVE_ASIMM, /* SVE signed arithmetic immediate. */
+ AARCH64_OPND_SVE_FPIMM8, /* SVE 8-bit floating-point immediate. */
+ AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0. */
+ AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0. */
+ AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0. */
+ AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270). */
+ AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90). */
+ AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate. */
+ AARCH64_OPND_SVE_LIMM, /* SVE logical immediate. */
+ AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV. */
+ AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration. */
+ AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor. */
+ AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation. */
+ AARCH64_OPND_SVE_Pd, /* SVE p0-p15 in Pd. */
+ AARCH64_OPND_SVE_Pg3, /* SVE p0-p7 in Pg. */
+ AARCH64_OPND_SVE_Pg4_5, /* SVE p0-p15 in Pg, bits [8,5]. */
+ AARCH64_OPND_SVE_Pg4_10, /* SVE p0-p15 in Pg, bits [13,10]. */
+ AARCH64_OPND_SVE_Pg4_16, /* SVE p0-p15 in Pg, bits [19,16]. */
+ AARCH64_OPND_SVE_Pm, /* SVE p0-p15 in Pm. */
+ AARCH64_OPND_SVE_Pn, /* SVE p0-p15 in Pn. */
+ AARCH64_OPND_SVE_Pt, /* SVE p0-p15 in Pt. */
+ AARCH64_OPND_SVE_Rm, /* Integer Rm or ZR, alt. SVE position. */
+ AARCH64_OPND_SVE_Rn_SP, /* Integer Rn or SP, alt. SVE position. */
+ AARCH64_OPND_SVE_SHLIMM_PRED, /* SVE shift left amount (predicated). */
+ AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated). */
+ AARCH64_OPND_SVE_SHRIMM_PRED, /* SVE shift right amount (predicated). */
+ AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated). */
+ AARCH64_OPND_SVE_SIMM5, /* SVE signed 5-bit immediate. */
+ AARCH64_OPND_SVE_SIMM5B, /* SVE secondary signed 5-bit immediate. */
+ AARCH64_OPND_SVE_SIMM6, /* SVE signed 6-bit immediate. */
+ AARCH64_OPND_SVE_SIMM8, /* SVE signed 8-bit immediate. */
+ AARCH64_OPND_SVE_UIMM3, /* SVE unsigned 3-bit immediate. */
+ AARCH64_OPND_SVE_UIMM7, /* SVE unsigned 7-bit immediate. */
+ AARCH64_OPND_SVE_UIMM8, /* SVE unsigned 8-bit immediate. */
+ AARCH64_OPND_SVE_UIMM8_53, /* SVE split unsigned 8-bit immediate. */
+ AARCH64_OPND_SVE_VZn, /* Scalar SIMD&FP register in Zn field. */
+ AARCH64_OPND_SVE_Vd, /* Scalar SIMD&FP register in Vd. */
+ AARCH64_OPND_SVE_Vm, /* Scalar SIMD&FP register in Vm. */
+ AARCH64_OPND_SVE_Vn, /* Scalar SIMD&FP register in Vn. */
+ AARCH64_OPND_SVE_Za_5, /* SVE vector register in Za, bits [9,5]. */
+ AARCH64_OPND_SVE_Za_16, /* SVE vector register in Za, bits [20,16]. */
+ AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd. */
+ AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5]. */
+ AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16]. */
+ AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16]. */
+ AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22. */
+ AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16]. */
+ AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn. */
+ AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP. */
+ AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn. */
+ AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt. */
+ AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */
+ AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14]. */
};
/* Qualifier constrains an operand. It either specifies a variant of an
AARCH64_OPND_QLF_S_S,
AARCH64_OPND_QLF_S_D,
AARCH64_OPND_QLF_S_Q,
+ /* This type qualifier has a special meaning: it indicates that the
+ instruction selects 4 x 1 byte elements. Otherwise it is encoded exactly
+ like AARCH64_OPND_QLF_S_B; it exists purely for syntactical reasons and
+ is an exception to the normal AArch64 disassembly scheme. */
+ AARCH64_OPND_QLF_S_4B,
/* Qualifying an operand which is a SIMD vector register or a SIMD vector
register list; indicating register shape.
a use is only for the ease of operand encoding/decoding and qualifier
sequence matching; such a use should not be applied widely; use the value
constraint qualifiers for immediate operands wherever possible. */
+ AARCH64_OPND_QLF_V_4B,
AARCH64_OPND_QLF_V_8B,
AARCH64_OPND_QLF_V_16B,
+ AARCH64_OPND_QLF_V_2H,
AARCH64_OPND_QLF_V_4H,
AARCH64_OPND_QLF_V_8H,
AARCH64_OPND_QLF_V_2S,
AARCH64_OPND_QLF_V_2D,
AARCH64_OPND_QLF_V_1Q,
+ AARCH64_OPND_QLF_P_Z,
+ AARCH64_OPND_QLF_P_M,
+
/* Constraint on value. */
+ AARCH64_OPND_QLF_CR, /* CRn, CRm. */
AARCH64_OPND_QLF_imm_0_7,
AARCH64_OPND_QLF_imm_0_15,
AARCH64_OPND_QLF_imm_0_31,
ldst_immpost,
ldst_immpre,
ldst_imm9, /* immpost or immpre */
+ ldst_imm10, /* LDRAA/LDRAB */
ldst_pos,
ldst_regoff,
ldst_unpriv,
movewide,
pcreladdr,
ic_system,
+ sve_cpy,
+ sve_index,
+ sve_limm,
+ sve_misc,
+ sve_movprfx,
+ sve_pred_zm,
+ sve_shift_pred,
+ sve_shift_unpred,
+ sve_size_bhs,
+ sve_size_bhsd,
+ sve_size_hsd,
+ sve_size_sd,
testbranch,
+ cryptosm3,
+ cryptosm4,
+ dotproduct,
};
/* Opcode enumerators. */
OP_UXTL,
OP_UXTL2,
+ OP_MOV_P_P,
+ OP_MOV_Z_P_Z,
+ OP_MOV_Z_V,
+ OP_MOV_Z_Z,
+ OP_MOV_Z_Zi,
+ OP_MOVM_P_P_P,
+ OP_MOVS_P_P,
+ OP_MOVZS_P_P_P,
+ OP_MOVZ_P_P_P,
+ OP_NOTS_P_P_P_Z,
+ OP_NOT_P_P_P_Z,
+
+ OP_FCMLA_ELEM, /* ARMv8.3, indexed element version. */
+
OP_TOTAL_NUM, /* Pseudo. */
};
aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];
/* Flags providing information about this instruction */
- uint32_t flags;
+ uint64_t flags;
+
+ /* Extra constraints on the instruction that the verifier checks. */
+ uint32_t constraints;
+
+ /* If nonzero, this operand and operand 0 are both registers and
+ are required to have the same register number. */
+ unsigned char tied_operand;
+
+ /* If non-NULL, a function to verify that a given instruction is valid. */
+ bfd_boolean (* verifier) (const struct aarch64_opcode *, const aarch64_insn);
};
typedef struct aarch64_opcode aarch64_opcode;
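A sketch of a hook matching the verifier signature above; the "Rd must not be register 31" rule it applies is invented purely for illustration:

#include "bfd.h"
#include "opcode/aarch64.h"

/* Illustrative only: a verifier gets the opcode entry and the raw
   instruction word and returns TRUE when the encoding is acceptable.
   The rule checked here is made up for this sketch.  */
static bfd_boolean
verify_rd_not_31 (const struct aarch64_opcode *opcode,
                  const aarch64_insn insn)
{
  (void) opcode;		/* Unused in this example.  */
  return (insn & 0x1f) != 31 ? TRUE : FALSE;
}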
#define F_OD(X) (((X) & 0x7) << 24)
/* Instruction has the field of 'sz'. */
#define F_LSE_SZ (1 << 27)
-/* Next bit is 28. */
+/* Require an exact qualifier match, even for NIL qualifiers. */
+#define F_STRICT (1ULL << 28)
+/* This system instruction is used to read system registers. */
+#define F_SYS_READ (1ULL << 29)
+/* This system instruction is used to write system registers. */
+#define F_SYS_WRITE (1ULL << 30)
+/* This instruction has an extra constraint on it that imposes a requirement on
+ subsequent instructions. */
+#define F_SCAN (1ULL << 31)
+/* Next bit is 32. */
+
+/* Instruction constraints. */
+/* This instruction has a predication constraint on the instruction at PC+4. */
+#define C_SCAN_MOVPRFX (1U << 0)
+/* This instruction's operation width is determined by the operand with the
+ largest element size. */
+#define C_MAX_ELEM (1U << 1)
+/* Next bit is 2. */
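F_SCAN marks an opcode whose entry also carries a constraint bit; C_SCAN_MOVPRFX then identifies which follow-up check applies. A minimal sketch of how a consumer might test the two fields together, assuming a valid aarch64_opcode pointer:

#include "bfd.h"
#include "opcode/aarch64.h"

/* Illustrative only: does OPC constrain the instruction that follows it
   in the MOVPRFX sense described above?  */
static bfd_boolean
needs_movprfx_scan (const aarch64_opcode *opc)
{
  return (opc->flags & F_SCAN) != 0
	 && (opc->constraints & C_SCAN_MOVPRFX) != 0;
}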
static inline bfd_boolean
alias_opcode_p (const aarch64_opcode *opcode)
extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
extern const struct aarch64_name_value_pair aarch64_prfops [32];
+extern const struct aarch64_name_value_pair aarch64_hint_options [];
typedef struct
{
AARCH64_MOD_SXTH,
AARCH64_MOD_SXTW,
AARCH64_MOD_SXTX,
+ AARCH64_MOD_MUL,
+ AARCH64_MOD_MUL_VL,
};
bfd_boolean
{
/* A list of names with the first one as the disassembly preference;
- terminated by NULL if fewer than 3. */
+ terminated by NULL if fewer than 4. */
- const char *names[3];
+ const char *names[4];
aarch64_insn value;
} aarch64_cond;
} reg;
struct
{
- unsigned regno : 5;
- unsigned index : 4;
+ unsigned int regno;
+ int64_t index;
} reglane;
/* e.g. LVn. */
struct
/* 1 if it is a list of reg element. */
unsigned has_index : 1;
/* Lane index; valid only when has_index is 1. */
- unsigned index : 4;
+ int64_t index;
} reglist;
/* e.g. immediate or pc relative address offset. */
struct
unsigned preind : 1; /* Pre-indexed. */
unsigned postind : 1; /* Post-indexed. */
} addr;
+
+ struct
+ {
+ /* The encoding of the system register. */
+ aarch64_insn value;
+
+ /* The system register flags. */
+ uint32_t flags;
+ } sysreg;
+
const aarch64_cond *cond;
- /* The encoding of the system register. */
- aarch64_insn sysreg;
/* The encoding of the PSTATE field. */
aarch64_insn pstatefield;
const aarch64_sys_ins_reg *sysins_op;
const struct aarch64_name_value_pair *barrier;
+ const struct aarch64_name_value_pair *hint_option;
const struct aarch64_name_value_pair *prfop;
};
struct
{
enum aarch64_modifier_kind kind;
- int amount;
unsigned operator_present: 1; /* Only valid during encoding. */
/* Value of the 'S' field in ld/st reg offset; used only in decoding. */
unsigned amount_present: 1;
+ int64_t amount;
} shifter;
unsigned skip:1; /* Operand is not completed if there is a fixup needed
No syntax error, but the operands are not a valid combination, e.g.
FMOV D0,S0
+ AARCH64_OPDE_UNTIED_OPERAND
+ The asm failed to use the same register for a destination operand
+ and a tied source operand.
+
AARCH64_OPDE_OUT_OF_RANGE
Error about some immediate value out of a valid range.
AARCH64_OPDE_SYNTAX_ERROR,
AARCH64_OPDE_FATAL_SYNTAX_ERROR,
AARCH64_OPDE_INVALID_VARIANT,
+ AARCH64_OPDE_UNTIED_OPERAND,
AARCH64_OPDE_OUT_OF_RANGE,
AARCH64_OPDE_UNALIGNED,
AARCH64_OPDE_REG_LIST,
int index;
const char *error;
int data[3]; /* Some data for extra information. */
+ bfd_boolean non_fatal;
};
typedef struct aarch64_operand_error aarch64_operand_error;
/* Generate the string representation of an operand. */
extern void
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
- const aarch64_opnd_info *, int, int *, bfd_vma *);
+ const aarch64_opnd_info *, int, int *, bfd_vma *,
+ char **);
/* Miscellaneous interface. */
aarch64_zero_register_p (const aarch64_opnd_info *);
extern int
-aarch64_decode_insn (aarch64_insn, aarch64_inst *, bfd_boolean);
+aarch64_decode_insn (aarch64_insn, aarch64_inst *, bfd_boolean,
+ aarch64_operand_error *errors);
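With the extra argument, callers can receive an operand diagnostic even when decoding fails. A hedged calling sketch; the meaning of the bfd_boolean argument and the "nonzero means success" return convention are assumptions, not stated in this header:

#include <string.h>
#include "bfd.h"
#include "opcode/aarch64.h"

/* Illustrative fragment only: decode one instruction word and collect
   any operand diagnostic.  */
void
decode_example (void)
{
  aarch64_insn word = 0xd503201f;	/* NOP encoding.  */
  aarch64_inst inst;
  aarch64_operand_error errors;

  memset (&errors, 0, sizeof errors);
  if (aarch64_decode_insn (word, &inst, FALSE, &errors))
    {
      /* INST now describes the decoded instruction.  */
    }
  else
    {
      /* ERRORS holds the diagnostic, e.g. its ERROR string.  */
    }
}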
/* Given an operand qualifier, return the expected data element size
of a qualified operand. */
extern const char *
aarch64_get_operand_desc (enum aarch64_opnd);
+extern bfd_boolean
+aarch64_sve_dupm_mov_immediate_p (uint64_t, int);
+
#ifdef DEBUG_AARCH64
extern int debug_dump;
#define DEBUG_TRACE_IF(C, M, ...) ;
#endif /* DEBUG_AARCH64 */
+extern const char *const aarch64_sve_pattern_array[32];
+extern const char *const aarch64_sve_prfop_array[16];
+
#ifdef __cplusplus
}
#endif