[AArch64] Use "must" rather than "should" in error messages
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 7a73c7e409fa01364b143318fcd01dff47d60d87..0f0795bf4043307f8ae4c274e172809d6e51f360 100644
@@ -27,6 +27,7 @@
 #include <inttypes.h>
 
 #include "opintl.h"
+#include "libiberty.h"
 
 #include "aarch64-opc.h"
 
 int debug_dump = FALSE;
 #endif /* DEBUG_AARCH64 */
 
+/* The enumeration strings associated with each value of a 5-bit SVE
+   pattern operand.  A null entry indicates a reserved meaning.  */
+const char *const aarch64_sve_pattern_array[32] = {
+  /* 0-7.  */
+  "pow2",
+  "vl1",
+  "vl2",
+  "vl3",
+  "vl4",
+  "vl5",
+  "vl6",
+  "vl7",
+  /* 8-15.  */
+  "vl8",
+  "vl16",
+  "vl32",
+  "vl64",
+  "vl128",
+  "vl256",
+  0,
+  0,
+  /* 16-23.  */
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  0,
+  /* 24-31.  */
+  0,
+  0,
+  0,
+  0,
+  0,
+  "mul4",
+  "mul3",
+  "all"
+};
+
+/* The enumeration strings associated with each value of a 4-bit SVE
+   prefetch operand.  A null entry indicates a reserved meaning.  */
+const char *const aarch64_sve_prfop_array[16] = {
+  /* 0-7.  */
+  "pldl1keep",
+  "pldl1strm",
+  "pldl2keep",
+  "pldl2strm",
+  "pldl3keep",
+  "pldl3strm",
+  0,
+  0,
+  /* 8-15.  */
+  "pstl1keep",
+  "pstl1strm",
+  "pstl2keep",
+  "pstl2strm",
+  "pstl3keep",
+  "pstl3strm",
+  0,
+  0
+};
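
Both tables follow the same convention: a non-null entry gives the preferred mnemonic for an encoded value, and a null entry marks a reserved encoding that is printed back as a plain "#imm". A minimal sketch of that lookup, assuming the caller has already masked the value to the field width (the helper name is illustrative, not part of the libopcodes API):

  /* Sketch only: return the symbolic name for a 5-bit SVE pattern value,
     or NULL when the encoding is reserved and must be shown as "#imm".
     E.g. 0 -> "pow2", 14 -> NULL, 31 -> "all".  */
  static const char *
  sve_pattern_name_or_null (unsigned int value)
  {
    return value < 32 ? aarch64_sve_pattern_array[value] : NULL;
  }
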
+
 /* Helper functions to determine which operand to be used to encode/decode
    the size:Q fields for AdvSIMD instructions.  */
 
@@ -199,6 +264,51 @@ const aarch64_field fields[] =
     { 31,  1 },        /* b5: in the test bit and branch instructions.  */
     { 19,  5 },        /* b40: in the test bit and branch instructions.  */
     { 10,  6 },        /* scale: in the fixed-point scalar to fp converting inst.  */
+    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
+    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
+    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
+    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
+    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
+    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
+    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
+    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
+    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
+    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
+    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
+    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
+    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
+    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
+    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
+    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
+    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
+    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
+    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
+    {  0,  5 }, /* SVE_Zd: SVE vector register, bits [4,0].  */
+    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
+    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
+    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
+    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
+    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
+    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
+    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
+    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
+    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
+    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
+    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
+    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
+    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
+    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
+    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
+    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
+    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
+    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
+    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
+    { 16,  4 }, /* SVE_tsz: triangular size select.  */
+    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
+    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
+    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
+    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
+    { 22,  1 }  /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
 };
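
Each entry added above is an { lsb, width } pair giving the position of an operand field within the 32-bit instruction word. A standalone sketch of how such a descriptor is consumed, without claiming to match the exact signatures of the library's insert/extract helpers:

  #include <stdint.h>

  /* Illustration only: pull the field described by { lsb, width } out of an
     instruction word, e.g. SVE_Pg3 ({ 10, 3 }) selects bits [12,10].  */
  static uint32_t
  extract_simple_field (uint32_t insn, unsigned int lsb, unsigned int width)
  {
    return (insn >> lsb) & ((1u << width) - 1);
  }
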
 
 enum aarch64_operand_class
@@ -224,18 +334,18 @@ aarch64_get_operand_desc (enum aarch64_opnd type)
 /* Table of all conditional affixes.  */
 const aarch64_cond aarch64_conds[16] =
 {
-  {{"eq"}, 0x0},
-  {{"ne"}, 0x1},
-  {{"cs", "hs"}, 0x2},
-  {{"cc", "lo", "ul"}, 0x3},
-  {{"mi"}, 0x4},
-  {{"pl"}, 0x5},
+  {{"eq", "none"}, 0x0},
+  {{"ne", "any"}, 0x1},
+  {{"cs", "hs", "nlast"}, 0x2},
+  {{"cc", "lo", "ul", "last"}, 0x3},
+  {{"mi", "first"}, 0x4},
+  {{"pl", "nfrst"}, 0x5},
   {{"vs"}, 0x6},
   {{"vc"}, 0x7},
-  {{"hi"}, 0x8},
-  {{"ls"}, 0x9},
-  {{"ge"}, 0xa},
-  {{"lt"}, 0xb},
+  {{"hi", "pmore"}, 0x8},
+  {{"ls", "plast"}, 0x9},
+  {{"ge", "tcont"}, 0xa},
+  {{"lt", "tstop"}, 0xb},
   {{"gt"}, 0xc},
   {{"le"}, 0xd},
   {{"al"}, 0xe},
@@ -276,6 +386,8 @@ const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
     {"sxth", 0x5},
     {"sxtw", 0x6},
     {"sxtx", 0x7},
+    {"mul", 0x0},
+    {"mul vl", 0x0},
     {NULL, 0},
 };
 
@@ -397,10 +509,11 @@ value_in_range_p (int64_t value, int low, int high)
   return (value >= low && value <= high) ? 1 : 0;
 }
 
+/* Return true if VALUE is a multiple of ALIGN.  */
 static inline int
 value_aligned_p (int64_t value, int align)
 {
-  return ((value & (align - 1)) == 0) ? 1 : 0;
+  return (value % align) == 0;
 }
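
The switch from a mask test to a modulo matters because SVE introduces alignments that are not powers of two: the "mul vl" addressing forms further down scale their immediates by 2, 3 or 4, so a value can legitimately have to be a multiple of 3. A quick hand-checked illustration of why the old expression no longer works:

  /* With align == 3, the old test (value & (align - 1)) == 0 computes
     "value & 2", which wrongly rejects 6 and wrongly accepts 4;
     (value % align) == 0 gives the intended answers.  */
  assert (value_aligned_p (6, 3));     /* 6 is a multiple of 3.  */
  assert (!value_aligned_p (4, 3));    /* 4 is not.  */
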
 
 /* A signed value fits in a field.  */
@@ -587,6 +700,9 @@ struct operand_qualifier_data aarch64_opnd_qualifiers[] =
   {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
   {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
 
+  {0, 0, 0, "z", OQK_OPD_VARIANT},
+  {0, 0, 0, "m", OQK_OPD_VARIANT},
+
   /* Qualifiers constraining the value range.
      First 3 fields:
      Lower bound, higher bound, unused.  */
@@ -1217,6 +1333,18 @@ set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
                          _("shift amount"));
 }
 
+/* Report that the MUL modifier in operand IDX should be in the range
+   [LOWER_BOUND, UPPER_BOUND].  */
+static inline void
+set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
+                                  int idx, int lower_bound, int upper_bound)
+{
+  if (mismatch_detail == NULL)
+    return;
+  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
+                         _("multiplier"));
+}
+
 static inline void
 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
                     int alignment)
@@ -1268,9 +1396,10 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
                                  const aarch64_opcode *opcode,
                                  aarch64_operand_error *mismatch_detail)
 {
-  unsigned num;
+  unsigned num, modifiers, shift;
   unsigned char size;
-  int64_t imm;
+  int64_t imm, min_value, max_value;
+  uint64_t uvalue, mask;
   const aarch64_opnd_info *opnd = opnds + idx;
   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
 
@@ -1332,6 +1461,43 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
        }
       break;
 
+    case AARCH64_OPND_CLASS_SVE_REG:
+      switch (type)
+       {
+       case AARCH64_OPND_SVE_Zn_INDEX:
+         size = aarch64_get_qualifier_esize (opnd->qualifier);
+         if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
+           {
+             set_elem_idx_out_of_range_error (mismatch_detail, idx,
+                                              0, 64 / size - 1);
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_ZnxN:
+       case AARCH64_OPND_SVE_ZtxN:
+         if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("invalid register list"));
+             return 0;
+           }
+         break;
+
+       default:
+         break;
+       }
+      break;
+
+    case AARCH64_OPND_CLASS_PRED_REG:
+      if (opnd->reg.regno >= 8
+         && get_operand_fields_width (get_operand_from_code (type)) == 3)
+       {
+         set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
+         return 0;
+       }
+      break;
+
     case AARCH64_OPND_CLASS_COND:
       if (type == AARCH64_OPND_COND1
          && (opnds[idx].cond->value & 0xe) == 0xe)
@@ -1525,6 +1691,156 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
            }
          break;
 
+       case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
+       case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
+       case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
+       case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
+         min_value = -8;
+         max_value = 7;
+       sve_imm_offset_vl:
+         assert (!opnd->addr.offset.is_reg);
+         assert (opnd->addr.preind);
+         num = 1 + get_operand_specific_data (&aarch64_operands[type]);
+         min_value *= num;
+         max_value *= num;
+         if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
+             || (opnd->shifter.operator_present
+                 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("invalid addressing mode"));
+             return 0;
+           }
+         if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
+           {
+             set_offset_out_of_range_error (mismatch_detail, idx,
+                                            min_value, max_value);
+             return 0;
+           }
+         if (!value_aligned_p (opnd->addr.offset.imm, num))
+           {
+             set_unaligned_error (mismatch_detail, idx, num);
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
+         min_value = -32;
+         max_value = 31;
+         goto sve_imm_offset_vl;
+
+       case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
+         min_value = -256;
+         max_value = 255;
+         goto sve_imm_offset_vl;
+
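
The shared sve_imm_offset_vl code scales the signed 4-, 6- or 9-bit range by the per-operand multiplier, so the S4x3xVL form ends up accepting offsets in [-24, 21] that are multiples of 3. A hand-worked trace, under the assumption that get_operand_specific_data returns 2 for that form (giving num == 3):

  /* Hypothetical trace for AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
     num = 1 + 2 = 3, min_value = -8 * 3 = -24, max_value = 7 * 3 = 21,
     and the immediate must also be a multiple of 3.  */
  assert (value_in_range_p (-24, -24, 21) && value_aligned_p (-24, 3));
  assert (!value_aligned_p (20, 3));         /* Not a multiple of 3.  */
  assert (!value_in_range_p (24, -24, 21));  /* Out of range.  */
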
+       case AARCH64_OPND_SVE_ADDR_RI_U6:
+       case AARCH64_OPND_SVE_ADDR_RI_U6x2:
+       case AARCH64_OPND_SVE_ADDR_RI_U6x4:
+       case AARCH64_OPND_SVE_ADDR_RI_U6x8:
+         min_value = 0;
+         max_value = 63;
+       sve_imm_offset:
+         assert (!opnd->addr.offset.is_reg);
+         assert (opnd->addr.preind);
+         num = 1 << get_operand_specific_data (&aarch64_operands[type]);
+         min_value *= num;
+         max_value *= num;
+         if (opnd->shifter.operator_present
+             || opnd->shifter.amount_present)
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("invalid addressing mode"));
+             return 0;
+           }
+         if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
+           {
+             set_offset_out_of_range_error (mismatch_detail, idx,
+                                            min_value, max_value);
+             return 0;
+           }
+         if (!value_aligned_p (opnd->addr.offset.imm, num))
+           {
+             set_unaligned_error (mismatch_detail, idx, num);
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_ADDR_RR:
+       case AARCH64_OPND_SVE_ADDR_RR_LSL1:
+       case AARCH64_OPND_SVE_ADDR_RR_LSL2:
+       case AARCH64_OPND_SVE_ADDR_RR_LSL3:
+       case AARCH64_OPND_SVE_ADDR_RX:
+       case AARCH64_OPND_SVE_ADDR_RX_LSL1:
+       case AARCH64_OPND_SVE_ADDR_RX_LSL2:
+       case AARCH64_OPND_SVE_ADDR_RX_LSL3:
+       case AARCH64_OPND_SVE_ADDR_RZ:
+       case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
+       case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
+       case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
+         modifiers = 1 << AARCH64_MOD_LSL;
+       sve_rr_operand:
+         assert (opnd->addr.offset.is_reg);
+         assert (opnd->addr.preind);
+         if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
+             && opnd->addr.offset.regno == 31)
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("index register xzr is not allowed"));
+             return 0;
+           }
+         if (((1 << opnd->shifter.kind) & modifiers) == 0
+             || (opnd->shifter.amount
+                 != get_operand_specific_data (&aarch64_operands[type])))
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("invalid addressing mode"));
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
+       case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
+       case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
+       case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
+       case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
+       case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
+       case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
+       case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
+         modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
+         goto sve_rr_operand;
+
+       case AARCH64_OPND_SVE_ADDR_ZI_U5:
+       case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
+       case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
+       case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
+         min_value = 0;
+         max_value = 31;
+         goto sve_imm_offset;
+
+       case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
+         modifiers = 1 << AARCH64_MOD_LSL;
+       sve_zz_operand:
+         assert (opnd->addr.offset.is_reg);
+         assert (opnd->addr.preind);
+         if (((1 << opnd->shifter.kind) & modifiers) == 0
+             || opnd->shifter.amount < 0
+             || opnd->shifter.amount > 3)
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("invalid addressing mode"));
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
+         modifiers = (1 << AARCH64_MOD_SXTW);
+         goto sve_zz_operand;
+
+       case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
+         modifiers = 1 << AARCH64_MOD_UXTW;
+         goto sve_zz_operand;
+
        default:
          break;
        }
@@ -1598,7 +1914,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
          if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
            {
              set_other_error (mismatch_detail, idx,
-                              _("shift amount expected to be 0 or 12"));
+                              _("shift amount must be 0 or 12"));
              return 0;
            }
          if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
@@ -1621,7 +1937,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
          if (!value_aligned_p (opnd->shifter.amount, 16))
            {
              set_other_error (mismatch_detail, idx,
-                              _("shift amount should be a multiple of 16"));
+                              _("shift amount must be a multiple of 16"));
              return 0;
            }
          if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
@@ -1684,6 +2000,10 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
        case AARCH64_OPND_UIMM7:
        case AARCH64_OPND_UIMM3_OP1:
        case AARCH64_OPND_UIMM3_OP2:
+       case AARCH64_OPND_SVE_UIMM3:
+       case AARCH64_OPND_SVE_UIMM7:
+       case AARCH64_OPND_SVE_UIMM8:
+       case AARCH64_OPND_SVE_UIMM8_53:
          size = get_operand_fields_width (get_operand_from_code (type));
          assert (size < 32);
          if (!value_fit_unsigned_field_p (opnd->imm.value, size))
@@ -1694,6 +2014,22 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
            }
          break;
 
+       case AARCH64_OPND_SIMM5:
+       case AARCH64_OPND_SVE_SIMM5:
+       case AARCH64_OPND_SVE_SIMM5B:
+       case AARCH64_OPND_SVE_SIMM6:
+       case AARCH64_OPND_SVE_SIMM8:
+         size = get_operand_fields_width (get_operand_from_code (type));
+         assert (size < 32);
+         if (!value_fit_signed_field_p (opnd->imm.value, size))
+           {
+             set_imm_out_of_range_error (mismatch_detail, idx,
+                                         -(1 << (size - 1)),
+                                         (1 << (size - 1)) - 1);
+             return 0;
+           }
+         break;
+
        case AARCH64_OPND_WIDTH:
          assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
                  && opnds[0].type == AARCH64_OPND_Rd);
@@ -1708,6 +2044,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
          break;
 
        case AARCH64_OPND_LIMM:
+       case AARCH64_OPND_SVE_LIMM:
          {
            int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
            uint64_t uimm = opnd->imm.value;
@@ -1837,7 +2174,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
              if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
                {
                  set_other_error (mismatch_detail, idx,
-                                  _("shift amount expected to be 0 or 16"));
+                                  _("shift amount must be 0 or 16"));
                  return 0;
                }
              break;
@@ -1854,6 +2191,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 
        case AARCH64_OPND_FPIMM:
        case AARCH64_OPND_SIMD_FPIMM:
+       case AARCH64_OPND_SVE_FPIMM8:
          if (opnd->imm.is_fp == 0)
            {
              set_other_error (mismatch_detail, idx,
@@ -1878,6 +2216,150 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
            }
          break;
 
+       case AARCH64_OPND_SVE_AIMM:
+         min_value = 0;
+       sve_aimm:
+         assert (opnd->shifter.kind == AARCH64_MOD_LSL);
+         size = aarch64_get_qualifier_esize (opnds[0].qualifier);
+         mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
+         uvalue = opnd->imm.value;
+         shift = opnd->shifter.amount;
+         if (size == 1)
+           {
+             if (shift != 0)
+               {
+                 set_other_error (mismatch_detail, idx,
+                                  _("no shift amount allowed for"
+                                    " 8-bit constants"));
+                 return 0;
+               }
+           }
+         else
+           {
+             if (shift != 0 && shift != 8)
+               {
+                 set_other_error (mismatch_detail, idx,
+                                  _("shift amount must be 0 or 8"));
+                 return 0;
+               }
+             if (shift == 0 && (uvalue & 0xff) == 0)
+               {
+                 shift = 8;
+                 uvalue = (int64_t) uvalue / 256;
+               }
+           }
+         mask >>= shift;
+         if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("immediate too big for element size"));
+             return 0;
+           }
+         uvalue = (uvalue - min_value) & mask;
+         if (uvalue > 0xff)
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("invalid arithmetic immediate"));
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_ASIMM:
+         min_value = -128;
+         goto sve_aimm;
+
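
Before the final 8-bit range check, the sve_aimm path normalises an unshifted constant with a clear low byte into the shifted form, so #0x1100 on .h elements is treated as #0x11, lsl #8; SVE_ASIMM re-enters the same code with a signed baseline of -128. A rough standalone sketch of the unsigned case only, not the library routine itself:

  #include <stdint.h>

  /* Sketch: split an element-sized unsigned constant into an 8-bit
     immediate plus an optional "lsl #8"; returns 0 if it cannot be done.  */
  static int
  sve_split_aimm (uint64_t value, int esize, unsigned int *imm8,
                  unsigned int *shift)
  {
    uint64_t mask = esize == 8 ? ~(uint64_t) 0
                               : ((uint64_t) 1 << (esize * 8)) - 1;

    if ((value & ~mask) != 0)
      return 0;                     /* Does not fit the element at all.  */
    *shift = (esize > 1 && value != 0 && (value & 0xff) == 0) ? 8 : 0;
    value >>= *shift;
    if (value > 0xff)
      return 0;                     /* Still too wide for the 8-bit field.  */
    *imm8 = value;
    return 1;                       /* 0x1100 on .h -> imm8 0x11, shift 8.  */
  }
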
+       case AARCH64_OPND_SVE_I1_HALF_ONE:
+         assert (opnd->imm.is_fp);
+         if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("floating-point value must be 0.5 or 1.0"));
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_I1_HALF_TWO:
+         assert (opnd->imm.is_fp);
+         if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("floating-point value must be 0.5 or 2.0"));
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_I1_ZERO_ONE:
+         assert (opnd->imm.is_fp);
+         if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
+           {
+             set_other_error (mismatch_detail, idx,
+                              _("floating-point value must be 0.0 or 1.0"));
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_INV_LIMM:
+         {
+           int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
+           uint64_t uimm = ~opnd->imm.value;
+           if (!aarch64_logical_immediate_p (uimm, esize, NULL))
+             {
+               set_other_error (mismatch_detail, idx,
+                                _("immediate out of range"));
+               return 0;
+             }
+         }
+         break;
+
+       case AARCH64_OPND_SVE_LIMM_MOV:
+         {
+           int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
+           uint64_t uimm = opnd->imm.value;
+           if (!aarch64_logical_immediate_p (uimm, esize, NULL))
+             {
+               set_other_error (mismatch_detail, idx,
+                                _("immediate out of range"));
+               return 0;
+             }
+           if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
+             {
+               set_other_error (mismatch_detail, idx,
+                                _("invalid replicated MOV immediate"));
+               return 0;
+             }
+         }
+         break;
+
+       case AARCH64_OPND_SVE_PATTERN_SCALED:
+         assert (opnd->shifter.kind == AARCH64_MOD_MUL);
+         if (!value_in_range_p (opnd->shifter.amount, 1, 16))
+           {
+             set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_SHLIMM_PRED:
+       case AARCH64_OPND_SVE_SHLIMM_UNPRED:
+         size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
+         if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
+           {
+             set_imm_out_of_range_error (mismatch_detail, idx,
+                                         0, 8 * size - 1);
+             return 0;
+           }
+         break;
+
+       case AARCH64_OPND_SVE_SHRIMM_PRED:
+       case AARCH64_OPND_SVE_SHRIMM_UNPRED:
+         size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
+         if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
+           {
+             set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
+             return 0;
+           }
+         break;
+
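
Note the asymmetry between the two groups: left-shift immediates run from 0 to one less than the element size in bits, while right-shift immediates run from 1 up to the element size, matching the architectural encodings. In concrete, hand-checked terms for .b elements (size == 1):

  /* LSL immediates are 0..7, LSR/ASR immediates are 1..8.  */
  assert (value_in_range_p (0, 0, 8 * 1 - 1) && !value_in_range_p (8, 0, 8 * 1 - 1));
  assert (value_in_range_p (8, 1, 8 * 1) && !value_in_range_p (0, 1, 8 * 1));
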
        default:
          break;
        }
@@ -2058,6 +2540,23 @@ aarch64_match_operands_constraint (aarch64_inst *inst,
 
   DEBUG_TRACE ("enter");
 
+  /* Check for cases where a source register needs to be the same as the
+     destination register.  Do this before matching qualifiers since if
+     an instruction has both invalid tying and invalid qualifiers,
+     the error about qualifiers would suggest several alternative
+     instructions that also have invalid tying.  */
+  i = inst->opcode->tied_operand;
+  if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
+    {
+      if (mismatch_detail)
+       {
+         mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
+         mismatch_detail->index = i;
+         mismatch_detail->error = NULL;
+       }
+      return 0;
+    }
+
   /* Match operands' qualifier.
      *INST has already had qualifier establish for some, if not all, of
      its operands; we need to find out whether these established
@@ -2167,6 +2666,17 @@ static const char *int_reg[2][2][32] = {
 #undef R64
 #undef R32
 };
+
+/* Names of the SVE vector registers, first with .S suffixes,
+   then with .D suffixes.  */
+
+static const char *sve_reg[2][32] = {
+#define ZS(X) "z" #X ".s"
+#define ZD(X) "z" #X ".d"
+  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
+#undef ZD
+#undef ZS
+};
 #undef BANK
 
 /* Return the integer register name.
@@ -2210,6 +2720,17 @@ get_offset_int_reg_name (const aarch64_opnd_info *opnd)
     }
 }
 
+/* Get the name of the SVE vector offset register in OPND, using the operand
+   qualifier to decide whether the suffix should be .S or .D.  */
+
+static inline const char *
+get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
+{
+  assert (qualifier == AARCH64_OPND_QLF_S_S
+         || qualifier == AARCH64_OPND_QLF_S_D);
+  return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
+}
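
With the sve_reg table above, this helper simply selects the .s or .d row for the given register number. A usage sketch (illustrative only; the function is file-static):

  /* Expected results for the static helper above.  */
  assert (strcmp (get_addr_sve_reg_name (5, AARCH64_OPND_QLF_S_D), "z5.d") == 0);
  assert (strcmp (get_addr_sve_reg_name (0, AARCH64_OPND_QLF_S_S), "z0.s") == 0);
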
+
 /* Types for expanding an encoded 8-bit value to a floating-point value.  */
 
 typedef union
@@ -2349,7 +2870,13 @@ print_immediate_offset_address (char *buf, size_t size,
     }
   else
     {
-      if (opnd->addr.offset.imm)
+      if (opnd->shifter.operator_present)
+       {
+         assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
+         snprintf (buf, size, "[%s,#%d,mul vl]",
+                   base, opnd->addr.offset.imm);
+       }
+      else if (opnd->addr.offset.imm)
        snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
       else
        snprintf (buf, size, "[%s]", base);
@@ -2385,7 +2912,8 @@ print_register_offset_address (char *buf, size_t size,
   if (print_extend_p)
     {
       if (print_amount_p)
-       snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount);
+       snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
+                 opnd->shifter.amount);
       else
        snprintf (tb, sizeof (tb), ",%s", shift_name);
     }
@@ -2412,11 +2940,11 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
                       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
                       bfd_vma *address)
 {
-  int i;
+  unsigned int i, num_conds;
   const char *name = NULL;
   const aarch64_opnd_info *opnd = opnds + idx;
   enum aarch64_modifier_kind kind;
-  uint64_t addr;
+  uint64_t addr, enum_value;
 
   buf[0] = '\0';
   if (pcrel_p)
@@ -2433,6 +2961,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_Ra:
     case AARCH64_OPND_Rt_SYS:
     case AARCH64_OPND_PAIRREG:
+    case AARCH64_OPND_SVE_Rm:
       /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
         the <ic_op>, therefore we use opnd->present to override the
         generic optional-ness information.  */
@@ -2450,6 +2979,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
 
     case AARCH64_OPND_Rd_SP:
     case AARCH64_OPND_Rn_SP:
+    case AARCH64_OPND_SVE_Rn_SP:
       assert (opnd->qualifier == AARCH64_OPND_QLF_W
              || opnd->qualifier == AARCH64_OPND_QLF_WSP
              || opnd->qualifier == AARCH64_OPND_QLF_X
@@ -2480,7 +3010,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
            }
        }
       if (opnd->shifter.amount)
-       snprintf (buf, size, "%s, %s #%d",
+       snprintf (buf, size, "%s, %s #%" PRIi64,
                  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
                  aarch64_operand_modifiers[kind].name,
                  opnd->shifter.amount);
@@ -2497,7 +3027,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
        snprintf (buf, size, "%s",
                  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
       else
-       snprintf (buf, size, "%s, %s #%d",
+       snprintf (buf, size, "%s, %s #%" PRIi64,
                  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
                  aarch64_operand_modifiers[opnd->shifter.kind].name,
                  opnd->shifter.amount);
@@ -2512,6 +3042,10 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_Sd:
     case AARCH64_OPND_Sn:
     case AARCH64_OPND_Sm:
+    case AARCH64_OPND_SVE_VZn:
+    case AARCH64_OPND_SVE_Vd:
+    case AARCH64_OPND_SVE_Vm:
+    case AARCH64_OPND_SVE_Vn:
       snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
                opnd->reg.regno);
       break;
@@ -2543,6 +3077,50 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
       print_register_list (buf, size, opnd, "v");
       break;
 
+    case AARCH64_OPND_SVE_Pd:
+    case AARCH64_OPND_SVE_Pg3:
+    case AARCH64_OPND_SVE_Pg4_5:
+    case AARCH64_OPND_SVE_Pg4_10:
+    case AARCH64_OPND_SVE_Pg4_16:
+    case AARCH64_OPND_SVE_Pm:
+    case AARCH64_OPND_SVE_Pn:
+    case AARCH64_OPND_SVE_Pt:
+      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
+       snprintf (buf, size, "p%d", opnd->reg.regno);
+      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
+              || opnd->qualifier == AARCH64_OPND_QLF_P_M)
+       snprintf (buf, size, "p%d/%s", opnd->reg.regno,
+                 aarch64_get_qualifier_name (opnd->qualifier));
+      else
+       snprintf (buf, size, "p%d.%s", opnd->reg.regno,
+                 aarch64_get_qualifier_name (opnd->qualifier));
+      break;
+
+    case AARCH64_OPND_SVE_Za_5:
+    case AARCH64_OPND_SVE_Za_16:
+    case AARCH64_OPND_SVE_Zd:
+    case AARCH64_OPND_SVE_Zm_5:
+    case AARCH64_OPND_SVE_Zm_16:
+    case AARCH64_OPND_SVE_Zn:
+    case AARCH64_OPND_SVE_Zt:
+      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
+       snprintf (buf, size, "z%d", opnd->reg.regno);
+      else
+       snprintf (buf, size, "z%d.%s", opnd->reg.regno,
+                 aarch64_get_qualifier_name (opnd->qualifier));
+      break;
+
+    case AARCH64_OPND_SVE_ZnxN:
+    case AARCH64_OPND_SVE_ZtxN:
+      print_register_list (buf, size, opnd, "z");
+      break;
+
+    case AARCH64_OPND_SVE_Zn_INDEX:
+      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
+               aarch64_get_qualifier_name (opnd->qualifier),
+               opnd->reglane.index);
+      break;
+
     case AARCH64_OPND_Cn:
     case AARCH64_OPND_Cm:
       snprintf (buf, size, "C%d", opnd->reg.regno);
@@ -2561,9 +3139,73 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_IMMR:
     case AARCH64_OPND_IMMS:
     case AARCH64_OPND_FBITS:
+    case AARCH64_OPND_SIMM5:
+    case AARCH64_OPND_SVE_SHLIMM_PRED:
+    case AARCH64_OPND_SVE_SHLIMM_UNPRED:
+    case AARCH64_OPND_SVE_SHRIMM_PRED:
+    case AARCH64_OPND_SVE_SHRIMM_UNPRED:
+    case AARCH64_OPND_SVE_SIMM5:
+    case AARCH64_OPND_SVE_SIMM5B:
+    case AARCH64_OPND_SVE_SIMM6:
+    case AARCH64_OPND_SVE_SIMM8:
+    case AARCH64_OPND_SVE_UIMM3:
+    case AARCH64_OPND_SVE_UIMM7:
+    case AARCH64_OPND_SVE_UIMM8:
+    case AARCH64_OPND_SVE_UIMM8_53:
       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
       break;
 
+    case AARCH64_OPND_SVE_I1_HALF_ONE:
+    case AARCH64_OPND_SVE_I1_HALF_TWO:
+    case AARCH64_OPND_SVE_I1_ZERO_ONE:
+      {
+       single_conv_t c;
+       c.i = opnd->imm.value;
+       snprintf (buf, size, "#%.1f", c.f);
+       break;
+      }
+
+    case AARCH64_OPND_SVE_PATTERN:
+      if (optional_operand_p (opcode, idx)
+         && opnd->imm.value == get_optional_operand_default_value (opcode))
+       break;
+      enum_value = opnd->imm.value;
+      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
+      if (aarch64_sve_pattern_array[enum_value])
+       snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
+      else
+       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+      break;
+
+    case AARCH64_OPND_SVE_PATTERN_SCALED:
+      if (optional_operand_p (opcode, idx)
+         && !opnd->shifter.operator_present
+         && opnd->imm.value == get_optional_operand_default_value (opcode))
+       break;
+      enum_value = opnd->imm.value;
+      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
+      if (aarch64_sve_pattern_array[opnd->imm.value])
+       snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
+      else
+       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+      if (opnd->shifter.operator_present)
+       {
+         size_t len = strlen (buf);
+         snprintf (buf + len, size - len, ", %s #%" PRIi64,
+                   aarch64_operand_modifiers[opnd->shifter.kind].name,
+                   opnd->shifter.amount);
+       }
+      break;
+
+    case AARCH64_OPND_SVE_PRFOP:
+      enum_value = opnd->imm.value;
+      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
+      if (aarch64_sve_prfop_array[enum_value])
+       snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
+      else
+       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+      break;
+
     case AARCH64_OPND_IMM_MOV:
       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
        {
@@ -2588,8 +3230,11 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_LIMM:
     case AARCH64_OPND_AIMM:
     case AARCH64_OPND_HALF:
+    case AARCH64_OPND_SVE_INV_LIMM:
+    case AARCH64_OPND_SVE_LIMM:
+    case AARCH64_OPND_SVE_LIMM_MOV:
       if (opnd->shifter.amount)
-       snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
+       snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
                  opnd->shifter.amount);
       else
        snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
@@ -2601,13 +3246,23 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
          || opnd->shifter.kind == AARCH64_MOD_NONE)
        snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
       else
-       snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
+       snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
                  aarch64_operand_modifiers[opnd->shifter.kind].name,
                  opnd->shifter.amount);
       break;
 
+    case AARCH64_OPND_SVE_AIMM:
+    case AARCH64_OPND_SVE_ASIMM:
+      if (opnd->shifter.amount)
+       snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
+                 opnd->shifter.amount);
+      else
+       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+      break;
+
     case AARCH64_OPND_FPIMM:
     case AARCH64_OPND_SIMD_FPIMM:
+    case AARCH64_OPND_SVE_FPIMM8:
       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
        {
        case 2: /* e.g. FMOV <Hd>, #<imm>.  */
@@ -2651,6 +3306,17 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_COND:
     case AARCH64_OPND_COND1:
       snprintf (buf, size, "%s", opnd->cond->names[0]);
+      num_conds = ARRAY_SIZE (opnd->cond->names);
+      for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
+       {
+         size_t len = strlen (buf);
+         if (i == 1)
+           snprintf (buf + len, size - len, "  // %s = %s",
+                     opnd->cond->names[0], opnd->cond->names[i]);
+         else
+           snprintf (buf + len, size - len, ", %s",
+                     opnd->cond->names[i]);
+       }
       break;
 
     case AARCH64_OPND_ADDR_ADRP:
@@ -2699,18 +3365,71 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
       break;
 
     case AARCH64_OPND_ADDR_REGOFF:
+    case AARCH64_OPND_SVE_ADDR_RR:
+    case AARCH64_OPND_SVE_ADDR_RR_LSL1:
+    case AARCH64_OPND_SVE_ADDR_RR_LSL2:
+    case AARCH64_OPND_SVE_ADDR_RR_LSL3:
+    case AARCH64_OPND_SVE_ADDR_RX:
+    case AARCH64_OPND_SVE_ADDR_RX_LSL1:
+    case AARCH64_OPND_SVE_ADDR_RX_LSL2:
+    case AARCH64_OPND_SVE_ADDR_RX_LSL3:
       print_register_offset_address
        (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
         get_offset_int_reg_name (opnd));
       break;
 
+    case AARCH64_OPND_SVE_ADDR_RZ:
+    case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
+    case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
+    case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
+    case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
+    case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
+    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
+    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
+    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
+    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
+    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
+    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
+      print_register_offset_address
+       (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
+        get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
+      break;
+
     case AARCH64_OPND_ADDR_SIMM7:
     case AARCH64_OPND_ADDR_SIMM9:
     case AARCH64_OPND_ADDR_SIMM9_2:
+    case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
+    case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
+    case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
+    case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
+    case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
+    case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
+    case AARCH64_OPND_SVE_ADDR_RI_U6:
+    case AARCH64_OPND_SVE_ADDR_RI_U6x2:
+    case AARCH64_OPND_SVE_ADDR_RI_U6x4:
+    case AARCH64_OPND_SVE_ADDR_RI_U6x8:
       print_immediate_offset_address
        (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
       break;
 
+    case AARCH64_OPND_SVE_ADDR_ZI_U5:
+    case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
+    case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
+    case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
+      print_immediate_offset_address
+       (buf, size, opnd,
+        get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
+      break;
+
+    case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
+    case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
+    case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
+      print_register_offset_address
+       (buf, size, opnd,
+        get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
+        get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
+      break;
+
     case AARCH64_OPND_ADDR_UIMM12:
       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
       if (opnd->addr.offset.imm)
@@ -3481,6 +4200,33 @@ verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
   return TRUE;
 }
 
+/* Return true if VALUE cannot be moved into an SVE register using DUP
+   (with any element size, not just ESIZE) and if using DUPM would
+   therefore be OK.  ESIZE is the number of bytes in the immediate.  */
+
+bfd_boolean
+aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
+{
+  int64_t svalue = uvalue;
+  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
+
+  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
+    return FALSE;
+  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
+    {
+      svalue = (int32_t) uvalue;
+      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
+       {
+         svalue = (int16_t) uvalue;
+         if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
+           return FALSE;
+       }
+    }
+  if ((svalue & 0xff) == 0)
+    svalue /= 256;
+  return svalue < -128 || svalue >= 128;
+}
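
In other words, DUPM is only suggested when the replicated element, reduced to its smallest repeating width, does not fit DUP's 8-bit immediate with an optional LSL #8. Two hand-checked examples of the intended behaviour, assuming the prototype exposed by the opcode headers (these are illustrations, not generated test results):

  #include <assert.h>
  #include <stdint.h>

  /* Prototype assumed from the opcode headers; bfd_boolean is int-like.  */
  extern int aarch64_sve_dupm_mov_immediate_p (uint64_t, int);

  static void
  check_dupm_examples (void)
  {
    /* 0x0000ffff per 32-bit element is a valid bitmask immediate but cannot
       be built with DUP's shifted 8-bit immediate, so DUPM is preferred.  */
    assert (aarch64_sve_dupm_mov_immediate_p (0xffff, 4));

    /* 0xff00 per 16-bit element reduces to -256, i.e. -1 shifted left by 8,
       which DUP can encode, so DUPM is reported as unnecessary.  */
    assert (!aarch64_sve_dupm_mov_immediate_p (0xff00, 2));
  }
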
+
 /* Include the opcode description table as well as the operand description
    table.  */
 #define VERIFIER(x) verify_##x