static int fix_v4bx = FALSE;
/* Warn on using deprecated features. */
static int warn_on_deprecated = TRUE;
+static int warn_on_restrict_it = FALSE;
/* Understand CodeComposer Studio assembly syntax. */
bfd_boolean codecomposer_syntax = FALSE;
ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
static const arm_feature_set arm_ext_bf16 =
ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16);
+static const arm_feature_set arm_ext_i8mm =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM);
+static const arm_feature_set arm_ext_crc =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC);
static const arm_feature_set arm_arch_any = ARM_ANY;
-#ifdef OBJ_ELF
static const arm_feature_set fpu_any = FPU_ANY;
-#endif
static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
-static const arm_feature_set crc_ext_armv8 =
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_v8_1 =
ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
static const arm_feature_set fpu_neon_ext_dotprod =
inst.operands[0].present = 1;
}
-static void
-do_vfp_nsyn_push (void)
-{
- nsyn_insert_sp ();
-
- constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
- _("register list must contain at least 1 and at most 16 "
- "registers"));
-
- if (inst.operands[1].issingle)
- do_vfp_nsyn_opcode ("fstmdbs");
- else
- do_vfp_nsyn_opcode ("fstmdbd");
-}
-
-static void
-do_vfp_nsyn_pop (void)
-{
- nsyn_insert_sp ();
-
- constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
- _("register list must contain at least 1 and at most 16 "
- "registers"));
-
- if (inst.operands[1].issingle)
- do_vfp_nsyn_opcode ("fldmias");
- else
- do_vfp_nsyn_opcode ("fldmiad");
-}
-
/* Fix up Neon data-processing instructions, ORing in the correct bits for
ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
{
constraint (!inst.operands[1].isreg && !inst.operands[0].isquad, BAD_FPU);
- constraint ((inst.instruction & 0xd00) == 0xd00,
- _("immediate value out of range"));
}
}
static void
do_neon_ldm_stm (void)
{
+ constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
+ && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
+ _(BAD_FPU));
/* P, U and L bits are part of bitmask. */
int is_dbmode = (inst.instruction & (1 << 24)) != 0;
unsigned offsetbits = inst.operands[1].imm * 2;
do_vfp_cond_or_thumb ();
}
+/* Assemble the VPOP pseudo-instruction: load a register list from the
+   stack.  SP is inserted as the implicit base register, then the
+   instruction is re-encoded as VLDM (MVE) or FLDMIAS/FLDMIAD (VFP).  */
+static void
+do_vfp_nsyn_pop (void)
+{
+ nsyn_insert_sp ();
+ /* With MVE, VPOP is accepted without the scalar FPU and is encoded
+    through the generic VLDM path.  */
+ if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
+ return do_vfp_nsyn_opcode ("vldm");
+ }
+
+ /* Outside MVE at least single-precision VFP is required.  */
+ constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
+ _(BAD_FPU));
+
+ constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
+ _("register list must contain at least 1 and at most 16 "
+ "registers"));
+
+ /* Pick the single- or double-precision load-multiple form.  */
+ if (inst.operands[1].issingle)
+ do_vfp_nsyn_opcode ("fldmias");
+ else
+ do_vfp_nsyn_opcode ("fldmiad");
+}
+
+/* Assemble the VPUSH pseudo-instruction: store a register list to the
+   stack.  SP is inserted as the implicit base register, then the
+   instruction is re-encoded as VSTMDB (MVE) or FSTMDBS/FSTMDBD (VFP).  */
+static void
+do_vfp_nsyn_push (void)
+{
+ nsyn_insert_sp ();
+ /* With MVE, VPUSH is accepted without the scalar FPU and is encoded
+    through the generic VSTMDB path.  */
+ if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
+ return do_vfp_nsyn_opcode ("vstmdb");
+ }
+
+ /* Outside MVE at least single-precision VFP is required.  */
+ constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
+ _(BAD_FPU));
+
+ constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
+ _("register list must contain at least 1 and at most 16 "
+ "registers"));
+
+ /* Pick the single- or double-precision store-multiple form.  */
+ if (inst.operands[1].issingle)
+ do_vfp_nsyn_opcode ("fstmdbs");
+ else
+ do_vfp_nsyn_opcode ("fstmdbd");
+}
+
+
static void
do_neon_ldr_str (void)
{
/* VLDR/VSTR. */
else
{
- if (!mark_feature_used (&fpu_vfp_ext_v1xd))
+ if (!mark_feature_used (&fpu_vfp_ext_v1xd)
+ && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
as_bad (_("Instruction not permitted on this architecture"));
do_neon_ldr_str ();
}
return do_neon_dotproduct (1);
}
+/* Encode the Armv8.6-A I8MM VUSDOT (unsigned by signed dot product)
+   instruction, in both its indexed (scalar) and register forms.
+   Not permitted inside an IT block.  */
+static void
+do_vusdot (void)
+{
+ enum neon_shape rs;
+ set_pred_insn_type (OUTSIDE_PRED_INSN);
+ if (inst.operands[2].isscalar)
+ {
+ /* Indexed form: Dd/Qd, Dn/Qn, Dm[index].  */
+ rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
+ neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
+
+ inst.instruction |= (1 << 25);
+ /* The parsed scalar packs the lane index into the low 4 bits and the
+    register number above them; only lanes 0 and 1 are encodable.  */
+ int index = inst.operands[2].reg & 0xf;
+ constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
+ inst.operands[2].reg >>= 4;
+ constraint (!(inst.operands[2].reg < 16),
+ _("indexed register must be less than 16"));
+ neon_three_args (rs == NS_QQS);
+ /* Lane index goes in bit 5 of the encoding.  */
+ inst.instruction |= (index << 5);
+ }
+ else
+ {
+ /* Register form: bit 21 distinguishes it from the indexed form.  */
+ inst.instruction |= (1 << 21);
+ rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
+ neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
+ neon_three_args (rs == NS_QQQ);
+ }
+}
+
+/* Encode the Armv8.6-A I8MM VSUDOT (signed by unsigned dot product)
+   instruction.  Only the indexed (scalar) form exists for VSUDOT --
+   the opcode table restricts operand 3 to a scalar -- so there is no
+   register-form branch here.  Not permitted inside an IT block.  */
+static void
+do_vsudot (void)
+{
+ enum neon_shape rs;
+ set_pred_insn_type (OUTSIDE_PRED_INSN);
+ if (inst.operands[2].isscalar)
+ {
+ rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
+ neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);
+
+ inst.instruction |= (1 << 25);
+ /* The parsed scalar packs the lane index into the low 4 bits and the
+    register number above them; only lanes 0 and 1 are encodable.  */
+ int index = inst.operands[2].reg & 0xf;
+ constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
+ inst.operands[2].reg >>= 4;
+ constraint (!(inst.operands[2].reg < 16),
+ _("indexed register must be less than 16"));
+ neon_three_args (rs == NS_QQS);
+ /* Lane index goes in bit 5 of the encoding.  */
+ inst.instruction |= (index << 5);
+ }
+}
+
+/* Encode the Armv8.6-A I8MM VSMMLA (signed 8-bit integer matrix
+   multiply-accumulate) instruction.  Q registers only; also reused as
+   the encoder for VUSMMLA via the opcode table.  Not permitted inside
+   an IT block.  */
+static void
+do_vsmmla (void)
+{
+ enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
+ neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
+
+ set_pred_insn_type (OUTSIDE_PRED_INSN);
+
+ neon_three_args (1);
+
+}
+
+/* Encode the Armv8.6-A I8MM VUMMLA (unsigned 8-bit integer matrix
+   multiply-accumulate) instruction.  Q registers only.  Not permitted
+   inside an IT block.  */
+static void
+do_vummla (void)
+{
+ enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
+ neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);
+
+ set_pred_insn_type (OUTSIDE_PRED_INSN);
+
+ neon_three_args (1);
+
+}
+
/* Crypto v1 instructions. */
static void
do_crypto_2op_1 (unsigned elttype, int op)
handle_pred_state ();
if (now_pred.insn_cond
+ && warn_on_restrict_it
&& !now_pred.warn_deprecated
&& warn_on_deprecated
&& ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
#undef ARM_VARIANT
-#define ARM_VARIANT & crc_ext_armv8
+#define ARM_VARIANT & arm_ext_crc
#undef THUMB_VARIANT
-#define THUMB_VARIANT & crc_ext_armv8
+#define THUMB_VARIANT & arm_ext_crc
TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
#define THUMB_VARIANT & arm_ext_v6t2
mcCE(vmrs, ef00a10, 2, (APSR_RR, RVC), vmrs),
mcCE(vmsr, ee00a10, 2, (RVC, RR), vmsr),
+ mcCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
+ mcCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
+ mcCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
+ mcCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
#undef THUMB_VARIANT
/* Moves and type conversions. */
cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
/* Memory operations. */
- cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
- cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
/* Double precision load/store are still present on single precision
implementations. */
- cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
- cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
Individual encoder functions perform additional architecture checks. */
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_vfp_ext_v1xd
+#undef THUMB_VARIANT
+#define THUMB_VARIANT & arm_ext_v6t2
+
+ NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
+ NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
+ NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
+ NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
+ NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
+ NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
+
+ NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
+ NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
+
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_vfp_ext_v1xd
nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
- NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
- NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
/* Mnemonics shared by Neon and VFP. */
nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
- NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
- NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
- NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
- NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
- NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
- NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
-
mnCEF(vcvt, _vcvt, 3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
MNCEF(vcvtb, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
#define THUMB_VARIANT &arm_ext_i8mm
TUF ("vsmmla", c200c40, fc200c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
TUF ("vummla", c200c50, fc200c50, 3, (RNQ, RNQ, RNQ), vummla, vummla),
- TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vummla, vummla),
+ TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
};
{"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
{"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
&warn_on_deprecated, 0, NULL},
+
+ {"mwarn-restrict-it", N_("warn about performance deprecated IT instructions"
+ " in ARMv8-A and ARMv8-R"), &warn_on_restrict_it, 1, NULL},
+ {"mno-warn-restrict-it", NULL, &warn_on_restrict_it, 0, NULL},
+
{"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
{"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
{NULL, NULL, NULL, 0, NULL}
ARM_ARCH_NONE,
FPU_ARCH_NEON_VFP_V4),
ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
FPU_ARCH_VFP_V3D16),
ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-m35p", "Cortex-M35P", ARM_ARCH_V8M_MAIN,
ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
ARM_ARCH_NONE,
FPU_NONE),
ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
ARM_ARCH_NONE,
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
- ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
{ NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
static const struct arm_ext_table armv8a_ext_table[] =
{
- ARM_ADD ("crc", ARCH_CRC_ARMV8),
+ ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
+ ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
+ ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
{
ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
+ ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
+ ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
{
ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
+ ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
+ ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
static const struct arm_ext_table armv86a_ext_table[] =
{
+ ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
{ NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
static const struct arm_ext_table armv8r_ext_table[] =
{
- ARM_ADD ("crc", ARCH_CRC_ARMV8),
+ ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
use the context sensitive approach using arm_ext_table's. */
static const struct arm_option_extension_value_table arm_extensions[] =
{
- ARM_EXT_OPT ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
- ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16),
- ARM_ARCH_V8_2A),
- ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
+ ARM_EXT_OPT ("crc", ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
+ ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
if (streq (opt->name, name))
{
selected_arch = opt->value;
+ selected_ctx_ext_table = opt->ext_table;
selected_ext = arm_arch_none;
selected_cpu = selected_arch;
strcpy (selected_cpu_name, opt->name);
if (streq (opt->name, name))
{
selected_fpu = opt->value;
+ ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, fpu_any);
#ifndef CPU_DEFAULT
if (no_cpu_selected ())
ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);