/* tc-arm.c -- Assemble for the ARM
- Copyright (C) 1994-2017 Free Software Foundation, Inc.
+ Copyright (C) 1994-2018 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
Modified by David Taylor (dtaylor@armltd.co.uk)
Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
#define streq(a, b) (strcmp (a, b) == 0)
+/* Current set of feature bits available (CPU+FPU). Different from
+ selected_cpu + selected_fpu in case of autodetection since the CPU
+ feature bits are then all set. */
static arm_feature_set cpu_variant;
+/* Feature bits used in each execution state. Used to set build attribute
+ (in particular Tag_*_ISA_use) in CPU autodetection mode. */
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;
/* Variables that we set while parsing command-line options. Once all
options have been read we re-process these values to set the real
assembly flags. */
+
+/* CPU and FPU feature bits set for legacy CPU and FPU options (e.g. -marm1
+   instead of -mcpu=arm1).  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;
+/* CPU, extension and FPU feature bits selected by -mcpu. */
static const arm_feature_set *mcpu_cpu_opt = NULL;
-static arm_feature_set *dyn_mcpu_ext_opt = NULL;
+static arm_feature_set *mcpu_ext_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
+
+/* CPU, extension and FPU feature bits selected by -march. */
static const arm_feature_set *march_cpu_opt = NULL;
-static arm_feature_set *dyn_march_ext_opt = NULL;
+static arm_feature_set *march_ext_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
+
+/* Feature bits selected by -mfpu. */
static const arm_feature_set *mfpu_opt = NULL;
-static const arm_feature_set *object_arch = NULL;
/* Constants for known architecture features. */
static const arm_feature_set fpu_default = FPU_DEFAULT;
#endif
static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
-static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
+static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
/* FP16 instructions. */
static const arm_feature_set arm_ext_fp16 =
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
+static const arm_feature_set arm_ext_fp16_fml =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
+static const arm_feature_set arm_ext_v8_2 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
static const arm_feature_set arm_ext_v8_3 =
ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
static int mfloat_abi_opt = -1;
-/* Record user cpu selection for object attributes. */
+/* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
+ directive. */
+static arm_feature_set selected_arch = ARM_ARCH_NONE;
+/* Extension feature bits selected by the last -mcpu/-march or .arch_extension
+ directive. */
+static arm_feature_set selected_ext = ARM_ARCH_NONE;
+/* Feature bits selected by the last -mcpu/-march or by the combination of the
+   last .cpu/.arch directive and any .arch_extension directives issued since
+   that directive.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
+/* FPU feature bits selected by the last -mfpu or .fpu directive. */
+static arm_feature_set selected_fpu = FPU_NONE;
+/* Feature bits selected by the last .object_arch directive. */
+static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus. */
static char selected_cpu_name[20];
struct reloc_entry
{
- const char * name;
+ const char * name;
bfd_reloc_code_real_type reloc;
};
};
/* ARM register categories. This includes coprocessor numbers and various
- architecture extensions' registers. */
+ architecture extensions' registers. Each entry should have an error message
+ in reg_expected_msgs below. */
enum arm_reg_type
{
REG_TYPE_RN,
REG_TYPE_NQ,
REG_TYPE_VFSD,
REG_TYPE_NDQ,
+ REG_TYPE_NSD,
REG_TYPE_NSDQ,
REG_TYPE_VFC,
REG_TYPE_MVF,
/* Diagnostics used when we don't get a register of the expected type. */
const char * const reg_expected_msgs[] =
{
- N_("ARM register expected"),
- N_("bad or missing co-processor number"),
- N_("co-processor register expected"),
- N_("FPA register expected"),
- N_("VFP single precision register expected"),
- N_("VFP/Neon double precision register expected"),
- N_("Neon quad precision register expected"),
- N_("VFP single or double precision register expected"),
- N_("Neon double or quad precision register expected"),
- N_("VFP single, double or Neon quad precision register expected"),
- N_("VFP system register expected"),
- N_("Maverick MVF register expected"),
- N_("Maverick MVD register expected"),
- N_("Maverick MVFX register expected"),
- N_("Maverick MVDX register expected"),
- N_("Maverick MVAX register expected"),
- N_("Maverick DSPSC register expected"),
- N_("iWMMXt data register expected"),
- N_("iWMMXt control register expected"),
- N_("iWMMXt scalar register expected"),
- N_("XScale accumulator register expected"),
+ [REG_TYPE_RN] = N_("ARM register expected"),
+ [REG_TYPE_CP] = N_("bad or missing co-processor number"),
+ [REG_TYPE_CN] = N_("co-processor register expected"),
+ [REG_TYPE_FN] = N_("FPA register expected"),
+ [REG_TYPE_VFS] = N_("VFP single precision register expected"),
+ [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
+ [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
+ [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
+ [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
+ [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
+ [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
+ " expected"),
+ [REG_TYPE_VFC] = N_("VFP system register expected"),
+ [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
+ [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
+ [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
+ [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
+ [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
+ [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
+ [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
+ [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
+ [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
+ [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
+ [REG_TYPE_RNB] = N_("")
};
/* Some well known registers that we refer to directly elsewhere. */
/* Return TRUE if anything in the expression is a bignum. */
-static int
+static bfd_boolean
walk_no_bignums (symbolS * sp)
{
if (symbol_get_value_expression (sp)->X_op == O_big)
- return 1;
+ return TRUE;
if (symbol_get_value_expression (sp)->X_add_symbol)
{
&& walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
}
- return 0;
+ return FALSE;
}
-static int in_my_get_expression = 0;
+static bfd_boolean in_my_get_expression = FALSE;
/* Third argument to my_get_expression. */
#define GE_NO_PREFIX 0
if (is_immediate_prefix (**str))
(*str)++;
break;
- default: abort ();
+ default:
+ abort ();
}
memset (ep, 0, sizeof (expressionS));
save_in = input_line_pointer;
input_line_pointer = *str;
- in_my_get_expression = 1;
+ in_my_get_expression = TRUE;
seg = expression (ep);
- in_my_get_expression = 0;
+ in_my_get_expression = FALSE;
if (ep->X_op == O_illegal || ep->X_op == O_absent)
{
*str = input_line_pointer;
input_line_pointer = save_in;
- return 0;
+ return SUCCESS;
}
/* Turn a string in input_line_pointer into a floating point constant
/* We handle all bad expressions here, so that we can report the faulty
instruction in the error message. */
+
void
md_operand (expressionS * exp)
{
/* Immediate values. */
+#ifdef OBJ_ELF
/* Generic immediate-value read function for use in directives.
Accepts anything that 'expression' can fold to a constant.
*val receives the number. */
-#ifdef OBJ_ELF
+
static int
immediate_for_directive (int *val)
{
|| (type == REG_TYPE_NSDQ
&& (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
|| reg->type == REG_TYPE_NQ))
+ || (type == REG_TYPE_NSD
+ && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
|| (type == REG_TYPE_MMXWC
&& (reg->type == REG_TYPE_MMXWCG)))
type = (enum arm_reg_type) reg->type;
if (skip_past_char (&str, '[') == SUCCESS)
{
- if (type != REG_TYPE_VFD)
+ if (type != REG_TYPE_VFD
+ && !(type == REG_TYPE_VFS
+ && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
{
first_error (_("only D registers may be indexed"));
return FAIL;
int reg;
char *str = *ccp;
struct neon_typed_alias atype;
+ enum arm_reg_type reg_type = REG_TYPE_VFD;
+
+ if (elsize == 4)
+ reg_type = REG_TYPE_VFS;
- reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
+ reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
return FAIL;
}
if (size > nbytes)
- as_bad (_("%s relocations do not fit in %d bytes"),
+ as_bad (ngettext ("%s relocations do not fit in %d byte",
+ "%s relocations do not fit in %d bytes",
+ nbytes),
howto->name, nbytes);
else
{
{"4byte", cons, 4},
{"8byte", cons, 8},
/* These are used for dwarf2. */
- { "file", (void (*) (int)) dwarf2_directive_file, 0 },
+ { "file", dwarf2_directive_file, 0 },
{ "loc", dwarf2_directive_loc, 0 },
{ "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
bfd_boolean prefix_opt)
{
expressionS exp;
+
my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
if (exp.X_op != O_constant)
{
else
{
char *q = p;
+
if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
return PARSE_OPERAND_FAIL;
/* If the offset is 0, find out if it's a +0 or -0. */
else
{
char *q = p;
+
if (inst.operands[i].negative)
{
inst.operands[i].negative = 0;
ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
}
-/* If the given feature available in the selected CPU, mark it as used.
- Returns TRUE iff feature is available. */
+/* If the given feature is currently allowed, mark it as used and return TRUE.
+ Return FALSE otherwise. */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{
- /* Ensure the option is valid on the current architecture. */
+ /* Ensure the option is currently allowed. */
if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
return FALSE;
- /* Add the appropriate architecture feature for the barrier option used.
- */
+ /* Add the appropriate architecture feature for the barrier option used. */
record_feature_use (feature);
return TRUE;
OP_RND, /* Neon double precision register (0..31) */
OP_RNQ, /* Neon quad precision register */
OP_RVSD, /* VFP single or double precision register */
+ OP_RNSD, /* Neon single or double precision register */
OP_RNDQ, /* Neon double or quad precision register */
OP_RNSDQ, /* Neon single, double or quad precision register */
OP_RNSC, /* Neon scalar D[X] */
OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero. */
OP_RR_RNSC, /* ARM reg or Neon scalar. */
+ OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar. */
OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
case OP_oRNQ:
case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
+ case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
case OP_oRNDQ:
case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
}
break;
+ case OP_RNSD_RNSC:
+ {
+ po_scalar_or_goto (8, try_s_scalar);
+ break;
+ try_s_scalar:
+ po_scalar_or_goto (4, try_nsd);
+ break;
+ try_nsd:
+ po_reg_or_fail (REG_TYPE_NSD);
+ }
+ break;
+
case OP_RNDQ_RNSC:
{
po_scalar_or_goto (8, try_ndq);
&& inst.reloc.exp.X_add_symbol != NULL
&& S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
&& THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
- inst.reloc.exp.X_add_number += 1;
+ inst.reloc.exp.X_add_number += 1;
}
/* This is a pseudo-op of the form "adrl rd, label" to be converted
&& inst.reloc.exp.X_add_symbol != NULL
&& S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
&& THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
- inst.reloc.exp.X_add_number += 1;
+ inst.reloc.exp.X_add_number += 1;
}
static void
/* Output R_ARM_V4BX relocations if is an EABI object that looks like
it is for ARMv4t or earlier. */
want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
- if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
+ if (!ARM_FEATURE_ZERO (selected_object_arch)
+ && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
want_reloc = TRUE;
#ifdef OBJ_ELF
{
int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
+ if (is_push && one_reg == 13 /* SP */)
+ /* PR 22483: The A2 encoding cannot be used when
+ pushing the stack pointer as this is UNPREDICTABLE. */
+ return;
+
inst.instruction &= A_COND_MASK;
inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
inst.instruction |= one_reg << 12;
if (inst.operands[1].isreg)
{
br = inst.operands[1].reg;
- if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
+ if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
as_bad (_("bad register for mrs"));
}
else
inst.instruction |= Rm;
}
+/* Encode a Thumb CSDB (Consumption of Speculative Data Barrier) instruction.
+   CSDB takes no operands; the only assembly-time constraint is that it must
+   not appear inside an IT block, which set_it_insn_type enforces.  */
+static void
+do_t_csdb (void)
+{
+  set_it_insn_type (OUTSIDE_IT_INSN);
+}
+
static void
do_t_cps (void)
{
X (2, (H, I), HALF), \
X (3, (H, H, H), HALF), \
X (3, (H, F, I), MIXED), \
- X (3, (F, H, I), MIXED)
+ X (3, (F, H, I), MIXED), \
+ X (3, (D, H, H), MIXED), \
+ X (3, (D, H, S), MIXED)
#define S2(A,B) NS_##A##B
#define S3(A,B,C) NS_##A##B##C
neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
+/* Like neon_scalar_for_mul, this function generates the Rm encoding from GAS's
+   internal SCALAR (a packed register number + lane index).  QUAD_P is 1 if
+   it's for the Q format, otherwise it's 0.  */
+
+static unsigned
+neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
+{
+  unsigned regno = NEON_SCALAR_REG (scalar);
+  unsigned elno = NEON_SCALAR_INDEX (scalar);
+
+  if (quad_p)
+    {
+      /* Q form: register number limited to 0-7 and lane index to 0-3.
+	 Lane bit 0 lands in bit 3 and lane bit 1 in bit 5.  */
+      if (regno > 7 || elno > 3)
+	goto bad_scalar;
+
+      return ((regno & 0x7)
+	      | ((elno & 0x1) << 3)
+	      | (((elno >> 1) & 0x1) << 5));
+    }
+  else
+    {
+      /* D form: register number limited to 0-15 and lane index to 0-1.
+	 Register bit 0 lands in bit 5, the remaining register bits in
+	 bits 0-2, and the lane bit in bit 3.  */
+      if (regno > 15 || elno > 1)
+	goto bad_scalar;
+
+      return (((regno & 0x1) << 5)
+	      | ((regno >> 1) & 0x7)
+	      | ((elno & 0x1) << 3));
+    }
+
+bad_scalar:
+  first_error (_("scalar out of range for multiply instruction"));
+  return 0;
+}
+
+/* Encode VFMAL/VFMSL (FP16 fused multiply-accumulate long), in either the
+   three-same D/Q register form or the scalar-indexed form.  SUBTYPE is 0 for
+   vfmal and 1 for vfmsl.  */
+
+static void
+do_neon_fmac_maybe_scalar_long (int subtype)
+{
+  enum neon_shape rs;
+  int high8;
+  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  'size'
+     field (bits[21:20]) has different meaning.  For scalar index variant, it's
+     used to differentiate add and subtract, otherwise it's with fixed value
+     0x2.  */
+  int size = -1;
+
+  /* Conditional execution of these FP16 instructions is UNPREDICTABLE;
+     warn rather than error so hand-written code still assembles.  */
+  if (inst.cond != COND_ALWAYS)
+    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
+	       "behaviour is UNPREDICTABLE"));
+
+  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
+	      _(BAD_FP16));
+
+  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
+	      _(BAD_FPU));
+
+  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
+     be a scalar index register.  */
+  if (inst.operands[2].isscalar)
+    {
+      high8 = 0xfe000000;
+      if (subtype)
+	size = 16;
+      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
+    }
+  else
+    {
+      high8 = 0xfc000000;
+      size = 32;
+      if (subtype)
+	inst.instruction |= (0x1 << 23);
+      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
+    }
+
+  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);
+
+  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
+     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
+     so we simply pass -1 as size.  */
+  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
+  neon_three_same (quad_p, 0, size);
+
+  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
+  inst.instruction &= 0x00ffffff;
+  inst.instruction |= high8;
+
+#define LOW1(R) ((R) & 0x1)
+#define HI4(R) (((R) >> 1) & 0xf)
+  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
+     whether the instruction is in Q form and whether Vm is a scalar indexed
+     operand.  */
+  if (inst.operands[2].isscalar)
+    {
+      unsigned rm
+	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
+      inst.instruction &= 0xffffffd0;
+      inst.instruction |= rm;
+
+      if (!quad_p)
+	{
+	  /* Redo Rn as well.  */
+	  inst.instruction &= 0xfff0ff7f;
+	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
+	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
+	}
+    }
+  else if (!quad_p)
+    {
+      /* Redo Rn and Rm.  */
+      inst.instruction &= 0xfff0ff50;
+      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
+      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
+      inst.instruction |= HI4 (inst.operands[2].reg);
+      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
+    }
+}
+
+/* Encode the VFMAL instruction (FP16 fused multiply-accumulate long).  */
+
+static void
+do_neon_vfmal (void)
+{
+  /* ISO C forbids `return' with an expression in a function returning
+     void (C11 6.8.6.4); call the worker plainly and fall off the end.  */
+  do_neon_fmac_maybe_scalar_long (0);
+}
+
+/* Encode the VFMSL instruction (FP16 fused multiply-subtract long).  */
+
+static void
+do_neon_vfmsl (void)
+{
+  /* ISO C forbids `return' with an expression in a function returning
+     void (C11 6.8.6.4); call the worker plainly and fall off the end.  */
+  do_neon_fmac_maybe_scalar_long (1);
+}
+
static void
do_neon_dyadic_wide (void)
{
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
_(BAD_FPU));
+ if (inst.cond != COND_ALWAYS)
+ {
+ if (thumb_mode)
+ {
+ as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
+ " the behaviour is UNPREDICTABLE"));
+ }
+ else
+ {
+ inst.error = BAD_COND;
+ return;
+ }
+ }
+
do_vfp_sp_monadic ();
inst.is_neon = 1;
if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
as_warn (UNPRED_REG ("r15"));
- if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
- as_warn (UNPRED_REG ("r13"));
}
static void
case OT_cinfix3_deprecated:
case OT_odd_infix_unc:
if (!unified_syntax)
- return 0;
+ return NULL;
/* Fall through. */
case OT_csuffix:
if (now_it.insn_cond
&& !now_it.warn_deprecated
&& warn_on_deprecated
- && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
+ && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
+ && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
{
if (inst.instruction >= 0x10000)
{
as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
- "deprecated in ARMv8"));
+ "performance deprecated in ARMv8-A and ARMv8-R"));
now_it.warn_deprecated = TRUE;
}
else
{
if ((inst.instruction & p->mask) == p->pattern)
{
- as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
- "of the following class are deprecated in ARMv8: "
- "%s"), p->description);
+ as_tsktsk (_("IT blocks containing 16-bit Thumb "
+ "instructions of the following class are "
+ "performance deprecated in ARMv8-A and "
+ "ARMv8-R: %s"), p->description);
now_it.warn_deprecated = TRUE;
break;
}
if (now_it.block_length > 1)
{
as_tsktsk (_("IT blocks containing more than one conditional "
- "instruction are deprecated in ARMv8"));
+ "instruction are performance deprecated in ARMv8-A and "
+ "ARMv8-R"));
now_it.warn_deprecated = TRUE;
}
}
#define C3(mnem, op, nops, ops, ae) \
{ #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
+/* Thumb-only variants of TCE and TUE. */
+#define ToC(mnem, top, nops, ops, te) \
+ { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
+ do_##te }
+
+#define ToU(mnem, top, nops, ops, te) \
+ { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
+ NULL, do_##te }
+
/* Legacy mnemonics that always have conditional infix after the third
character. */
#define CL(mnem, op, nops, ops, ae) \
TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
+#undef ARM_VARIANT
+#define ARM_VARIANT & arm_ext_v3
+#undef THUMB_VARIANT
+#define THUMB_VARIANT & arm_ext_v6t2
+
+ TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
+
+#undef ARM_VARIANT
+#define ARM_VARIANT & arm_ext_v6t2
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2_v8m
TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
+ /* New backported fma/fms instructions optional in v8.2. */
+ NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
+ NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
+
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_neon_ext_v1
#undef ARM_VARIANT
#define ARM_VARIANT NULL
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v8m
- TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
- TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
- TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
- TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
- TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
- TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
- TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
+ ToU("sg", e97fe97f, 0, (), noargs),
+ ToC("blxns", 4784, 1, (RRnpc), t_blx),
+ ToC("bxns", 4704, 1, (RRnpc), t_bx),
+ ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
+ ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
+ ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
+ ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
/* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
instructions behave as nop if no VFP is present. */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v8m_main
- TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn),
- TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn),
+ ToC("vlldm", ec300a00, 1, (RRnpc), rn),
+ ToC("vlstm", ec200a00, 1, (RRnpc), rn),
};
#undef ARM_VARIANT
#undef THUMB_VARIANT
}
/* Finish the list of unwind opcodes for this function. */
+
static void
finish_unwind_opcodes (void)
{
if (reg != FAIL)
return reg + 256;
- return -1;
+ return FAIL;
}
#ifdef TE_PE
}
/* Read a 32-bit thumb instruction from buf. */
+
static unsigned long
get_thumb32_insn (char * buf)
{
return insn;
}
-
/* We usually want to set the low bit on the address of thumb function
symbols. In particular .word foo - . should have the low bit set.
Generic code tries to fold the difference of two symbols to
/* MOV accepts both Thumb2 modified immediate (T2 encoding) and
UINT16 (T3 encoding), MOVW only accepts UINT16. When
disassembling, MOV is preferred when there is no encoding
- overlap.
- NOTE: MOV is using ORR opcode under Thumb 2 mode. */
+ overlap. */
if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
+ /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
+ but with the Rn field [19:16] set to 1111. */
+ && (((newval >> 16) & 0xf) == 0xf)
&& ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
&& !((newval >> T2_SBIT_SHIFT) & 0x1)
- && value >= 0 && value <=0xffff)
+ && value >= 0 && value <= 0xffff)
{
/* Toggle bit[25] to change encoding from T2 to T3. */
newval ^= 1 << 25;
if (mcpu_cpu_opt || march_cpu_opt)
as_bad (_("use of old and new-style options to set CPU type"));
- mcpu_cpu_opt = legacy_cpu;
+ selected_arch = *legacy_cpu;
}
- else if (!mcpu_cpu_opt)
+ else if (mcpu_cpu_opt)
{
- mcpu_cpu_opt = march_cpu_opt;
- dyn_mcpu_ext_opt = dyn_march_ext_opt;
- /* Avoid double free in arm_md_end. */
- dyn_march_ext_opt = NULL;
+ selected_arch = *mcpu_cpu_opt;
+ selected_ext = *mcpu_ext_opt;
}
+ else if (march_cpu_opt)
+ {
+ selected_arch = *march_cpu_opt;
+ selected_ext = *march_ext_opt;
+ }
+ ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
if (legacy_fpu)
{
if (mfpu_opt)
as_bad (_("use of old and new-style options to set FPU type"));
- mfpu_opt = legacy_fpu;
+ selected_fpu = *legacy_fpu;
}
- else if (!mfpu_opt)
+ else if (mfpu_opt)
+ selected_fpu = *mfpu_opt;
+ else
{
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
|| defined (TE_NetBSD) || defined (TE_VXWORKS))
/* Some environments specify a default FPU. If they don't, infer it
from the processor. */
if (mcpu_fpu_opt)
- mfpu_opt = mcpu_fpu_opt;
+ selected_fpu = *mcpu_fpu_opt;
else
- mfpu_opt = march_fpu_opt;
+ selected_fpu = *march_fpu_opt;
#else
- mfpu_opt = &fpu_default;
+ selected_fpu = fpu_default;
#endif
}
- if (!mfpu_opt)
+ if (ARM_FEATURE_ZERO (selected_fpu))
{
- if (mcpu_cpu_opt != NULL)
- mfpu_opt = &fpu_default;
- else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
- mfpu_opt = &fpu_arch_vfp_v2;
+ if (!no_cpu_selected ())
+ selected_fpu = fpu_default;
else
- mfpu_opt = &fpu_arch_fpa;
+ selected_fpu = fpu_arch_fpa;
}
#ifdef CPU_DEFAULT
- if (!mcpu_cpu_opt)
+ if (ARM_FEATURE_ZERO (selected_arch))
{
- mcpu_cpu_opt = &cpu_default;
- selected_cpu = cpu_default;
+ selected_arch = cpu_default;
+ selected_cpu = selected_arch;
}
- else if (dyn_mcpu_ext_opt)
- ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
- else
- selected_cpu = *mcpu_cpu_opt;
+ ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
- if (mcpu_cpu_opt && dyn_mcpu_ext_opt)
- ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
- else if (mcpu_cpu_opt)
- selected_cpu = *mcpu_cpu_opt;
+ /* Autodection of feature mode: allow all features in cpu_variant but leave
+ selected_cpu unset. It will be set in aeabi_set_public_attributes ()
+ after all instruction have been processed and we can decide what CPU
+ should be selected. */
+ if (ARM_FEATURE_ZERO (selected_arch))
+ ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
else
- mcpu_cpu_opt = &arm_arch_any;
+ ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif
- ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
- if (dyn_mcpu_ext_opt)
- ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
-
autoselect_thumb_from_cpu_variant ();
arm_arch_used = thumb_arch_used = arm_arch_none;
{NULL, no_argument, NULL, 0}
};
-
size_t md_longopts_size = sizeof (md_longopts);
struct arm_option_table
{
- const char *option; /* Option name to match. */
- const char *help; /* Help information. */
- int *var; /* Variable to change. */
- int value; /* What to change it to. */
- const char *deprecated; /* If non-null, print this message. */
+ const char * option; /* Option name to match. */
+ const char * help; /* Help information. */
+ int * var; /* Variable to change. */
+ int value; /* What to change it to. */
+ const char * deprecated; /* If non-null, print this message. */
};
struct arm_option_table arm_opts[] =
struct arm_legacy_option_table
{
- const char *option; /* Option name to match. */
- const arm_feature_set **var; /* Variable to change. */
- const arm_feature_set value; /* What to change it to. */
- const char *deprecated; /* If non-null, print this message. */
+ const char * option; /* Option name to match. */
+ const arm_feature_set ** var; /* Variable to change. */
+ const arm_feature_set value; /* What to change it to. */
+ const char * deprecated; /* If non-null, print this message. */
};
const struct arm_legacy_option_table arm_legacy_opts[] =
{"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
/* Floating point variants -- don't add any more to this list either. */
- {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
- {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
- {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
- {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
+ {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
+ {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
+ {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
+ {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
N_("use either -mfpu=softfpa or -mfpu=softvfp")},
{NULL, NULL, ARM_ARCH_NONE, NULL}
struct arm_cpu_option_table
{
- const char *name;
- size_t name_len;
- const arm_feature_set value;
- const arm_feature_set ext;
+ const char * name;
+ size_t name_len;
+ const arm_feature_set value;
+ const arm_feature_set ext;
/* For some CPUs we assume an FPU unless the user explicitly sets
-mfpu=... */
- const arm_feature_set default_fpu;
+ const arm_feature_set default_fpu;
/* The canonical name of the CPU, or NULL to use NAME converted to upper
case. */
- const char *canonical_name;
+ const char * canonical_name;
};
/* This list should, at a minimum, contain all the cpu names
recognized by GCC. */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
+
static const struct arm_cpu_option_table arm_cpus[] =
{
ARM_CPU_OPT ("all", NULL, ARM_ANY,
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
- FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
+ FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
- FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
+ FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
ARM_ARCH_NONE,
FPU_NONE),
ARM_ARCH_NONE,
FPU_ARCH_VFP_V2),
- /* Maverick */
+ /* Maverick. */
ARM_CPU_OPT ("ep9312", "ARM920T",
ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
struct arm_arch_option_table
{
- const char *name;
- size_t name_len;
- const arm_feature_set value;
- const arm_feature_set default_fpu;
+ const char * name;
+ size_t name_len;
+ const arm_feature_set value;
+ const arm_feature_set default_fpu;
};
/* This list should, at a minimum, contain all the architecture names
recognized by GCC. */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
+
static const struct arm_arch_option_table arm_archs[] =
{
ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP),
ARM_ARCH_OPT ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP),
+ ARM_ARCH_OPT ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP),
ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
#undef ARM_ARCH_OPT
/* ISA extensions in the co-processor and main instruction set space. */
+
struct arm_option_extension_value_table
{
- const char *name;
- size_t name_len;
- const arm_feature_set merge_value;
- const arm_feature_set clear_value;
+ const char * name;
+ size_t name_len;
+ const arm_feature_set merge_value;
+ const arm_feature_set clear_value;
/* List of architectures for which an extension is available. ARM_ARCH_NONE
indicates that an extension is available for all architectures while
ARM_ANY marks an empty entry. */
- const arm_feature_set allowed_archs[2];
+ const arm_feature_set allowed_archs[2];
};
-/* The following table must be in alphabetical order with a NULL last entry.
- */
+/* The following table must be in alphabetical order with a NULL last entry. */
+
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
+
static const struct arm_option_extension_value_table arm_extensions[] =
{
ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
ARM_ARCH_V8_2A),
+ ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
+ | ARM_EXT2_FP16_FML),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
+ | ARM_EXT2_FP16_FML),
+ ARM_ARCH_V8_2A),
ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
/* ISA floating-point and Advanced SIMD extensions. */
struct arm_option_fpu_value_table
{
- const char *name;
- const arm_feature_set value;
+ const char * name;
+ const arm_feature_set value;
};
/* This list should, at a minimum, contain all the fpu names
struct arm_long_option_table
{
- const char * option; /* Substring to match. */
+ const char * option; /* Substring to match. */
const char * help; /* Help information. */
int (* func) (const char * subopt); /* Function to decode sub-option. */
const char * deprecated; /* If non-null, print this message. */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
- arm_feature_set **ext_set_p)
+ arm_feature_set *ext_set)
{
/* We insist on extensions being specified in alphabetical order, and with
extensions being added before being removed. We achieve this by having
const arm_feature_set arm_any = ARM_ANY;
int adding_value = -1;
- if (!*ext_set_p)
- {
- *ext_set_p = XNEW (arm_feature_set);
- **ext_set_p = arm_arch_none;
- }
-
while (str != NULL && *str != 0)
{
const char *ext;
/* Add or remove the extension. */
if (adding_value)
- ARM_MERGE_FEATURE_SETS (**ext_set_p, **ext_set_p,
- opt->merge_value);
+ ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
else
- ARM_CLEAR_FEATURE (**ext_set_p, **ext_set_p, opt->clear_value);
+ ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
/* Allowing Thumb division instructions for ARMv7 in autodetection
rely on this break so that duplicate extensions (extensions
if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
{
mcpu_cpu_opt = &opt->value;
- if (!dyn_mcpu_ext_opt)
- dyn_mcpu_ext_opt = XNEW (arm_feature_set);
- *dyn_mcpu_ext_opt = opt->ext;
+ if (mcpu_ext_opt == NULL)
+ mcpu_ext_opt = XNEW (arm_feature_set);
+ *mcpu_ext_opt = opt->ext;
mcpu_fpu_opt = &opt->default_fpu;
if (opt->canonical_name)
{
}
if (ext != NULL)
- return arm_parse_extension (ext, mcpu_cpu_opt, &dyn_mcpu_ext_opt);
+ return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt);
return TRUE;
}
if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
{
march_cpu_opt = &opt->value;
+ if (march_ext_opt == NULL)
+ march_ext_opt = XNEW (arm_feature_set);
+ *march_ext_opt = arm_arch_none;
march_fpu_opt = &opt->default_fpu;
strcpy (selected_cpu_name, opt->name);
if (ext != NULL)
- return arm_parse_extension (ext, march_cpu_opt, &dyn_march_ext_opt);
+ return arm_parse_extension (ext, march_cpu_opt, march_ext_opt);
return TRUE;
}
--fix-v4bx Allow BX in ARMv4 code\n"));
}
-
#ifdef OBJ_ELF
+
typedef struct
{
int val;
{16, ARM_ARCH_V8M_BASE},
{17, ARM_ARCH_V8M_MAIN},
{15, ARM_ARCH_V8R},
+ {14, ARM_ARCH_V8_4A},
{-1, ARM_ARCH_NONE}
};
/* Set an attribute if it has not already been set by the user. */
+
static void
aeabi_set_attribute_int (int tag, int value)
{
/* Return whether features in the *NEEDED feature set are available via
extensions for the architecture whose feature set is *ARCH_FSET. */
+
static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
const arm_feature_set *needed)
For -march/-mcpu=all the build attribute value of the most featureful
architecture is returned. Tag_CPU_arch_profile result is returned in
PROFILE. */
+
static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
const arm_feature_set *ext_fset,
}
/* Set the public EABI object attributes. */
+
static void
aeabi_set_public_attributes (void)
{
- char profile;
+ char profile = '\0';
int arch = -1;
int virt_sec = 0;
int fp16_optional = 0;
ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
/* Code run during relaxation relies on selected_cpu being set. */
+ ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
+ flags_ext = arm_arch_none;
+ ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
+ selected_ext = flags_ext;
selected_cpu = flags;
}
/* Otherwise, choose the architecture based on the capabilities of the
requested cpu. */
else
- flags = selected_cpu;
- ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
+ {
+ ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
+ ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
+ flags_ext = selected_ext;
+ flags = selected_cpu;
+ }
+ ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);
/* Allow the user to override the reported architecture. */
- if (object_arch)
+ if (!ARM_FEATURE_ZERO (selected_object_arch))
{
- ARM_CLEAR_FEATURE (flags_arch, *object_arch, fpu_any);
+ ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
flags_ext = arm_arch_none;
}
else
- {
- ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
- flags_ext = dyn_mcpu_ext_opt ? *dyn_mcpu_ext_opt : arm_arch_none;
- skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);
- }
+ skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);
/* When this function is run again after relaxation has happened there is no
way to determine whether an architecture or CPU was specified by the user:
aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
/* Tag_DSP_extension. */
- if (dyn_mcpu_ext_opt && ARM_CPU_HAS_FEATURE (*dyn_mcpu_ext_opt, arm_ext_dsp))
+ if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
aeabi_set_attribute_int (Tag_DSP_extension, 1);
ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
/* Post relaxation hook. Recompute ARM attributes now that relaxation is
finished and free extension feature bits which will not be used anymore. */
+
void
arm_md_post_relax (void)
{
aeabi_set_public_attributes ();
- XDELETE (dyn_mcpu_ext_opt);
- dyn_mcpu_ext_opt = NULL;
- XDELETE (dyn_march_ext_opt);
- dyn_march_ext_opt = NULL;
+ /* Free the dynamically allocated extension feature sets and reset the
+    pointers so the "allocate if NULL" guards in the option parsers start
+    afresh rather than seeing dangling storage.  */
+ XDELETE (mcpu_ext_opt);
+ mcpu_ext_opt = NULL;
+ XDELETE (march_ext_opt);
+ march_ext_opt = NULL;
}
/* Add the default contents for the .ARM.attributes section. */
+
void
arm_md_end (void)
{
}
#endif /* OBJ_ELF */
-
/* Parse a .cpu directive. */
static void
for (opt = arm_cpus + 1; opt->name != NULL; opt++)
if (streq (opt->name, name))
{
- mcpu_cpu_opt = &opt->value;
- if (!dyn_mcpu_ext_opt)
- dyn_mcpu_ext_opt = XNEW (arm_feature_set);
- *dyn_mcpu_ext_opt = opt->ext;
- ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
+ selected_arch = opt->value;
+ selected_ext = opt->ext;
+ ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
if (opt->canonical_name)
strcpy (selected_cpu_name, opt->canonical_name);
else
selected_cpu_name[i] = 0;
}
- ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
- if (dyn_mcpu_ext_opt)
- ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
+ ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
+
*input_line_pointer = saved_char;
demand_empty_rest_of_line ();
return;
ignore_rest_of_line ();
}
-
/* Parse a .arch directive. */
static void
for (opt = arm_archs + 1; opt->name != NULL; opt++)
if (streq (opt->name, name))
{
- mcpu_cpu_opt = &opt->value;
- XDELETE (dyn_mcpu_ext_opt);
- dyn_mcpu_ext_opt = NULL;
- selected_cpu = *mcpu_cpu_opt;
+ selected_arch = opt->value;
+ selected_ext = arm_arch_none;
+ selected_cpu = selected_arch;
strcpy (selected_cpu_name, opt->name);
- ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, *mfpu_opt);
+ ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
*input_line_pointer = saved_char;
demand_empty_rest_of_line ();
return;
ignore_rest_of_line ();
}
-
/* Parse a .object_arch directive. */
static void
for (opt = arm_archs + 1; opt->name != NULL; opt++)
if (streq (opt->name, name))
{
- object_arch = &opt->value;
+ selected_object_arch = opt->value;
*input_line_pointer = saved_char;
demand_empty_rest_of_line ();
return;
s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
{
const struct arm_option_extension_value_table *opt;
- const arm_feature_set arm_any = ARM_ANY;
char saved_char;
char *name;
int adding_value = 1;
for (i = 0; i < nb_allowed_archs; i++)
{
/* Empty entry. */
- if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
+ if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
continue;
- if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
+ if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
break;
}
break;
}
- if (!dyn_mcpu_ext_opt)
- {
- dyn_mcpu_ext_opt = XNEW (arm_feature_set);
- *dyn_mcpu_ext_opt = arm_arch_none;
- }
if (adding_value)
- ARM_MERGE_FEATURE_SETS (*dyn_mcpu_ext_opt, *dyn_mcpu_ext_opt,
+ ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
opt->merge_value);
else
- ARM_CLEAR_FEATURE (*dyn_mcpu_ext_opt, *dyn_mcpu_ext_opt,
- opt->clear_value);
+ ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
- ARM_MERGE_FEATURE_SETS (selected_cpu, *mcpu_cpu_opt, *dyn_mcpu_ext_opt);
- ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, *mfpu_opt);
+ ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
+ ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
*input_line_pointer = saved_char;
demand_empty_rest_of_line ();
/* Allowing Thumb division instructions for ARMv7 in autodetection rely
for (opt = arm_fpus; opt->name != NULL; opt++)
if (streq (opt->name, name))
{
- mfpu_opt = &opt->value;
- ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
- if (dyn_mcpu_ext_opt)
- ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, *dyn_mcpu_ext_opt);
+ selected_fpu = opt->value;
+#ifndef CPU_DEFAULT
+ if (no_cpu_selected ())
+ ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
+ else
+#endif
+ ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
*input_line_pointer = saved_char;
demand_empty_rest_of_line ();
return;
return -1;
}
-
/* Apply sym value for relocations only in the case that they are for
local symbols in the same segment as the fixup and you have the
respective architectural feature for blx and simple switches. */
+
int
arm_apply_sym_value (struct fix * fixP, segT this_seg)
{