/* tc-arm.c -- Assemble for the ARM
- Copyright (C) 1994-2018 Free Software Foundation, Inc.
+ Copyright (C) 1994-2019 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
Modified by David Taylor (dtaylor@armltd.co.uk)
Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
+/* Only for compatibility of hint instructions. */
+static const arm_feature_set arm_ext_v6k_v6t2 =
+ ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
static const arm_feature_set arm_ext_v6_notm =
ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
static const arm_feature_set arm_ext_v6_dsp =
static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
static const arm_feature_set arm_ext_v8m_main =
ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
+static const arm_feature_set arm_ext_v8_1m_main =
+ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
/* Instructions in ARMv8-M only found in M profile architectures. */
static const arm_feature_set arm_ext_v8m_m_only =
ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
static const arm_feature_set arm_ext_sb =
ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
+static const arm_feature_set arm_ext_predres =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
static const arm_feature_set arm_arch_any = ARM_ANY;
#ifdef OBJ_ELF
/* The maximum number of operands we need. */
#define ARM_IT_MAX_OPERANDS 6
+#define ARM_IT_MAX_RELOCS 3
struct arm_it
{
bfd_reloc_code_real_type type;
expressionS exp;
int pc_rel;
- } reloc;
+ } relocs[ARM_IT_MAX_RELOCS];
enum it_instruction_type it_insn_type;
#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
#define BAD_BRANCH _("branch must be last instruction in IT block")
+#define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
#define BAD_NOT_IT _("instruction not allowed in IT block")
#define BAD_FPU _("selected FPU does not support instruction")
#define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
}
else
{
- if (inst.reloc.type != 0)
+ if (inst.relocs[0].type != 0)
{
inst.error = _("expression too complex");
return FAIL;
}
- memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
- inst.reloc.type = BFD_RELOC_ARM_MULTI;
- inst.reloc.pc_rel = 0;
+ memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
+ inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
+ inst.relocs[0].pc_rel = 0;
}
}
{
imm1 = inst.operands[1].imm;
imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
- : inst.reloc.exp.X_unsigned ? 0
+ : inst.relocs[0].exp.X_unsigned ? 0
: ((bfd_int64_t) inst.operands[1].imm) >> 32);
if (target_big_endian)
{
{
if (nbytes == 4)
{
- if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
- && (inst.reloc.exp.X_op == O_constant)
+ if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
+ && (inst.relocs[0].exp.X_op == O_constant)
&& (pool->literals[entry].X_add_number
- == inst.reloc.exp.X_add_number)
+ == inst.relocs[0].exp.X_add_number)
&& (pool->literals[entry].X_md == nbytes)
&& (pool->literals[entry].X_unsigned
- == inst.reloc.exp.X_unsigned))
+ == inst.relocs[0].exp.X_unsigned))
break;
- if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
- && (inst.reloc.exp.X_op == O_symbol)
+ if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
+ && (inst.relocs[0].exp.X_op == O_symbol)
&& (pool->literals[entry].X_add_number
- == inst.reloc.exp.X_add_number)
+ == inst.relocs[0].exp.X_add_number)
&& (pool->literals[entry].X_add_symbol
- == inst.reloc.exp.X_add_symbol)
+ == inst.relocs[0].exp.X_add_symbol)
&& (pool->literals[entry].X_op_symbol
- == inst.reloc.exp.X_op_symbol)
+ == inst.relocs[0].exp.X_op_symbol)
&& (pool->literals[entry].X_md == nbytes))
break;
}
&& (pool->literals[entry].X_op == O_constant)
&& (pool->literals[entry].X_add_number == (offsetT) imm1)
&& (pool->literals[entry].X_unsigned
- == inst.reloc.exp.X_unsigned)
+ == inst.relocs[0].exp.X_unsigned)
&& (pool->literals[entry + 1].X_op == O_constant)
&& (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
&& (pool->literals[entry + 1].X_unsigned
- == inst.reloc.exp.X_unsigned))
+ == inst.relocs[0].exp.X_unsigned))
break;
padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
We also check to make sure the literal operand is a
constant number. */
- if (!(inst.reloc.exp.X_op == O_constant
- || inst.reloc.exp.X_op == O_big))
+ if (!(inst.relocs[0].exp.X_op == O_constant
+ || inst.relocs[0].exp.X_op == O_big))
{
inst.error = _("invalid type for literal pool");
return FAIL;
return FAIL;
}
- pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry] = inst.relocs[0].exp;
pool->literals[entry].X_op = O_constant;
pool->literals[entry].X_add_number = 0;
pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
return FAIL;
}
- pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry] = inst.relocs[0].exp;
pool->literals[entry].X_op = O_constant;
pool->literals[entry].X_add_number = imm1;
- pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
+ pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
pool->literals[entry++].X_md = 4;
- pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry] = inst.relocs[0].exp;
pool->literals[entry].X_op = O_constant;
pool->literals[entry].X_add_number = imm2;
- pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
+ pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
pool->literals[entry].X_md = 4;
pool->alignment = 3;
pool->next_free_entry += 1;
}
else
{
- pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry] = inst.relocs[0].exp;
pool->literals[entry].X_md = 4;
}
}
else if (padding_slot_p)
{
- pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry] = inst.relocs[0].exp;
pool->literals[entry].X_md = nbytes;
}
- inst.reloc.exp.X_op = O_symbol;
- inst.reloc.exp.X_add_number = pool_size;
- inst.reloc.exp.X_add_symbol = pool->symbol;
+ inst.relocs[0].exp.X_op = O_symbol;
+ inst.relocs[0].exp.X_add_number = pool_size;
+ inst.relocs[0].exp.X_add_symbol = pool->symbol;
return SUCCESS;
}
inst.operands[i].imm = reg;
inst.operands[i].immisreg = 1;
}
- else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
+ else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
return FAIL;
}
inst.operands[i].shift_kind = shift;
inst.operands[i].isreg = 1;
/* parse_shift will override this if appropriate */
- inst.reloc.exp.X_op = O_constant;
- inst.reloc.exp.X_add_number = 0;
+ inst.relocs[0].exp.X_op = O_constant;
+ inst.relocs[0].exp.X_add_number = 0;
if (skip_past_comma (str) == FAIL)
return SUCCESS;
return parse_shift (str, i, NO_SHIFT_RESTRICT);
}
- if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
+ if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
return FAIL;
if (skip_past_comma (str) == SUCCESS)
if (my_get_expression (&exp, str, GE_NO_PREFIX))
return FAIL;
- if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
+ if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
{
inst.error = _("constant expression expected");
return FAIL;
inst.error = _("invalid rotation");
return FAIL;
}
- if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
+ if (inst.relocs[0].exp.X_add_number < 0
+ || inst.relocs[0].exp.X_add_number > 255)
{
inst.error = _("invalid constant");
return FAIL;
}
/* Encode as specified. */
- inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
+ inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
return SUCCESS;
}
- inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
- inst.reloc.pc_rel = 0;
+ inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
+ inst.relocs[0].pc_rel = 0;
return SUCCESS;
}
/* We now have the group relocation table entry corresponding to
the name in the assembler source. Next, we parse the expression. */
- if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
+ if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
return PARSE_OPERAND_FAIL_NO_BACKTRACK;
/* Record the relocation type (always the ALU variant here). */
- inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
- gas_assert (inst.reloc.type != 0);
+ inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
+ gas_assert (inst.relocs[0].type != 0);
return PARSE_OPERAND_SUCCESS;
}
}
/* Parse all forms of an ARM address expression. Information is written
- to inst.operands[i] and/or inst.reloc.
+ to inst.operands[i] and/or inst.relocs[0].
Preindexed addressing (.preind=1):
- [Rn, #offset] .reg=Rn .reloc.exp=offset
+ [Rn, #offset] .reg=Rn .relocs[0].exp=offset
[Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
[Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
- .shift_kind=shift .reloc.exp=shift_imm
+ .shift_kind=shift .relocs[0].exp=shift_imm
These three may have a trailing ! which causes .writeback to be set also.
Postindexed addressing (.postind=1, .writeback=1):
- [Rn], #offset .reg=Rn .reloc.exp=offset
+ [Rn], #offset .reg=Rn .relocs[0].exp=offset
[Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
[Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
- .shift_kind=shift .reloc.exp=shift_imm
+ .shift_kind=shift .relocs[0].exp=shift_imm
Unindexed addressing (.preind=0, .postind=0):
Other:
[Rn]{!} shorthand for [Rn,#0]{!}
- =immediate .isreg=0 .reloc.exp=immediate
- label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
+ =immediate .isreg=0 .relocs[0].exp=immediate
+ label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
It is the caller's responsibility to check for addressing modes not
- supported by the instruction, and to set inst.reloc.type. */
+ supported by the instruction, and to set inst.relocs[0].type. */
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
if (skip_past_char (&p, '=') == FAIL)
{
/* Bare address - translate to PC-relative offset. */
- inst.reloc.pc_rel = 1;
+ inst.relocs[0].pc_rel = 1;
inst.operands[i].reg = REG_PC;
inst.operands[i].isreg = 1;
inst.operands[i].preind = 1;
- if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
+ if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
return PARSE_OPERAND_FAIL;
}
- else if (parse_big_immediate (&p, i, &inst.reloc.exp,
+ else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
/*allow_symbol_p=*/TRUE))
return PARSE_OPERAND_FAIL;
/* We now have the group relocation table entry corresponding to
the name in the assembler source. Next, we parse the
expression. */
- if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
+ if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
return PARSE_OPERAND_FAIL_NO_BACKTRACK;
/* Record the relocation type. */
switch (group_type)
{
case GROUP_LDR:
- inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
+ inst.relocs[0].type
+ = (bfd_reloc_code_real_type) entry->ldr_code;
break;
case GROUP_LDRS:
- inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
+ inst.relocs[0].type
+ = (bfd_reloc_code_real_type) entry->ldrs_code;
break;
case GROUP_LDC:
- inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
+ inst.relocs[0].type
+ = (bfd_reloc_code_real_type) entry->ldc_code;
break;
default:
gas_assert (0);
}
- if (inst.reloc.type == 0)
+ if (inst.relocs[0].type == 0)
{
inst.error = _("this group relocation is not allowed on this instruction");
return PARSE_OPERAND_FAIL_NO_BACKTRACK;
{
char *q = p;
- if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
+ if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
return PARSE_OPERAND_FAIL;
/* If the offset is 0, find out if it's a +0 or -0. */
- if (inst.reloc.exp.X_op == O_constant
- && inst.reloc.exp.X_add_number == 0)
+ if (inst.relocs[0].exp.X_op == O_constant
+ && inst.relocs[0].exp.X_add_number == 0)
{
skip_whitespace (q);
if (*q == '#')
inst.operands[i].negative = 0;
p--;
}
- if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
+ if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
return PARSE_OPERAND_FAIL;
/* If the offset is 0, find out if it's a +0 or -0. */
- if (inst.reloc.exp.X_op == O_constant
- && inst.reloc.exp.X_add_number == 0)
+ if (inst.relocs[0].exp.X_op == O_constant
+ && inst.relocs[0].exp.X_add_number == 0)
{
skip_whitespace (q);
if (*q == '#')
if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
{
inst.operands[i].preind = 1;
- inst.reloc.exp.X_op = O_constant;
- inst.reloc.exp.X_add_number = 0;
+ inst.relocs[0].exp.X_op = O_constant;
+ inst.relocs[0].exp.X_add_number = 0;
}
*str = p;
return PARSE_OPERAND_SUCCESS;
p = *str;
skip_past_char (&p, '#');
if (strncasecmp (p, ":lower16:", 9) == 0)
- inst.reloc.type = BFD_RELOC_ARM_MOVW;
+ inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
else if (strncasecmp (p, ":upper16:", 9) == 0)
- inst.reloc.type = BFD_RELOC_ARM_MOVT;
+ inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
- if (inst.reloc.type != BFD_RELOC_UNUSED)
+ if (inst.relocs[0].type != BFD_RELOC_UNUSED)
{
p += 9;
skip_whitespace (p);
}
- if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
+ if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
return FAIL;
- if (inst.reloc.type == BFD_RELOC_UNUSED)
+ if (inst.relocs[0].type == BFD_RELOC_UNUSED)
{
- if (inst.reloc.exp.X_op != O_constant)
+ if (inst.relocs[0].exp.X_op != O_constant)
{
inst.error = _("constant expression expected");
return FAIL;
}
- if (inst.reloc.exp.X_add_number < 0
- || inst.reloc.exp.X_add_number > 0xffff)
+ if (inst.relocs[0].exp.X_add_number < 0
+ || inst.relocs[0].exp.X_add_number > 0xffff)
{
inst.error = _("immediate value out of range");
return FAIL;
{
if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
return FAIL;
- if (inst.reloc.exp.X_add_number != 1)
+ if (inst.relocs[0].exp.X_add_number != 1)
{
inst.error = _("invalid shift");
return FAIL;
OP_EXP, /* arbitrary expression */
OP_EXPi, /* same, with optional immediate prefix */
OP_EXPr, /* same, with optional relocation suffix */
+ OP_EXPs, /* same, with optional non-first operand relocation suffix */
OP_HALF, /* 0 .. 65535 or low/high reloc. */
OP_IROT1, /* VCADD rotate immediate: 90, 270. */
OP_IROT2, /* VCMLA rotate immediate: 0, 90, 180, 270. */
/* Expressions */
case OP_EXPi: EXPi:
- po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
+ po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
GE_OPT_PREFIX));
break;
case OP_EXP:
- po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
+ po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
GE_NO_PREFIX));
break;
case OP_EXPr: EXPr:
- po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
+ po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
GE_NO_PREFIX));
- if (inst.reloc.exp.X_op == O_symbol)
+ if (inst.relocs[0].exp.X_op == O_symbol)
{
val = parse_reloc (&str);
if (val == -1)
}
break;
+ case OP_EXPs:
+ po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
+ GE_NO_PREFIX));
+ if (inst.relocs[i].exp.X_op == O_symbol)
+ {
+ inst.operands[i].hasreloc = 1;
+ }
+ else if (inst.relocs[i].exp.X_op == O_constant)
+ {
+ inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
+ inst.operands[i].hasreloc = 0;
+ }
+ break;
+
/* Operand for MOVW or MOVT. */
case OP_HALF:
po_misc_or_fail (parse_half (&str));
inst.instruction |= inst.operands[i].imm << 8;
}
else
- inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
}
else
{
inst.instruction |= INST_IMMEDIATE;
- if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
+ if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
inst.instruction |= inst.operands[i].imm;
}
}
else
{
inst.instruction |= inst.operands[i].shift_kind << 5;
- inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
}
}
- else /* immediate offset in inst.reloc */
+ else /* immediate offset in inst.relocs[0] */
{
- if (is_pc && !inst.reloc.pc_rel)
+ if (is_pc && !inst.relocs[0].pc_rel)
{
const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
as_tsktsk (_("use of PC in this instruction is deprecated"));
}
- if (inst.reloc.type == BFD_RELOC_UNUSED)
+ if (inst.relocs[0].type == BFD_RELOC_UNUSED)
{
/* Prefer + for zero encoded value. */
if (!inst.operands[i].negative)
inst.instruction |= INDEX_UP;
- inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
}
}
}
if (!inst.operands[i].negative)
inst.instruction |= INDEX_UP;
}
- else /* immediate offset in inst.reloc */
+ else /* immediate offset in inst.relocs[0] */
{
- constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
+ constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
&& inst.operands[i].writeback),
BAD_PC_WRITEBACK);
inst.instruction |= HWOFFSET_IMM;
- if (inst.reloc.type == BFD_RELOC_UNUSED)
+ if (inst.relocs[0].type == BFD_RELOC_UNUSED)
{
/* Prefer + for zero encoded value. */
if (!inst.operands[i].negative)
inst.instruction |= INDEX_UP;
- inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
+ inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
}
}
}
static void do_vfp_nsyn_opcode (const char *);
-/* inst.reloc.exp describes an "=expr" load pseudo-operation.
+/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
Determine whether it can be performed with a move instruction; if
it can, convert inst.instruction to that move instruction and
return TRUE; if it can't, convert inst.instruction to a literal-pool
return TRUE;
}
- if (inst.reloc.exp.X_op != O_constant
- && inst.reloc.exp.X_op != O_symbol
- && inst.reloc.exp.X_op != O_big)
+ if (inst.relocs[0].exp.X_op != O_constant
+ && inst.relocs[0].exp.X_op != O_symbol
+ && inst.relocs[0].exp.X_op != O_big)
{
inst.error = _("constant expression expected");
return TRUE;
}
- if (inst.reloc.exp.X_op == O_constant
- || inst.reloc.exp.X_op == O_big)
+ if (inst.relocs[0].exp.X_op == O_constant
+ || inst.relocs[0].exp.X_op == O_big)
{
#if defined BFD_HOST_64_BIT
bfd_int64_t v;
#else
offsetT v;
#endif
- if (inst.reloc.exp.X_op == O_big)
+ if (inst.relocs[0].exp.X_op == O_big)
{
LITTLENUM_TYPE w[X_PRECISION];
LITTLENUM_TYPE * l;
- if (inst.reloc.exp.X_add_number == -1)
+ if (inst.relocs[0].exp.X_add_number == -1)
{
gen_to_words (w, X_PRECISION, E_PRECISION);
l = w;
#endif
}
else
- v = inst.reloc.exp.X_add_number;
+ v = inst.relocs[0].exp.X_add_number;
if (!inst.operands[i].issingle)
{
unsigned immlo = inst.operands[1].imm;
unsigned immhi = inst.operands[1].regisimm
? inst.operands[1].reg
- : inst.reloc.exp.X_unsigned
+ : inst.relocs[0].exp.X_unsigned
? 0
: ((bfd_int64_t)((int) immlo)) >> 32;
int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
inst.operands[1].reg = REG_PC;
inst.operands[1].isreg = 1;
inst.operands[1].preind = 1;
- inst.reloc.pc_rel = 1;
- inst.reloc.type = (thumb_p
+ inst.relocs[0].pc_rel = 1;
+ inst.relocs[0].type = (thumb_p
? BFD_RELOC_ARM_THUMB_OFFSET
: (mode_3
? BFD_RELOC_ARM_HWLITERAL
}
if (reloc_override)
- inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
- else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
- || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
- && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
+ inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
+ else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
+ || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
+ && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
{
if (thumb_mode)
- inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
else
- inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
}
/* Prefer + for zero encoded value. */
do_rm_rd_rn (void)
{
constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
- constraint (((inst.reloc.exp.X_op != O_constant
- && inst.reloc.exp.X_op != O_illegal)
- || inst.reloc.exp.X_add_number != 0),
+ constraint (((inst.relocs[0].exp.X_op != O_constant
+ && inst.relocs[0].exp.X_op != O_illegal)
+ || inst.relocs[0].exp.X_add_number != 0),
BAD_ADDR_MODE);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 12;
/* Frag hacking will turn this into a sub instruction if the offset turns
out to be negative. */
- inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
- inst.reloc.pc_rel = 1;
- inst.reloc.exp.X_add_number -= 8;
+ inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
+ inst.relocs[0].pc_rel = 1;
+ inst.relocs[0].exp.X_add_number -= 8;
if (support_interwork
- && inst.reloc.exp.X_op == O_symbol
- && inst.reloc.exp.X_add_symbol != NULL
- && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
- && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
- inst.reloc.exp.X_add_number |= 1;
+ && inst.relocs[0].exp.X_op == O_symbol
+ && inst.relocs[0].exp.X_add_symbol != NULL
+ && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
+ && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
+ inst.relocs[0].exp.X_add_number |= 1;
}
/* This is a pseudo-op of the form "adrl rd, label" to be converted
/* Frag hacking will turn this into a sub instruction if the offset turns
out to be negative. */
- inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
- inst.reloc.pc_rel = 1;
+ inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
+ inst.relocs[0].pc_rel = 1;
inst.size = INSN_SIZE * 2;
- inst.reloc.exp.X_add_number -= 8;
+ inst.relocs[0].exp.X_add_number -= 8;
if (support_interwork
- && inst.reloc.exp.X_op == O_symbol
- && inst.reloc.exp.X_add_symbol != NULL
- && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
- && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
- inst.reloc.exp.X_add_number |= 1;
+ && inst.relocs[0].exp.X_op == O_symbol
+ && inst.relocs[0].exp.X_add_symbol != NULL
+ && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
+ && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
+ inst.relocs[0].exp.X_add_number |= 1;
}
static void
do_arit (void)
{
- constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
- && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
+ constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
+ && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
THUMB1_RELOC_ONLY);
if (!inst.operands[1].present)
inst.operands[1].reg = inst.operands[0].reg;
constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
&& inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
_("the only valid suffixes here are '(plt)' and '(tlscall)'"));
- inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
+ inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
? BFD_RELOC_ARM_PLT32
: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
}
else
- inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
- inst.reloc.pc_rel = 1;
+ inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
+ inst.relocs[0].pc_rel = 1;
}
static void
want_reloc = FALSE;
if (want_reloc)
- inst.reloc.type = BFD_RELOC_ARM_V4BX;
+ inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
|| (inst.operands[1].reg == REG_PC),
BAD_ADDR_MODE);
- constraint (inst.reloc.exp.X_op != O_constant
- || inst.reloc.exp.X_add_number != 0,
+ constraint (inst.relocs[0].exp.X_op != O_constant
+ || inst.relocs[0].exp.X_add_number != 0,
_("offset must be zero in ARM encoding"));
constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
- inst.reloc.type = BFD_RELOC_UNUSED;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
}
static void
constraint (!(inst.operands[1].immisreg)
&& (inst.operands[0].reg == REG_PC
&& inst.operands[1].reg == REG_PC
- && (inst.reloc.exp.X_add_number & 0x3)),
+ && (inst.relocs[0].exp.X_add_number & 0x3)),
_("ldr to register 15 must be 4-byte aligned"));
}
reject [Rn,...]. */
if (inst.operands[1].preind)
{
- constraint (inst.reloc.exp.X_op != O_constant
- || inst.reloc.exp.X_add_number != 0,
+ constraint (inst.relocs[0].exp.X_op != O_constant
+ || inst.relocs[0].exp.X_add_number != 0,
_("this instruction requires a post-indexed address"));
inst.operands[1].preind = 0;
reject [Rn,...]. */
if (inst.operands[1].preind)
{
- constraint (inst.reloc.exp.X_op != O_constant
- || inst.reloc.exp.X_add_number != 0,
+ constraint (inst.relocs[0].exp.X_op != O_constant
+ || inst.relocs[0].exp.X_add_number != 0,
_("this instruction requires a post-indexed address"));
inst.operands[1].preind = 0;
static void
do_mov (void)
{
- constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
- && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
+ constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
+ && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
THUMB1_RELOC_ONLY);
inst.instruction |= inst.operands[0].reg << 12;
encode_arm_shifter_operand (1);
bfd_boolean top;
top = (inst.instruction & 0x00400000) != 0;
- constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
+ constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
_(":lower16: not allowed in this instruction"));
- constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
+ constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
_(":upper16: not allowed in this instruction"));
inst.instruction |= inst.operands[0].reg << 12;
- if (inst.reloc.type == BFD_RELOC_UNUSED)
+ if (inst.relocs[0].type == BFD_RELOC_UNUSED)
{
- imm = inst.reloc.exp.X_add_number;
+ imm = inst.relocs[0].exp.X_add_number;
/* The value is in two pieces: 0:11, 16:19. */
inst.instruction |= (imm & 0x00000fff);
inst.instruction |= (imm & 0x0000f000) << 4;
else
{
inst.instruction |= INST_IMMEDIATE;
- inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
- inst.reloc.pc_rel = 0;
+ inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
+ inst.relocs[0].pc_rel = 0;
}
}
_("extraneous shift as part of operand to shift insn"));
}
else
- inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
static void
do_smc (void)
{
- inst.reloc.type = BFD_RELOC_ARM_SMC;
- inst.reloc.pc_rel = 0;
+ inst.relocs[0].type = BFD_RELOC_ARM_SMC;
+ inst.relocs[0].pc_rel = 0;
}
static void
do_hvc (void)
{
- inst.reloc.type = BFD_RELOC_ARM_HVC;
- inst.reloc.pc_rel = 0;
+ inst.relocs[0].type = BFD_RELOC_ARM_HVC;
+ inst.relocs[0].pc_rel = 0;
}
static void
do_swi (void)
{
- inst.reloc.type = BFD_RELOC_ARM_SWI;
- inst.reloc.pc_rel = 0;
+ inst.relocs[0].type = BFD_RELOC_ARM_SWI;
+ inst.relocs[0].pc_rel = 0;
}
static void
constraint (inst.operands[0].reg == inst.operands[1].reg
|| inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
- constraint (inst.reloc.exp.X_op != O_constant
- || inst.reloc.exp.X_add_number != 0,
+ constraint (inst.relocs[0].exp.X_op != O_constant
+ || inst.relocs[0].exp.X_add_number != 0,
_("offset must be zero in ARM encoding"));
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg;
inst.instruction |= inst.operands[2].reg << 16;
- inst.reloc.type = BFD_RELOC_UNUSED;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
}
static void
[Rn]{!}. The instruction does not really support stacking or
unstacking, so we have to emulate these by setting appropriate
bits and offsets. */
- constraint (inst.reloc.exp.X_op != O_constant
- || inst.reloc.exp.X_add_number != 0,
+ constraint (inst.relocs[0].exp.X_op != O_constant
+ || inst.relocs[0].exp.X_add_number != 0,
_("this instruction does not support indexing"));
if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
- inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
+ inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;
if (!(inst.instruction & INDEX_UP))
- inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
+ inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;
if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
{
if (inst.operands[1].writeback)
inst.instruction |= WRITE_BACK;
inst.instruction |= inst.operands[1].reg << 16;
- inst.instruction |= inst.reloc.exp.X_add_number << 4;
+ inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
inst.instruction |= inst.operands[1].imm;
}
else
static void
encode_thumb32_shifted_operand (int i)
{
- unsigned int value = inst.reloc.exp.X_add_number;
+ unsigned int value = inst.relocs[0].exp.X_add_number;
unsigned int shift = inst.operands[i].shift_kind;
constraint (inst.operands[i].immisreg,
inst.instruction |= SHIFT_ROR << 4;
else
{
- constraint (inst.reloc.exp.X_op != O_constant,
+ constraint (inst.relocs[0].exp.X_op != O_constant,
_("expression too complex"));
constraint (value > 32
inst.instruction |= inst.operands[i].imm;
if (inst.operands[i].shifted)
{
- constraint (inst.reloc.exp.X_op != O_constant,
+ constraint (inst.relocs[0].exp.X_op != O_constant,
_("expression too complex"));
- constraint (inst.reloc.exp.X_add_number < 0
- || inst.reloc.exp.X_add_number > 3,
+ constraint (inst.relocs[0].exp.X_add_number < 0
+ || inst.relocs[0].exp.X_add_number > 3,
_("shift out of range"));
- inst.instruction |= inst.reloc.exp.X_add_number << 4;
+ inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
}
- inst.reloc.type = BFD_RELOC_UNUSED;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
}
else if (inst.operands[i].preind)
{
if (inst.operands[i].writeback)
inst.instruction |= 0x00000100;
}
- inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
}
else if (inst.operands[i].postind)
{
inst.instruction |= 0x00200000;
else
inst.instruction |= 0x00000900;
- inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
}
else /* unindexed - only for coprocessor */
inst.error = _("instruction does not accept unindexed addressing");
reject_bad_reg (Rd);
inst.instruction |= (Rn << 16) | (Rd << 8);
- inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
}
/* Parse an add or subtract instruction. We get here with inst.instruction
{
inst.instruction = THUMB_OP16(opcode);
inst.instruction |= (Rd << 4) | Rs;
- if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
- || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
+ if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
+ || (inst.relocs[0].type
+ > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
{
if (inst.size_req == 2)
- inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
else
inst.relax = opcode;
}
if (inst.size_req == 4
|| (inst.size_req != 2 && !opcode))
{
- constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
- && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
+ constraint ((inst.relocs[0].type
+ >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
+ && (inst.relocs[0].type
+ <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
THUMB1_RELOC_ONLY);
if (Rd == REG_PC)
{
constraint (add, BAD_PC);
constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
_("only SUBS PC, LR, #const allowed"));
- constraint (inst.reloc.exp.X_op != O_constant,
+ constraint (inst.relocs[0].exp.X_op != O_constant,
_("expression too complex"));
- constraint (inst.reloc.exp.X_add_number < 0
- || inst.reloc.exp.X_add_number > 0xff,
+ constraint (inst.relocs[0].exp.X_add_number < 0
+ || inst.relocs[0].exp.X_add_number > 0xff,
_("immediate value out of range"));
inst.instruction = T2_SUBS_PC_LR
- | inst.reloc.exp.X_add_number;
- inst.reloc.type = BFD_RELOC_UNUSED;
+ | inst.relocs[0].exp.X_add_number;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
return;
}
else if (Rs == REG_PC)
{
/* Always use addw/subw. */
inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
- inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
}
else
{
inst.instruction = (inst.instruction & 0xe1ffffff)
| 0x10000000;
if (flags)
- inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
else
- inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
}
inst.instruction |= Rd << 8;
inst.instruction |= Rs << 16;
}
else
{
- unsigned int value = inst.reloc.exp.X_add_number;
+ unsigned int value = inst.relocs[0].exp.X_add_number;
unsigned int shift = inst.operands[2].shift_kind;
Rn = inst.operands[2].reg;
inst.instruction = (inst.instruction == T_MNEM_add
? 0x0000 : 0x8000);
inst.instruction |= (Rd << 4) | Rs;
- inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
return;
}
/* Generate a 32-bit opcode. */
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction |= Rd << 8;
- inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
- inst.reloc.pc_rel = 1;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
+ inst.relocs[0].pc_rel = 1;
}
else
{
/* Generate a 16-bit opcode. */
inst.instruction = THUMB_OP16 (inst.instruction);
- inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
- inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
- inst.reloc.pc_rel = 1;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
+ inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust. */
+ inst.relocs[0].pc_rel = 1;
inst.instruction |= Rd << 4;
}
- if (inst.reloc.exp.X_op == O_symbol
- && inst.reloc.exp.X_add_symbol != NULL
- && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
- && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
- inst.reloc.exp.X_add_number += 1;
+ if (inst.relocs[0].exp.X_op == O_symbol
+ && inst.relocs[0].exp.X_add_symbol != NULL
+ && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
+ && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
+ inst.relocs[0].exp.X_add_number += 1;
}
/* Arithmetic instructions for which there is just one 16-bit
inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
inst.instruction |= Rd << 8;
inst.instruction |= Rs << 16;
- inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
}
else
{
inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
inst.instruction |= Rd << 8;
inst.instruction |= Rs << 16;
- inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
}
else
{
&& (inst.size_req == 4
|| (inst.size_req != 2
&& (inst.operands[0].hasreloc
- || inst.reloc.exp.X_op == O_constant))))
+ || inst.relocs[0].exp.X_op == O_constant))))
{
inst.instruction = THUMB_OP32(opcode);
if (cond == COND_ALWAYS)
if (unified_syntax && inst.size_req != 2)
inst.relax = opcode;
}
- inst.reloc.type = reloc;
- inst.reloc.pc_rel = 1;
+ inst.relocs[0].type = reloc;
+ inst.relocs[0].pc_rel = 1;
}
/* Actually do the work for Thumb state bkpt and hlt. The only difference
the branch encoding is now needed to deal with TLSCALL relocs.
So if we see a PLT reloc now, put it back to how it used to be to
keep the preexisting behaviour. */
- if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
- inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
+ if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
+ inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;
#if defined(OBJ_COFF)
/* If the destination of the branch is a defined symbol which does not have
the THUMB_FUNC attribute, then we must be calling a function which has
the (interfacearm) attribute. We look for the Thumb entry point to that
function and change the branch to refer to that function instead. */
- if ( inst.reloc.exp.X_op == O_symbol
- && inst.reloc.exp.X_add_symbol != NULL
- && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
- && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
- inst.reloc.exp.X_add_symbol =
- find_real_start (inst.reloc.exp.X_add_symbol);
+ if ( inst.relocs[0].exp.X_op == O_symbol
+ && inst.relocs[0].exp.X_add_symbol != NULL
+ && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
+ && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
+ inst.relocs[0].exp.X_add_symbol
+ = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
set_it_insn_type (OUTSIDE_IT_INSN);
constraint (inst.operands[0].reg > 7, BAD_HIREG);
inst.instruction |= inst.operands[0].reg;
- inst.reloc.pc_rel = 1;
- inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
+ inst.relocs[0].pc_rel = 1;
+ inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
static void
do_t_ldmstm (void)
{
/* This really doesn't seem worth it. */
- constraint (inst.reloc.type != BFD_RELOC_UNUSED,
+ constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
_("expression too complex"));
constraint (inst.operands[1].writeback,
_("Thumb load/store multiple does not support {reglist}^"));
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
- inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
static void
{
if (Rn == REG_PC)
{
- if (inst.reloc.pc_rel)
+ if (inst.relocs[0].pc_rel)
opcode = T_MNEM_ldr_pc2;
else
opcode = T_MNEM_ldr_pc;
}
inst.instruction |= THUMB_OP16 (opcode);
if (inst.size_req == 2)
- inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
else
inst.relax = opcode;
return;
inst.instruction = T_OPCODE_STR_SP;
inst.instruction |= inst.operands[0].reg << 8;
- inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
return;
}
/* Immediate offset. */
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
- inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
return;
}
{
inst.instruction = THUMB_OP16 (opcode);
inst.instruction |= Rn << 8;
- if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
- || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
+ if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
+ || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
{
if (inst.size_req == 2)
- inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
else
inst.relax = opcode;
}
}
else
{
- constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
- && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
+ constraint ((inst.relocs[0].type
+ >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
+ && (inst.relocs[0].type
+ <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
THUMB1_RELOC_ONLY);
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
inst.instruction |= Rn << r0off;
- inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
}
}
else if (inst.operands[1].shifted && inst.operands[1].immisreg
{
inst.instruction |= Rn;
inst.instruction |= Rm << 3;
- inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
}
else
{
constraint (Rn > 7,
_("only lo regs allowed with immediate"));
inst.instruction |= Rn << 8;
- inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
}
}
bfd_boolean top;
top = (inst.instruction & 0x00800000) != 0;
- if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
+ if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
{
constraint (top, _(":lower16: not allowed in this instruction"));
- inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
}
- else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
+ else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
{
constraint (!top, _(":upper16: not allowed in this instruction"));
- inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
}
Rd = inst.operands[0].reg;
reject_bad_reg (Rd);
inst.instruction |= Rd << 8;
- if (inst.reloc.type == BFD_RELOC_UNUSED)
+ if (inst.relocs[0].type == BFD_RELOC_UNUSED)
{
- imm = inst.reloc.exp.X_add_number;
+ imm = inst.relocs[0].exp.X_add_number;
inst.instruction |= (imm & 0xf000) << 4;
inst.instruction |= (imm & 0x0800) << 15;
inst.instruction |= (imm & 0x0700) << 4;
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
inst.instruction |= Rn << r0off;
- inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
}
else
{
if (!inst.operands[2].isreg)
{
inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
- inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
}
else
{
inst.instruction |= Rm;
if (inst.operands[3].present)
{
- unsigned int val = inst.reloc.exp.X_add_number;
- constraint (inst.reloc.exp.X_op != O_constant,
+ unsigned int val = inst.relocs[0].exp.X_add_number;
+ constraint (inst.relocs[0].exp.X_op != O_constant,
_("expression too complex"));
inst.instruction |= (val & 0x1c) << 10;
inst.instruction |= (val & 0x03) << 6;
constraint (inst.operands[0].writeback,
_("push/pop do not support {reglist}^"));
- constraint (inst.reloc.type != BFD_RELOC_UNUSED,
+ constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
_("expression too complex"));
mask = inst.operands[0].imm;
if (inst.size_req == 4 || !unified_syntax)
narrow = FALSE;
- if (inst.reloc.exp.X_op != O_constant
- || inst.reloc.exp.X_add_number != 0)
+ if (inst.relocs[0].exp.X_op != O_constant
+ || inst.relocs[0].exp.X_add_number != 0)
narrow = FALSE;
/* Turn rsb #0 into 16-bit neg. We should probably do this via
relaxation, but it doesn't seem worth the hassle. */
if (narrow)
{
- inst.reloc.type = BFD_RELOC_UNUSED;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
inst.instruction = THUMB_OP16 (T_MNEM_negs);
inst.instruction |= Rs << 3;
inst.instruction |= Rd;
else
{
inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
- inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
}
}
else
inst.instruction |= inst.operands[0].reg << 8;
encode_thumb32_shifted_operand (1);
/* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
- inst.reloc.type = BFD_RELOC_UNUSED;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
}
}
else
case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
default: abort ();
}
- inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
}
case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
default: abort ();
}
- inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
+ inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
}
static void
do_t_smc (void)
{
- unsigned int value = inst.reloc.exp.X_add_number;
+ unsigned int value = inst.relocs[0].exp.X_add_number;
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
_("SMC is not permitted on this architecture"));
- constraint (inst.reloc.exp.X_op != O_constant,
+ constraint (inst.relocs[0].exp.X_op != O_constant,
_("expression too complex"));
- inst.reloc.type = BFD_RELOC_UNUSED;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
inst.instruction |= (value & 0xf000) >> 12;
inst.instruction |= (value & 0x0ff0);
inst.instruction |= (value & 0x000f) << 16;
static void
do_t_hvc (void)
{
- unsigned int value = inst.reloc.exp.X_add_number;
+ unsigned int value = inst.relocs[0].exp.X_add_number;
- inst.reloc.type = BFD_RELOC_UNUSED;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
inst.instruction |= (value & 0x0fff);
inst.instruction |= (value & 0xf000) << 4;
}
if (inst.operands[3].present)
{
- offsetT shift_amount = inst.reloc.exp.X_add_number;
+ offsetT shift_amount = inst.relocs[0].exp.X_add_number;
- inst.reloc.type = BFD_RELOC_UNUSED;
+ inst.relocs[0].type = BFD_RELOC_UNUSED;
- constraint (inst.reloc.exp.X_op != O_constant,
+ constraint (inst.relocs[0].exp.X_op != O_constant,
_("expression too complex"));
if (shift_amount != 0)
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 12;
inst.instruction |= inst.operands[2].reg << 16;
- inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
+ inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
static void
static void
do_t_swi (void)
{
- inst.reloc.type = BFD_RELOC_ARM_SWI;
+ inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
static void
inst.instruction |= Rn << 16;
}
+/* Check the range of the branch offset (VAL) with NBITS bits
+ and IS_SIGNED signedness. Also checks that the LSB is 0. */
+static int
+v8_1_branch_value_check (int val, int nbits, int is_signed)
+{
+ gas_assert (nbits > 0 && nbits <= 32);
+ if (is_signed)
+ {
+ int cmp = (1 << (nbits - 1));
+ if ((val < -cmp) || (val >= cmp) || (val & 0x01))
+ return FAIL;
+ }
+ else
+ {
+ if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
+ return FAIL;
+ }
+ return SUCCESS;
+}
+
/* Neon instruction encoder helpers. */
/* Encodings for the different types for various Neon opcodes. */
/* Half-precision conversions for Advanced SIMD -- neon. */
case NS_QD:
case NS_DQ:
+ if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
+ return;
if ((rs == NS_DQ)
&& (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
constraint (rs != NS_HH, _("invalid suffix"));
- constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
- _(BAD_FPU));
-
if (inst.cond != COND_ALWAYS)
{
if (thumb_mode)
else
{
constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
- constraint (inst.reloc.exp.X_op != O_constant
- || inst.reloc.exp.X_add_number != 0,
+ constraint (inst.relocs[0].exp.X_op != O_constant
+ || inst.relocs[0].exp.X_add_number != 0,
BAD_ADDR_MODE);
if (inst.operands[1].writeback)
{
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
_(BAD_FPU));
- constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
- unsigned rot = inst.reloc.exp.X_add_number;
+ constraint (inst.relocs[0].exp.X_op != O_constant,
+ _("expression too complex"));
+ unsigned rot = inst.relocs[0].exp.X_add_number;
constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
_("immediate out of range"));
rot /= 90;
{
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
_(BAD_FPU));
- constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
- unsigned rot = inst.reloc.exp.X_add_number;
+ constraint (inst.relocs[0].exp.X_op != O_constant,
+ _("expression too complex"));
+ unsigned rot = inst.relocs[0].exp.X_add_number;
constraint (rot != 90 && rot != 270, _("immediate out of range"));
enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
start of the instruction. */
dwarf2_emit_insn (0);
- switch (inst.reloc.exp.X_op)
+ switch (inst.relocs[0].exp.X_op)
{
case O_symbol:
- sym = inst.reloc.exp.X_add_symbol;
- offset = inst.reloc.exp.X_add_number;
+ sym = inst.relocs[0].exp.X_add_symbol;
+ offset = inst.relocs[0].exp.X_add_number;
break;
case O_constant:
sym = NULL;
- offset = inst.reloc.exp.X_add_number;
+ offset = inst.relocs[0].exp.X_add_number;
break;
default:
- sym = make_expr_symbol (&inst.reloc.exp);
+ sym = make_expr_symbol (&inst.relocs[0].exp);
offset = 0;
break;
}
else
md_number_to_chars (to, inst.instruction, inst.size);
- if (inst.reloc.type != BFD_RELOC_UNUSED)
- fix_new_arm (frag_now, to - frag_now->fr_literal,
- inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
- inst.reloc.type);
+ int r;
+ for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
+ {
+ if (inst.relocs[r].type != BFD_RELOC_UNUSED)
+ fix_new_arm (frag_now, to - frag_now->fr_literal,
+ inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
+ inst.relocs[r].type);
+ }
dwarf2_emit_insn (inst.size);
}
}
memset (&inst, '\0', sizeof (inst));
- inst.reloc.type = BFD_RELOC_UNUSED;
+ int r;
+ for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
+ inst.relocs[r].type = BFD_RELOC_UNUSED;
opcode = opcode_lookup (&p);
if (!opcode)
TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
#undef ARM_VARIANT
-#define ARM_VARIANT & arm_ext_v6k
+#define ARM_VARIANT & arm_ext_v6k_v6t2
#undef THUMB_VARIANT
-#define THUMB_VARIANT & arm_ext_v6k
+#define THUMB_VARIANT & arm_ext_v6k_v6t2
tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
#define THUMB_VARIANT & arm_ext_v8
tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
- TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
ldrexd, t_ldrexd),
TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
strexd, t_strexd),
+
+/* Defined in V8 but is in undefined encoding space for earlier
+ architectures. However earlier architectures are required to treat
+ this instruction as a semihosting trap as well. Hence while not explicitly
+ defined as such, it is in fact correct to define the instruction for all
+ architectures. */
+#undef THUMB_VARIANT
+#define THUMB_VARIANT & arm_ext_v1
+#undef ARM_VARIANT
+#define ARM_VARIANT & arm_ext_v1
+ TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
+
/* ARMv8 T32 only. */
#undef ARM_VARIANT
#define ARM_VARIANT NULL
#define THUMB_VARIANT & arm_ext_sb
TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
+#undef ARM_VARIANT
+#define ARM_VARIANT & arm_ext_predres
+#undef THUMB_VARIANT
+#define THUMB_VARIANT & arm_ext_predres
+ CE("cfprctx", e070f93, 1, (RRnpc), rd),
+ CE("dvprctx", e070fb3, 1, (RRnpc), rd),
+ CE("cpprctx", e070ff3, 1, (RRnpc), rd),
+
/* ARMv8-M instructions. */
#undef ARM_VARIANT
#define ARM_VARIANT NULL
return (base + 4) & ~3;
/* Thumb branches are simply offset by +4. */
+ case BFD_RELOC_THUMB_PCREL_BRANCH5:
case BFD_RELOC_THUMB_PCREL_BRANCH7:
case BFD_RELOC_THUMB_PCREL_BRANCH9:
case BFD_RELOC_THUMB_PCREL_BRANCH12:
case BFD_RELOC_THUMB_PCREL_BRANCH20:
case BFD_RELOC_THUMB_PCREL_BRANCH25:
+ case BFD_RELOC_ARM_THUMB_BF17:
return base + 4;
case BFD_RELOC_THUMB_PCREL_BRANCH23:
{
bfd_vma insn;
bfd_vma encoded_addend;
- bfd_vma addend_abs = abs (value);
+ bfd_vma addend_abs = llabs (value);
/* Check that the absolute value of the addend can be
expressed as an 8-bit constant plus a rotation. */
if (!seg->use_rela_p)
{
bfd_vma insn;
- bfd_vma addend_abs = abs (value);
+ bfd_vma addend_abs = llabs (value);
/* Check that the absolute value of the addend can be
encoded in 12 bits. */
if (!seg->use_rela_p)
{
bfd_vma insn;
- bfd_vma addend_abs = abs (value);
+ bfd_vma addend_abs = llabs (value);
/* Check that the absolute value of the addend can be
encoded in 8 bits. */
if (!seg->use_rela_p)
{
bfd_vma insn;
- bfd_vma addend_abs = abs (value);
+ bfd_vma addend_abs = llabs (value);
/* Check that the absolute value of the addend is a multiple of
four and, when divided by four, fits in 8 bits. */
}
break;
+ case BFD_RELOC_THUMB_PCREL_BRANCH5:
+ if (fixP->fx_addsy
+ && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
+ && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && ARM_IS_FUNC (fixP->fx_addsy)
+ && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
+ {
+ /* Force a relocation for a branch 5 bits wide. */
+ fixP->fx_done = 0;
+ }
+ if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ BAD_BRANCH_OFF);
+
+ if (fixP->fx_done || !seg->use_rela_p)
+ {
+ addressT boff = value >> 1;
+
+ newval = md_chars_to_number (buf, THUMB_SIZE);
+ newval |= (boff << 7);
+ md_number_to_chars (buf, newval, THUMB_SIZE);
+ }
+ break;
+
+ case BFD_RELOC_ARM_THUMB_BF17:
+ if (fixP->fx_addsy
+ && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
+ && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && ARM_IS_FUNC (fixP->fx_addsy)
+ && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
+ {
+ /* Force a relocation for a branch 17 bits wide. */
+ fixP->fx_done = 0;
+ }
+
+ if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ BAD_BRANCH_OFF);
+
+ if (fixP->fx_done || !seg->use_rela_p)
+ {
+ offsetT newval2;
+ addressT immA, immB, immC;
+
+ immA = (value & 0x0001f000) >> 12;
+ immB = (value & 0x00000ffc) >> 2;
+ immC = (value & 0x00000002) >> 1;
+
+ newval = md_chars_to_number (buf, THUMB_SIZE);
+ newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
+ newval |= immA;
+ newval2 |= (immC << 11) | (immB << 1);
+ md_number_to_chars (buf, newval, THUMB_SIZE);
+ md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
+ }
+ break;
+
case BFD_RELOC_ARM_V4BX:
/* This will need to go in the object file. */
fixP->fx_done = 0;
case BFD_RELOC_ARM_GOTFUNCDESC:
case BFD_RELOC_ARM_GOTOFFFUNCDESC:
case BFD_RELOC_ARM_FUNCDESC:
+ case BFD_RELOC_ARM_THUMB_BF17:
code = fixp->fx_r_type;
break;
_("ADRL used for a symbol not defined in the same file"));
return NULL;
+ case BFD_RELOC_THUMB_PCREL_BRANCH5:
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _("%s used for a symbol not defined in the same file"),
+ bfd_get_reloc_code_name (fixp->fx_r_type));
+ return NULL;
+
case BFD_RELOC_ARM_OFFSET_IMM:
if (section->use_rela_p)
{
ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
+ ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A,
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
+ FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
ARM_ARCH_NONE,
FPU_NONE),
ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
-
+ ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
+ FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
/* ??? XSCALE is really an architecture. */
ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
ARM_ARCH_NONE,
};
#undef ARM_CPU_OPT
+struct arm_ext_table
+{
+ const char * name;
+ size_t name_len;
+ const arm_feature_set merge;
+ const arm_feature_set clear;
+};
+
struct arm_arch_option_table
{
- const char * name;
- size_t name_len;
- const arm_feature_set value;
- const arm_feature_set default_fpu;
+ const char * name;
+ size_t name_len;
+ const arm_feature_set value;
+ const arm_feature_set default_fpu;
+ const struct arm_ext_table * ext_table;
+};
+
+/* Used to add support for +E and +noE extension. */
+#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
+/* Used to add support for a +E extension. */
+#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
+/* Used to add support for a +noE extension. */
+#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }
+
+#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
+ ~0 & ~FPU_ENDIAN_PURE)
+
+static const struct arm_ext_table armv5te_ext_table[] =
+{
+ ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv7_ext_table[] =
+{
+ ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv7ve_ext_table[] =
+{
+ ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
+ ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
+ ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
+ ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
+ ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
+ ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp. */
+ ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),
+
+ ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
+ ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),
+
+ /* Aliases for +simd. */
+ ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),
+
+ ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
+ ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
+ ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
+
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv7a_ext_table[] =
+{
+ ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
+ ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp. */
+ ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
+ ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
+ ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
+ ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
+ ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),
+
+ ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
+ ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),
+
+ /* Aliases for +simd. */
+ ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
+ ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
+
+ ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
+ ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),
+
+ ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
+ ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv7r_ext_table[] =
+{
+ ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
+ ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp. */
+ ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
+ ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp. */
+ ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
+ ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
+ ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
+ ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv7em_ext_table[] =
+{
+ ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
+ /* Alias for +fp, used to be known as fpv4-sp-d16. */
+ ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
+ ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
+ ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
+ ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv8a_ext_table[] =
+{
+ ARM_ADD ("crc", ARCH_CRC_ARMV8),
+ ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
+ ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
+ ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
+
+ /* Armv8-a does not allow an FP implementation without SIMD, so the user
+ should use the +simd option to turn on FP. */
+ ARM_REMOVE ("fp", ALL_FP),
+ ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
+ ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+
+static const struct arm_ext_table armv81a_ext_table[] =
+{
+ ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
+ ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
+ ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
+
+ /* Armv8-a does not allow an FP implementation without SIMD, so the user
+ should use the +simd option to turn on FP. */
+ ARM_REMOVE ("fp", ALL_FP),
+ ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
+ ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv82a_ext_table[] =
+{
+ ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
+ ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
+ ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
+ ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
+ ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
+ ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
+
+ /* Armv8-a does not allow an FP implementation without SIMD, so the user
+ should use the +simd option to turn on FP. */
+ ARM_REMOVE ("fp", ALL_FP),
+ ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
+ ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv84a_ext_table[] =
+{
+ ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
+ ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
+ ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
+ ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
+
+ /* Armv8-a does not allow an FP implementation without SIMD, so the user
+ should use the +simd option to turn on FP. */
+ ARM_REMOVE ("fp", ALL_FP),
+ ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
+ ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv85a_ext_table[] =
+{
+ ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
+ ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
+ ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
+ ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
+
+ /* Armv8-a does not allow an FP implementation without SIMD, so the user
+ should use the +simd option to turn on FP. */
+ ARM_REMOVE ("fp", ALL_FP),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv8m_main_ext_table[] =
+{
+ ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
+ ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
+ ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
+ ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv8_1m_main_ext_table[] =
+{
+ ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
+ ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
+ ARM_EXT ("fp",
+ ARM_FEATURE (0, ARM_EXT2_FP16_INST,
+ FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
+ ALL_FP),
+ ARM_ADD ("fp.dp",
+ ARM_FEATURE (0, ARM_EXT2_FP16_INST,
+ FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+};
+
+static const struct arm_ext_table armv8r_ext_table[] =
+{
+ ARM_ADD ("crc", ARCH_CRC_ARMV8),
+ ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
+ ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
+ ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
+ ARM_REMOVE ("fp", ALL_FP),
+ ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
/* This list should, at a minimum, contain all the architecture names
recognized by GCC. */
-#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
+#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
+#define ARM_ARCH_OPT2(N, V, DF, ext) \
+ { N, sizeof (N) - 1, V, DF, ext##_ext_table }
static const struct arm_arch_option_table arm_archs[] =
{
- ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
- ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
+ ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
+ ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
+ ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
+ ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
+ /* The ARMv5TE..ARMv6KZT2 entries below all share armv5te_ext_table for
+    their context-sensitive extensions.  */
+ ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
/* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
kept to preserve existing behaviour. */
- ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
+ ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
/* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
kept to preserve existing behaviour. */
+ ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
+ ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
+ ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
+ ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
/* The official spelling of the ARMv7 profile variants is the dashed form.
Accept the non-dashed form for compatibility with old toolchains. */
- ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
- ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
- { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+ ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
+ ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
+ ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
+ ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
+ ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
+ ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
+ ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
+ ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
+ ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
+ ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
+ armv8m_main),
+ ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
+ armv8_1m_main),
+ ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
+ ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
+ ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
+ /* Armv8.3-a deliberately reuses the armv82a extension table.  */
+ ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
+ ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
+ ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
+ ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
+ ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
+ ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
+ ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_ARCH_OPT
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
+/* DEPRECATED: Do not add any new extensions to this table; instead use
+   the context-sensitive arm_ext_table mechanism above. */
static const struct arm_option_extension_value_table arm_extensions[] =
{
ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
+ ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
+ ARM_ARCH_V8A),
ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
- arm_feature_set *ext_set)
+ arm_feature_set *ext_set,
+ const struct arm_ext_table *ext_table)
{
/* We insist on extensions being specified in alphabetical order, and with
extensions being added before being removed. We achieve this by having
gas_assert (adding_value != -1);
gas_assert (opt != NULL);
+ if (ext_table != NULL)
+ {
+ const struct arm_ext_table * ext_opt = ext_table;
+ bfd_boolean found = FALSE;
+ for (; ext_opt->name != NULL; ext_opt++)
+ if (ext_opt->name_len == len
+ && strncmp (ext_opt->name, str, len) == 0)
+ {
+ if (adding_value)
+ {
+ if (ARM_FEATURE_ZERO (ext_opt->merge))
+ /* TODO: Option not supported. When we remove the
+ legacy table this case should error out. */
+ continue;
+
+ ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
+ }
+ else
+ {
+ if (ARM_FEATURE_ZERO (ext_opt->clear))
+ /* TODO: Option not supported. When we remove the
+ legacy table this case should error out. */
+ continue;
+ ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
+ }
+ found = TRUE;
+ break;
+ }
+ if (found)
+ {
+ str = ext;
+ continue;
+ }
+ }
+
/* Scan over the options table trying to find an exact match. */
for (; opt->name != NULL; opt++)
if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
}
if (ext != NULL)
- return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt);
+ return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
return TRUE;
}
strcpy (selected_cpu_name, opt->name);
if (ext != NULL)
- return arm_parse_extension (ext, march_cpu_opt, march_ext_opt);
+ return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
+ opt->ext_table);
return TRUE;
}
stable when new architectures are added. */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
- {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V1},
- {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V2},
- {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V2S},
- {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V3},
- {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V3M},
- {TAG_CPU_ARCH_V4, ARM_ARCH_V4xM},
- {TAG_CPU_ARCH_V4, ARM_ARCH_V4},
- {TAG_CPU_ARCH_V4T, ARM_ARCH_V4TxM},
- {TAG_CPU_ARCH_V4T, ARM_ARCH_V4T},
- {TAG_CPU_ARCH_V5T, ARM_ARCH_V5xM},
- {TAG_CPU_ARCH_V5T, ARM_ARCH_V5},
- {TAG_CPU_ARCH_V5T, ARM_ARCH_V5TxM},
- {TAG_CPU_ARCH_V5T, ARM_ARCH_V5T},
- {TAG_CPU_ARCH_V5TE, ARM_ARCH_V5TExP},
- {TAG_CPU_ARCH_V5TE, ARM_ARCH_V5TE},
- {TAG_CPU_ARCH_V5TEJ, ARM_ARCH_V5TEJ},
- {TAG_CPU_ARCH_V6, ARM_ARCH_V6},
- {TAG_CPU_ARCH_V6KZ, ARM_ARCH_V6Z},
- {TAG_CPU_ARCH_V6KZ, ARM_ARCH_V6KZ},
- {TAG_CPU_ARCH_V6K, ARM_ARCH_V6K},
- {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6T2},
- {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6KT2},
- {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6ZT2},
- {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6KZT2},
+ {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V1},
+ {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V2},
+ {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V2S},
+ {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V3},
+ {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V3M},
+ {TAG_CPU_ARCH_V4, ARM_ARCH_V4xM},
+ {TAG_CPU_ARCH_V4, ARM_ARCH_V4},
+ {TAG_CPU_ARCH_V4T, ARM_ARCH_V4TxM},
+ {TAG_CPU_ARCH_V4T, ARM_ARCH_V4T},
+ {TAG_CPU_ARCH_V5T, ARM_ARCH_V5xM},
+ {TAG_CPU_ARCH_V5T, ARM_ARCH_V5},
+ {TAG_CPU_ARCH_V5T, ARM_ARCH_V5TxM},
+ {TAG_CPU_ARCH_V5T, ARM_ARCH_V5T},
+ {TAG_CPU_ARCH_V5TE, ARM_ARCH_V5TExP},
+ {TAG_CPU_ARCH_V5TE, ARM_ARCH_V5TE},
+ {TAG_CPU_ARCH_V5TEJ, ARM_ARCH_V5TEJ},
+ {TAG_CPU_ARCH_V6, ARM_ARCH_V6},
+ {TAG_CPU_ARCH_V6KZ, ARM_ARCH_V6Z},
+ {TAG_CPU_ARCH_V6KZ, ARM_ARCH_V6KZ},
+ {TAG_CPU_ARCH_V6K, ARM_ARCH_V6K},
+ {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6T2},
+ {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6KT2},
+ {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6ZT2},
+ {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6KZT2},
/* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
always selected build attributes to match those of ARMv6-M
would be selected when fully respecting chronology of architectures.
It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
move them before ARMv7 architectures. */
- {TAG_CPU_ARCH_V6_M, ARM_ARCH_V6M},
- {TAG_CPU_ARCH_V6S_M, ARM_ARCH_V6SM},
-
- {TAG_CPU_ARCH_V7, ARM_ARCH_V7},
- {TAG_CPU_ARCH_V7, ARM_ARCH_V7A},
- {TAG_CPU_ARCH_V7, ARM_ARCH_V7R},
- {TAG_CPU_ARCH_V7, ARM_ARCH_V7M},
- {TAG_CPU_ARCH_V7, ARM_ARCH_V7VE},
- {TAG_CPU_ARCH_V7E_M, ARM_ARCH_V7EM},
- {TAG_CPU_ARCH_V8, ARM_ARCH_V8A},
- {TAG_CPU_ARCH_V8, ARM_ARCH_V8_1A},
- {TAG_CPU_ARCH_V8, ARM_ARCH_V8_2A},
- {TAG_CPU_ARCH_V8, ARM_ARCH_V8_3A},
- {TAG_CPU_ARCH_V8M_BASE, ARM_ARCH_V8M_BASE},
- {TAG_CPU_ARCH_V8M_MAIN, ARM_ARCH_V8M_MAIN},
- {TAG_CPU_ARCH_V8R, ARM_ARCH_V8R},
- {TAG_CPU_ARCH_V8, ARM_ARCH_V8_4A},
- {TAG_CPU_ARCH_V8, ARM_ARCH_V8_5A},
- {-1, ARM_ARCH_NONE}
+ {TAG_CPU_ARCH_V6_M, ARM_ARCH_V6M},
+ {TAG_CPU_ARCH_V6S_M, ARM_ARCH_V6SM},
+
+ {TAG_CPU_ARCH_V7, ARM_ARCH_V7},
+ {TAG_CPU_ARCH_V7, ARM_ARCH_V7A},
+ {TAG_CPU_ARCH_V7, ARM_ARCH_V7R},
+ {TAG_CPU_ARCH_V7, ARM_ARCH_V7M},
+ {TAG_CPU_ARCH_V7, ARM_ARCH_V7VE},
+ {TAG_CPU_ARCH_V7E_M, ARM_ARCH_V7EM},
+ {TAG_CPU_ARCH_V8, ARM_ARCH_V8A},
+ {TAG_CPU_ARCH_V8, ARM_ARCH_V8_1A},
+ {TAG_CPU_ARCH_V8, ARM_ARCH_V8_2A},
+ {TAG_CPU_ARCH_V8, ARM_ARCH_V8_3A},
+ {TAG_CPU_ARCH_V8M_BASE, ARM_ARCH_V8M_BASE},
+ {TAG_CPU_ARCH_V8M_MAIN, ARM_ARCH_V8M_MAIN},
+ {TAG_CPU_ARCH_V8R, ARM_ARCH_V8R},
+ {TAG_CPU_ARCH_V8, ARM_ARCH_V8_4A},
+ {TAG_CPU_ARCH_V8, ARM_ARCH_V8_5A},
+ /* The new Armv8.1-M Mainline entry is appended after the existing v8
+    entries so that selections for older architectures stay stable.  */
+ {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
+ {-1, ARM_ARCH_NONE}
};
/* Set an attribute if it has not already been set by the user. */
if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
{
/* Force revisiting of decision for each new architecture. */
- gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8M_MAIN);
+ gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
*profile = 'A';
return TAG_CPU_ARCH_V8;
}
by the base architecture.
For new architectures we will have to check these tests. */
- gas_assert (arch <= TAG_CPU_ARCH_V8M_MAIN);
+ gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
|| ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
aeabi_set_attribute_int (Tag_DIV_use, 0);