/* tc-aarch64.c -- Assemble for the AArch64 ISA
- Copyright (C) 2009-2016 Free Software Foundation, Inc.
+ Copyright (C) 2009-2017 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GAS.
/* Which ABI to use. */
enum aarch64_abi_type
{
- AARCH64_ABI_LP64 = 0,
- AARCH64_ABI_ILP32 = 1
+ /* No ABI specified yet; a default is selected later (see aarch64_abi). */
+ AARCH64_ABI_NONE = 0,
+ /* LP64: long and all pointer types are 64-bit objects. */
+ AARCH64_ABI_LP64 = 1,
+ /* ILP32: int, long and all pointer types are 32-bit objects. */
+ AARCH64_ABI_ILP32 = 2
};
+/* Fall back to a plain "aarch64" target when gas/configure.tgt does not
+ provide a value. */
+#ifndef DEFAULT_ARCH
+#define DEFAULT_ARCH "aarch64"
+#endif
+
+/* DEFAULT_ARCH is initialized in gas/configure.tgt. */
+static const char *default_arch = DEFAULT_ARCH;
+
/* AArch64 ABI for the output file. */
-static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
+/* Starts as AARCH64_ABI_NONE ("not specified"); presumably resolved from
+ default_arch and/or command-line options later -- confirm against the
+ option-handling code. */
+static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
/* When non-zero, program to a 32-bit model, in which the C data types
int, long and all pointer types are 32-bit objects (ILP32); or to a
NT_h,
NT_s,
NT_d,
- NT_q
+ NT_q,
+ NT_zero,
+ NT_merge
};
/* Bits for DEFINED field in vector_type_el. */
static bfd_boolean parse_operands (char *, const aarch64_opcode *);
static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
-/* Diagnostics inline function utilites.
+/* Diagnostics inline function utilities.
- These are lightweight utlities which should only be called by parse_operands
+ These are lightweight utilities which should only be called by parse_operands
and other parsers. GAS processes each assembly line by parsing it against
instruction template(s), in the case of multiple templates (for the same
mnemonic name), those templates are tried one by one until one succeeds or
the parsing; we don't want to slow down the whole parsing by recording
non-user errors in detail.
- Remember that the objective is to help GAS pick up the most approapriate
+ Remember that the objective is to help GAS pick up the most appropriate
error message in the case of multiple templates, e.g. FMOV which has 8
templates. */
BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
- BASIC_REG_TYPE(CN) /* c[0-7] */ \
BASIC_REG_TYPE(VN) /* v[0-31] */ \
BASIC_REG_TYPE(ZN) /* z[0-31] */ \
BASIC_REG_TYPE(PN) /* p[0-15] */ \
/* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
+ /* Typecheck: same, plus SVE registers. */ \
+ MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
+ | REG_TYPE(ZN)) \
/* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
| REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
+ /* Typecheck: same, plus SVE registers. */ \
+ MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
+ | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
+ | REG_TYPE(ZN)) \
/* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
| REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
| REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
| REG_TYPE(FP_B) | REG_TYPE(FP_H) \
| REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
+ /* Typecheck: as above, but also Zn and Pn. This should only be \
+ used for SVE instructions, since Zn and Pn are valid symbols \
+ in other contexts. */ \
+ MULTI_REG_TYPE(R_Z_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
+ | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
+ | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
+ | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
+ | REG_TYPE(ZN) | REG_TYPE(PN)) \
/* Any integer register; used for error messages only. */ \
MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
| REG_TYPE(SP_32) | REG_TYPE(SP_64) \
case REG_TYPE_R64_SP:
msg = N_("64-bit integer or SP register expected");
break;
+ case REG_TYPE_SVE_BASE:
+ msg = N_("base register expected");
+ break;
case REG_TYPE_R_Z:
msg = N_("integer or zero register expected");
break;
+ case REG_TYPE_SVE_OFFSET:
+ msg = N_("offset register expected");
+ break;
case REG_TYPE_R_SP:
msg = N_("integer or SP register expected");
break;
msg = N_("128-bit SIMD scalar or floating-point quad precision "
"register expected");
break;
- case REG_TYPE_CN:
- msg = N_("C0 - C15 expected");
- break;
case REG_TYPE_R_Z_BHSDQ_V:
+ case REG_TYPE_R_Z_BHSDQ_VZP:
msg = N_("register expected");
break;
case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
set_syntax_error (error);
}
-/* Similiar to first_error, but this function accepts formatted error
+/* Similar to first_error, but this function accepts formatted error
message. */
static void
first_error_fmt (const char *format, ...)
return (reg_type_masks[type] & (1 << reg->type)) != 0;
}
-/* Try to parse a base or offset register. Return the register entry
- on success, setting *QUALIFIER to the register qualifier. Return null
- otherwise.
+/* Try to parse a base or offset register. Allow SVE base and offset
+ registers if REG_TYPE includes SVE registers. Return the register
+ entry on success, setting *QUALIFIER to the register qualifier.
+ Return null otherwise.
Note that this function does not issue any diagnostics. */
static const reg_entry *
-aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
+aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
+ aarch64_opnd_qualifier_t *qualifier)
{
char *str = *ccp;
const reg_entry *reg = parse_reg (&str);
*qualifier = AARCH64_OPND_QLF_X;
break;
+ case REG_TYPE_ZN:
+ if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
+ || str[0] != '.')
+ return NULL;
+ switch (TOLOWER (str[1]))
+ {
+ case 's':
+ *qualifier = AARCH64_OPND_QLF_S_S;
+ break;
+ case 'd':
+ *qualifier = AARCH64_OPND_QLF_S_D;
+ break;
+ default:
+ return NULL;
+ }
+ str += 2;
+ break;
+
default:
return NULL;
}
return reg;
}
+/* Try to parse a base or offset register. Return the register entry
+ on success, setting *QUALIFIER to the register qualifier. Return null
+ otherwise.
+
+ This is a thin wrapper around aarch64_addr_reg_parse that accepts only
+ general registers (REG_TYPE_R_Z_SP), so SVE base/offset registers
+ should not be matched here.
+
+ Note that this function does not issue any diagnostics. */
+
+static const reg_entry *
+aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
+{
+ return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
+}
+
/* Parse the qualifier of a vector register or vector element of type
REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
succeeds; otherwise return FALSE.
Accept only one occurrence of:
- 8b 16b 2h 4h 8h 2s 4s 1d 2d
+ 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
enum vector_el_type type;
/* skip '.' */
+ gas_assert (*ptr == '.');
ptr++;
if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
element_size = 64;
break;
case 'q':
- if (width == 1)
+ if (reg_type == REG_TYPE_ZN || width == 1)
{
type = NT_q;
element_size = 128;
first_error (_("missing element size"));
return FALSE;
}
- if (width != 0 && width * element_size != 64 && width * element_size != 128
- && !(width == 2 && element_size == 16))
+ if (width != 0 && width * element_size != 64
+ && width * element_size != 128
+ && !(width == 2 && element_size == 16)
+ && !(width == 4 && element_size == 8))
{
first_error_fmt (_
("invalid element size %d and vector size combination %c"),
return TRUE;
}
+/* *STR contains an SVE zero/merge predication suffix ("/z" or "/m").
+ Parse it into *PARSED_TYPE and point *STR at the end of the suffix.
+ Return TRUE on success. On failure, record a diagnostic via
+ first_error/first_error_fmt, leave *STR unchanged and return FALSE. */
+
+static bfd_boolean
+parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
+{
+ char *ptr = *str;
+
+ /* Skip '/'. */
+ gas_assert (*ptr == '/');
+ ptr++;
+ switch (TOLOWER (*ptr))
+ {
+ case 'z':
+ parsed_type->type = NT_zero;
+ break;
+ case 'm':
+ parsed_type->type = NT_merge;
+ break;
+ default:
+ if (*ptr != '\0' && *ptr != ',')
+ first_error_fmt (_("unexpected character `%c' in predication type"),
+ *ptr);
+ else
+ first_error (_("missing predication type"));
+ return FALSE;
+ }
+ /* Predication suffixes carry no element width. */
+ parsed_type->width = 0;
+ *str = ptr + 1;
+ return TRUE;
+}
+
/* Parse a register of the type TYPE.
Return PARSE_FAIL if the string pointed by *CCP is not a valid register
type = reg->type;
if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
- && *str == '.')
+ && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
{
- if (!parse_vector_type_for_operand (type, &parsetype, &str))
- return PARSE_FAIL;
+ if (*str == '.')
+ {
+ if (!parse_vector_type_for_operand (type, &parsetype, &str))
+ return PARSE_FAIL;
+ }
+ else
+ {
+ if (!parse_predication_for_operand (&parsetype, &str))
+ return PARSE_FAIL;
+ }
/* Register is of the form Vn.[bhsdq]. */
is_typed_vecreg = TRUE;
/* Expect index. In the new scheme we cannot have
Vn.[bhsdq] represent a scalar. Therefore any
Vn.[bhsdq] should have an index following it.
- Except in reglists ofcourse. */
+ Except in reglists of course. */
atype.defined |= NTA_HASINDEX;
else
atype.defined |= NTA_HASTYPE;
return PARSE_FAIL;
}
- if (in_reg_list == TRUE)
+ if (in_reg_list)
{
first_error (_("index not allowed inside register list"));
return PARSE_FAIL;
}
/* Can't use symbol_new here, so have to create a symbol and then at
- a later date assign it a value. Thats what these functions do. */
+ a later date assign it a value. That's what these functions do. */
static void
symbol_locate (symbolS * symbolP,
return TRUE;
}
+/* Return true if we should treat OPERAND as a double-precision
+ floating-point operand rather than a single-precision one.
+
+ Used when parsing floating-point immediates, where the first
+ operand's qualifier selects the precision of the immediate:
+ AARCH64_OPND_QLF_NIL (an unsuffixed SVE register) or an 8-byte
+ element size selects double precision. */
+static bfd_boolean
+double_precision_operand_p (const aarch64_opnd_info *operand)
+{
+ /* Check for unsuffixed SVE registers, which are allowed
+ for LDR and STR but not in instructions that require an
+ immediate. We get better error messages if we arbitrarily
+ pick one size, parse the immediate normally, and then
+ report the match failure in the normal way. */
+ return (operand->qualifier == AARCH64_OPND_QLF_NIL
+ || aarch64_get_qualifier_esize (operand->qualifier) == 8);
+}
+
/* Parse a floating-point immediate. Return TRUE on success and return the
value in *IMMED in the format of IEEE754 single-precision encoding.
*CCP points to the start of the string; DP_P is TRUE when the immediate
return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
-/* Assign the immediate value to the relavant field in *OPERAND if
+/* Assign the immediate value to the relevant field in *OPERAND if
RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
needs an internal fixup in a later stage.
ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
0, /* adr_type */
0,
0,
- BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
+ BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
0},
/* Get to the page containing GOT TLS entry for a symbol.
The same as GD, we allocate two consecutive GOT slots
for module index and module offset, the only difference
- with GD is the module offset should be intialized to
+ with GD is the module offset should be initialized to
zero without any outstanding runtime relocation. */
{"tlsldm", 0,
BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
/* Mode argument to parse_shift and parser_shifter_operand. */
enum parse_shift_mode
{
+ SHIFTED_NONE, /* no shifter allowed at all */
SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
"#imm{,lsl #n}" */
SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
"#imm" */
SHIFTED_LSL, /* bare "lsl #n" */
+ SHIFTED_MUL, /* bare "mul #n" (SVE pattern operands) */
SHIFTED_LSL_MSL, /* "lsl|msl #n" */
+ SHIFTED_MUL_VL, /* "mul vl" (SVE vector-length-scaled offsets) */
SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
};
return FALSE;
}
+ if (kind == AARCH64_MOD_MUL
+ && mode != SHIFTED_MUL
+ && mode != SHIFTED_MUL_VL)
+ {
+ set_syntax_error (_("invalid use of 'MUL'"));
+ return FALSE;
+ }
+
switch (mode)
{
case SHIFTED_LOGIC_IMM:
- if (aarch64_extend_operator_p (kind) == TRUE)
+ if (aarch64_extend_operator_p (kind))
{
set_syntax_error (_("extending shift is not permitted"));
return FALSE;
}
break;
+ case SHIFTED_MUL:
+ if (kind != AARCH64_MOD_MUL)
+ {
+ set_syntax_error (_("only 'MUL' is permitted"));
+ return FALSE;
+ }
+ break;
+
+ case SHIFTED_MUL_VL:
+ /* "MUL VL" consists of two separate tokens. Require the first
+ token to be "MUL" and look for a following "VL". */
+ if (kind == AARCH64_MOD_MUL)
+ {
+ skip_whitespace (p);
+ if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
+ {
+ p += 2;
+ kind = AARCH64_MOD_MUL_VL;
+ break;
+ }
+ }
+ set_syntax_error (_("only 'MUL VL' is permitted"));
+ return FALSE;
+
case SHIFTED_REG_OFFSET:
if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
&& kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
/* Parse shift amount. */
exp_has_prefix = 0;
- if (mode == SHIFTED_REG_OFFSET && *p == ']')
+ if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
exp.X_op = O_absent;
else
{
}
my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
}
- if (exp.X_op == O_absent)
+ if (kind == AARCH64_MOD_MUL_VL)
+ /* For consistency, give MUL VL the same shift amount as an implicit
+ MUL #1. */
+ operand->shifter.amount = 1;
+ else if (exp.X_op == O_absent)
{
- if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
+ if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
{
set_syntax_error (_("missing shift amount"));
return FALSE;
set_syntax_error (_("constant shift amount required"));
return FALSE;
}
- else if (exp.X_add_number < 0 || exp.X_add_number > 63)
+ /* For parsing purposes, MUL #n has no inherent range. The range
+ depends on the operand and will be checked by operand-specific
+ routines. */
+ else if (kind != AARCH64_MOD_MUL
+ && (exp.X_add_number < 0 || exp.X_add_number > 63))
{
set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
return FALSE;
The A64 instruction set has the following addressing modes:
Offset
- [base] // in SIMD ld/st structure
- [base{,#0}] // in ld/st exclusive
+ [base] // in SIMD ld/st structure
+ [base{,#0}] // in ld/st exclusive
[base{,#imm}]
[base,Xm{,LSL #imm}]
[base,Xm,SXTX {#imm}]
[base,#imm]!
Post-indexed
[base],#imm
- [base],Xm // in SIMD ld/st structure
+ [base],Xm // in SIMD ld/st structure
PC-relative (literal)
label
- =immediate
+ SVE:
+ [base,#imm,MUL VL]
+ [base,Zm.D{,LSL #imm}]
+ [base,Zm.S,(S|U)XTW {#imm}]
+ [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
+ [Zn.S,#imm]
+ [Zn.D,#imm]
+ [Zn.S,Zm.S{,LSL #imm}] // in ADR
+ [Zn.D,Zm.D{,LSL #imm}] // in ADR
+ [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
(As a convenience, the notation "=immediate" is permitted in conjunction
with the pc-relative literal load instructions to automatically place an
.pcrel=1; .preind=1; .postind=0; .writeback=0
The shift/extension information, if any, will be stored in .shifter.
+ The base and offset qualifiers will be stored in *BASE_QUALIFIER and
+ *OFFSET_QUALIFIER respectively, with NIL being used if there's no
+ corresponding register.
- It is the caller's responsibility to check for addressing modes not
- supported by the instruction, and to set inst.reloc.type. */
+ BASE_TYPE says which types of base register should be accepted and
+ OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
+ is the type of shifter that is allowed for immediate offsets,
+ or SHIFTED_NONE if none.
+
+ In all other respects, it is the caller's responsibility to check
+ for addressing modes not supported by the instruction, and to set
+ inst.reloc.type. */
static bfd_boolean
-parse_address_main (char **str, aarch64_opnd_info *operand)
+parse_address_main (char **str, aarch64_opnd_info *operand,
+ aarch64_opnd_qualifier_t *base_qualifier,
+ aarch64_opnd_qualifier_t *offset_qualifier,
+ aarch64_reg_type base_type, aarch64_reg_type offset_type,
+ enum parse_shift_mode imm_shift_mode)
{
char *p = *str;
const reg_entry *reg;
- aarch64_opnd_qualifier_t base_qualifier;
- aarch64_opnd_qualifier_t offset_qualifier;
expressionS *exp = &inst.reloc.exp;
+ *base_qualifier = AARCH64_OPND_QLF_NIL;
+ *offset_qualifier = AARCH64_OPND_QLF_NIL;
if (! skip_past_char (&p, '['))
{
/* =immediate or label. */
/* [ */
- reg = aarch64_reg_parse_32_64 (&p, &base_qualifier);
- if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R64_SP))
+ reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
+ if (!reg || !aarch64_check_reg_type (reg, base_type))
{
- set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R64_SP)));
+ set_syntax_error (_(get_reg_expected_msg (base_type)));
return FALSE;
}
operand->addr.base_regno = reg->number;
/* [Xn, */
operand->addr.preind = 1;
- reg = aarch64_reg_parse_32_64 (&p, &offset_qualifier);
+ reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
if (reg)
{
- if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
+ if (!aarch64_check_reg_type (reg, offset_type))
{
- set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
+ set_syntax_error (_(get_reg_expected_msg (offset_type)));
return FALSE;
}
|| operand->shifter.kind == AARCH64_MOD_LSL
|| operand->shifter.kind == AARCH64_MOD_SXTX)
{
- if (offset_qualifier == AARCH64_OPND_QLF_W)
+ if (*offset_qualifier == AARCH64_OPND_QLF_W)
{
set_syntax_error (_("invalid use of 32-bit register offset"));
return FALSE;
}
+ if (aarch64_get_qualifier_esize (*base_qualifier)
+ != aarch64_get_qualifier_esize (*offset_qualifier))
+ {
+ set_syntax_error (_("offset has different size from base"));
+ return FALSE;
+ }
}
- else if (offset_qualifier == AARCH64_OPND_QLF_X)
+ else if (*offset_qualifier == AARCH64_OPND_QLF_X)
{
set_syntax_error (_("invalid use of 64-bit register offset"));
return FALSE;
inst.reloc.type = entry->ldst_type;
inst.reloc.pc_rel = entry->pc_rel;
}
- else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
+ else
{
- set_syntax_error (_("invalid expression in the address"));
- return FALSE;
+ if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
+ {
+ set_syntax_error (_("invalid expression in the address"));
+ return FALSE;
+ }
+ /* [Xn,<expr> */
+ if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
+ /* [Xn,<expr>,<shifter> */
+ if (! parse_shift (&p, operand, imm_shift_mode))
+ return FALSE;
}
- /* [Xn,<expr> */
}
}
return FALSE;
}
- reg = aarch64_reg_parse_32_64 (&p, &offset_qualifier);
+ reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
if (reg)
{
/* [Xn],Xm */
+/* Parse a non-SVE address into *OPERAND: only general base and offset
+ registers are accepted and no shifter is allowed on immediate offsets.
+ The base and offset qualifiers are computed but discarded.
+ Return TRUE on success. */
static bfd_boolean
parse_address (char **str, aarch64_opnd_info *operand)
{
- return parse_address_main (str, operand);
+ aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
+ return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
+ REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
+}
+
+/* Parse an address in which SVE vector registers and MUL VL are allowed.
+ The arguments have the same meaning as for parse_address_main.
+ Return TRUE on success. */
+static bfd_boolean
+parse_sve_address (char **str, aarch64_opnd_info *operand,
+ aarch64_opnd_qualifier_t *base_qualifier,
+ aarch64_opnd_qualifier_t *offset_qualifier)
+{
+ return parse_address_main (str, operand, base_qualifier, offset_qualifier,
+ REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
+ SHIFTED_MUL_VL);
+}
/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
/* Miscellaneous. */
+/* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
+ of SIZE tokens in which index I gives the token for field value I,
+ or is null if field value I is invalid. REG_TYPE says which register
+ names should be treated as registers rather than as symbolic immediates.
+
+ Return true on success, moving *STR past the operand and storing the
+ field value in *VAL. */
+
+static int
+parse_enum_string (char **str, int64_t *val, const char *const *array,
+ size_t size, aarch64_reg_type reg_type)
+{
+ expressionS exp;
+ char *p, *q;
+ size_t i;
+
+ /* Match C-like tokens. */
+ p = q = *str;
+ while (ISALNUM (*q))
+ q++;
+
+ /* [p, q) now holds the (possibly empty) alphanumeric token. */
+ for (i = 0; i < size; ++i)
+ if (array[i]
+ && strncasecmp (array[i], p, q - p) == 0
+ && array[i][q - p] == 0)
+ {
+ *val = i;
+ *str = q;
+ return TRUE;
+ }
+
+ /* No symbolic match; fall back to an integer immediate expression. */
+ if (!parse_immediate_expression (&p, &exp, reg_type))
+ return FALSE;
+
+ if (exp.X_op == O_constant
+ && (uint64_t) exp.X_add_number < size)
+ {
+ *val = exp.X_add_number;
+ *str = p;
+ return TRUE;
+ }
+
+ /* Use the default error for this operand. */
+ return FALSE;
+}
+
/* Parse an option for a preload instruction. Returns the encoding for the
option, or PARSE_FAIL. */
} \
} while (0)
+/* Parse an enumerated operand name (see parse_enum_string) into the
+ local variable "val"; jump to the shared "failure" label on error.
+ Relies on the str, val and imm_reg_type locals of the enclosing
+ operand parser. */
+#define po_enum_or_fail(array) do { \
+ if (!parse_enum_string (&str, &val, array, \
+ ARRAY_SIZE (array), imm_reg_type)) \
+ goto failure; \
+ } while (0)
+
#define po_misc_or_fail(expr) do { \
if (!expr) \
goto failure; \
instruction->reloc.type = BFD_RELOC_UNUSED;
}
-/* Data strutures storing one user error in the assembly code related to
+/* Data structures storing one user error in the assembly code related to
operands. */
struct operand_error_record
const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
/* Most opcodes have far fewer patterns in the list. */
- if (empty_qualifier_sequence_p (qualifiers) == TRUE)
+ if (empty_qualifier_sequence_p (qualifiers))
{
DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
break;
return idx;
}
-/* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
+/* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
corresponding operands in *INSTR. */
static inline void
/* Delimiter. */
if (str[0] != '\0')
- strcat (buf, i == 0 ? " " : ",");
+ strcat (buf, i == 0 ? " " : ", ");
/* Append the operand string. */
strcat (buf, str);
else
{
gas_assert (idx >= 0);
- as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
+ as_bad (_("operand %d must be %s -- `%s'"), idx + 1,
aarch64_get_operand_desc (opd_code), str);
}
break;
{
/* Most opcodes have far fewer patterns in the list.
First NIL qualifier indicates the end in the list. */
- if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
+ if (empty_qualifier_sequence_p (*qualifiers_list))
break;
if (i != qlf_idx)
detail->error ? detail->error : _("immediate value"),
detail->data[0], detail->data[1], idx + 1, str);
else
- as_bad (_("%s expected to be %d at operand %d -- `%s'"),
+ as_bad (_("%s must be %d at operand %d -- `%s'"),
detail->error ? detail->error : _("immediate value"),
detail->data[0], idx + 1, str);
break;
break;
case AARCH64_OPDE_UNALIGNED:
- as_bad (_("immediate value should be a multiple of "
+ as_bad (_("immediate value must be a multiple of "
"%d at operand %d -- `%s'"),
detail->data[0], idx + 1, str);
break;
When this function is called, the operand error information had
been collected for an assembly line and there will be multiple
- errors in the case of mulitple instruction templates; output the
+ errors in the case of multiple instruction templates; output the
error message that most closely describes the problem. */
static void
}
/* Find the error kind of the highest severity. */
- DEBUG_TRACE ("multiple opcode entres with error kind");
+ DEBUG_TRACE ("multiple opcode entries with error kind");
kind = AARCH64_OPDE_NIL;
for (curr = head; curr != NULL; curr = curr->next)
{
static templates *
opcode_lookup (char **str)
{
- char *end, *base;
+ char *end, *base, *dot;
const aarch64_cond *cond;
char condname[16];
int len;
/* Scan up to the end of the mnemonic, which must end in white space,
'.', or end of string. */
+ dot = 0;
for (base = end = *str; is_part_of_name(*end); end++)
- if (*end == '.')
- break;
+ if (*end == '.' && !dot)
+ dot = end;
- if (end == base)
+ if (end == base || dot == base)
return 0;
inst.cond = COND_ALWAYS;
/* Handle a possible condition. */
- if (end[0] == '.')
+ if (dot)
{
- cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
+ cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
if (cond)
{
inst.cond = cond->value;
- *str = end + 3;
+ *str = end;
}
else
{
- *str = end;
+ *str = dot;
return 0;
}
+ len = dot - base;
}
else
- *str = end;
-
- len = end - base;
+ {
+ *str = end;
+ len = end - base;
+ }
if (inst.cond == COND_ALWAYS)
{
if (!vectype->defined || vectype->type == NT_invtype)
goto vectype_conversion_fail;
+ if (vectype->type == NT_zero)
+ return AARCH64_OPND_QLF_P_Z;
+ if (vectype->type == NT_merge)
+ return AARCH64_OPND_QLF_P_M;
+
gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
case AARCH64_OPND_Rt_SYS:
case AARCH64_OPND_Rd_SP:
case AARCH64_OPND_Rn_SP:
+ case AARCH64_OPND_Rm_SP:
case AARCH64_OPND_Fd:
case AARCH64_OPND_Fn:
case AARCH64_OPND_Fm:
case AARCH64_OPND_WIDTH:
case AARCH64_OPND_UIMM7:
case AARCH64_OPND_NZCV:
+ case AARCH64_OPND_SVE_PATTERN:
+ case AARCH64_OPND_SVE_PRFOP:
+ operand->imm.value = default_value;
+ break;
+
+ case AARCH64_OPND_SVE_PATTERN_SCALED:
operand->imm.value = default_value;
+ operand->shifter.kind = AARCH64_MOD_MUL;
+ operand->shifter.amount = 1;
break;
case AARCH64_OPND_EXCEPTION:
return TRUE;
}
-/* A primitive log caculator. */
+/* A primitive log calculator. */
static inline unsigned int
get_logsz (unsigned int size)
gas_assert (logsz <= 4);
/* In reloc.c, these pseudo relocation types should be defined in similar
- order as above reloc_ldst_lo12 array. Because the array index calcuation
+ order as above reloc_ldst_lo12 array. Because the array index calculation
below relies on this. */
return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
clear_error ();
skip_whitespace (str);
- imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
+ if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
+ imm_reg_type = REG_TYPE_R_Z_BHSDQ_VZP;
+ else
+ imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
{
int comma_skipped_p = 0;
aarch64_reg_type rtype;
struct vector_type_el vectype;
- aarch64_opnd_qualifier_t qualifier;
+ aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
aarch64_opnd_info *info = &inst.base.operands[i];
aarch64_reg_type reg_type;
backtrack_pos = str;
}
- /* Expect comma between operands; the backtrack mechanizm will take
+ /* Expect comma between operands; the backtrack mechanism will take
care of cases of omitted optional operand. */
if (i > 0 && ! skip_past_char (&str, ','))
{
case AARCH64_OPND_Ra:
case AARCH64_OPND_Rt_SYS:
case AARCH64_OPND_PAIRREG:
+ case AARCH64_OPND_SVE_Rm:
po_int_reg_or_fail (REG_TYPE_R_Z);
break;
case AARCH64_OPND_Rd_SP:
case AARCH64_OPND_Rn_SP:
+ case AARCH64_OPND_SVE_Rn_SP:
+ case AARCH64_OPND_Rm_SP:
po_int_reg_or_fail (REG_TYPE_R_SP);
break;
case AARCH64_OPND_Sd:
case AARCH64_OPND_Sn:
case AARCH64_OPND_Sm:
+ case AARCH64_OPND_SVE_VZn:
+ case AARCH64_OPND_SVE_Vd:
+ case AARCH64_OPND_SVE_Vm:
+ case AARCH64_OPND_SVE_Vn:
val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
if (val == PARSE_FAIL)
{
info->qualifier = AARCH64_OPND_QLF_S_D;
break;
+ case AARCH64_OPND_SVE_Zm3_INDEX:
+ case AARCH64_OPND_SVE_Zm3_22_INDEX:
+ case AARCH64_OPND_SVE_Zm4_INDEX:
case AARCH64_OPND_SVE_Zn_INDEX:
reg_type = REG_TYPE_ZN;
goto vector_reg_index;
goto failure;
break;
- case AARCH64_OPND_Cn:
- case AARCH64_OPND_Cm:
- po_reg_or_fail (REG_TYPE_CN);
- if (val > 15)
+ case AARCH64_OPND_CRn:
+ case AARCH64_OPND_CRm:
{
- set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
- goto failure;
+ char prefix = *(str++);
+ if (prefix != 'c' && prefix != 'C')
+ goto failure;
+
+ po_imm_nc_or_fail ();
+ if (val > 15)
+ {
+ set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
+ goto failure;
+ }
+ info->qualifier = AARCH64_OPND_QLF_CR;
+ info->imm.value = val;
+ break;
}
- inst.base.operands[i].reg.regno = val;
- break;
case AARCH64_OPND_SHLL_IMM:
case AARCH64_OPND_IMM_VLSR:
break;
case AARCH64_OPND_CCMP_IMM:
+ case AARCH64_OPND_SIMM5:
case AARCH64_OPND_FBITS:
case AARCH64_OPND_UIMM4:
case AARCH64_OPND_UIMM3_OP1:
case AARCH64_OPND_IMM_VLSL:
case AARCH64_OPND_IMM:
case AARCH64_OPND_WIDTH:
+ case AARCH64_OPND_SVE_INV_LIMM:
+ case AARCH64_OPND_SVE_LIMM:
+ case AARCH64_OPND_SVE_LIMM_MOV:
+ case AARCH64_OPND_SVE_SHLIMM_PRED:
+ case AARCH64_OPND_SVE_SHLIMM_UNPRED:
+ case AARCH64_OPND_SVE_SHRIMM_PRED:
+ case AARCH64_OPND_SVE_SHRIMM_UNPRED:
+ case AARCH64_OPND_SVE_SIMM5:
+ case AARCH64_OPND_SVE_SIMM5B:
+ case AARCH64_OPND_SVE_SIMM6:
+ case AARCH64_OPND_SVE_SIMM8:
+ case AARCH64_OPND_SVE_UIMM3:
+ case AARCH64_OPND_SVE_UIMM7:
+ case AARCH64_OPND_SVE_UIMM8:
+ case AARCH64_OPND_SVE_UIMM8_53:
+ case AARCH64_OPND_IMM_ROT1:
+ case AARCH64_OPND_IMM_ROT2:
+ case AARCH64_OPND_IMM_ROT3:
+ case AARCH64_OPND_SVE_IMM_ROT1:
+ case AARCH64_OPND_SVE_IMM_ROT2:
+ po_imm_nc_or_fail ();
+ info->imm.value = val;
+ break;
+
+ case AARCH64_OPND_SVE_AIMM:
+ case AARCH64_OPND_SVE_ASIMM:
po_imm_nc_or_fail ();
info->imm.value = val;
+ skip_whitespace (str);
+ if (skip_past_comma (&str))
+ po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
+ else
+ inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
+ break;
+
+ case AARCH64_OPND_SVE_PATTERN:
+ po_enum_or_fail (aarch64_sve_pattern_array);
+ info->imm.value = val;
+ break;
+
+ case AARCH64_OPND_SVE_PATTERN_SCALED:
+ po_enum_or_fail (aarch64_sve_pattern_array);
+ info->imm.value = val;
+ if (skip_past_comma (&str)
+ && !parse_shift (&str, info, SHIFTED_MUL))
+ goto failure;
+ if (!info->shifter.operator_present)
+ {
+ gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
+ info->shifter.kind = AARCH64_MOD_MUL;
+ info->shifter.amount = 1;
+ }
+ break;
+
+ case AARCH64_OPND_SVE_PRFOP:
+ po_enum_or_fail (aarch64_sve_prfop_array);
+ info->imm.value = val;
break;
case AARCH64_OPND_UIMM7:
case AARCH64_OPND_FPIMM:
case AARCH64_OPND_SIMD_FPIMM:
+ case AARCH64_OPND_SVE_FPIMM8:
{
int qfloat;
- bfd_boolean dp_p
- = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
- == 8);
+ bfd_boolean dp_p;
+
+ dp_p = double_precision_operand_p (&inst.base.operands[0]);
if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
|| !aarch64_imm_float_p (qfloat))
{
}
break;
+ case AARCH64_OPND_SVE_I1_HALF_ONE:
+ case AARCH64_OPND_SVE_I1_HALF_TWO:
+ case AARCH64_OPND_SVE_I1_ZERO_ONE:
+ {
+ int qfloat;
+ bfd_boolean dp_p;
+
+ dp_p = double_precision_operand_p (&inst.base.operands[0]);
+ if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
+ {
+ if (!error_p ())
+ set_fatal_syntax_error (_("invalid floating-point"
+ " constant"));
+ goto failure;
+ }
+ inst.base.operands[i].imm.value = qfloat;
+ inst.base.operands[i].imm.is_fp = 1;
+ }
+ break;
+
case AARCH64_OPND_LIMM:
po_misc_or_fail (parse_shifter_operand (&str, info,
SHIFTED_LOGIC_IMM));
case AARCH64_OPND_COND:
case AARCH64_OPND_COND1:
- info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
- str += 2;
- if (info->cond == NULL)
- {
- set_syntax_error (_("invalid condition"));
- goto failure;
- }
- else if (operands[i] == AARCH64_OPND_COND1
- && (info->cond->value & 0xe) == 0xe)
- {
- /* Not allow AL or NV. */
- set_default_error ();
- goto failure;
- }
+ {
+ char *start = str;
+ do
+ str++;
+ while (ISALPHA (*str));
+ info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
+ if (info->cond == NULL)
+ {
+ set_syntax_error (_("invalid condition"));
+ goto failure;
+ }
+ else if (operands[i] == AARCH64_OPND_COND1
+ && (info->cond->value & 0xe) == 0xe)
+ {
+ /* Do not allow AL or NV. */
+ set_default_error ();
+ goto failure;
+ }
+ }
break;
case AARCH64_OPND_ADDR_ADRP:
case AARCH64_OPND_ADDR_REGOFF:
/* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
po_misc_or_fail (parse_address (&str, info));
+ regoff_addr:
if (info->addr.pcrel || !info->addr.offset.is_reg
|| !info->addr.preind || info->addr.postind
|| info->addr.writeback)
/* skip_p */ 0);
break;
+ case AARCH64_OPND_ADDR_SIMM10:
+ po_misc_or_fail (parse_address (&str, info));
+ if (info->addr.pcrel || info->addr.offset.is_reg
+ || !info->addr.preind || info->addr.postind)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ if (inst.reloc.type != BFD_RELOC_UNUSED)
+ {
+ set_syntax_error (_("relocation not allowed"));
+ goto failure;
+ }
+ assign_imm_if_const_or_fixup_later (&inst.reloc, info,
+ /* addr_off_p */ 1,
+ /* need_libopcodes_p */ 1,
+ /* skip_p */ 0);
+ break;
+
case AARCH64_OPND_ADDR_UIMM12:
po_misc_or_fail (parse_address (&str, info));
if (info->addr.pcrel || info->addr.offset.is_reg
else
{
set_fatal_syntax_error
- (_("writeback value should be an immediate constant"));
+ (_("writeback value must be an immediate constant"));
goto failure;
}
}
/* No qualifier. */
break;
+ case AARCH64_OPND_SVE_ADDR_RI_S4x16:
+ case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_U6:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x2:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x4:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x8:
+ /* [X<n>{, #imm, MUL VL}]
+ [X<n>{, #imm}]
+ but recognizing SVE registers. */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if (base_qualifier != AARCH64_OPND_QLF_X)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ sve_regimm:
+ if (info->addr.pcrel || info->addr.offset.is_reg
+ || !info->addr.preind || info->addr.writeback)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ if (inst.reloc.type != BFD_RELOC_UNUSED
+ || inst.reloc.exp.X_op != O_constant)
+ {
+ /* Make sure this has priority over
+ "invalid addressing mode". */
+ set_fatal_syntax_error (_("constant offset required"));
+ goto failure;
+ }
+ info->addr.offset.imm = inst.reloc.exp.X_add_number;
+ break;
+
+ case AARCH64_OPND_SVE_ADDR_RR:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RX:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL3:
+ /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
+ but recognizing SVE registers. */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if (base_qualifier != AARCH64_OPND_QLF_X
+ || offset_qualifier != AARCH64_OPND_QLF_X)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ goto regoff_addr;
+
+ case AARCH64_OPND_SVE_ADDR_RZ:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
+ /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
+ [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if (base_qualifier != AARCH64_OPND_QLF_X
+ || (offset_qualifier != AARCH64_OPND_QLF_S_S
+ && offset_qualifier != AARCH64_OPND_QLF_S_D))
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ info->qualifier = offset_qualifier;
+ goto regoff_addr;
+
+ case AARCH64_OPND_SVE_ADDR_ZI_U5:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
+ /* [Z<n>.<T>{, #imm}] */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if (base_qualifier != AARCH64_OPND_QLF_S_S
+ && base_qualifier != AARCH64_OPND_QLF_S_D)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ info->qualifier = base_qualifier;
+ goto sve_regimm;
+
+ case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
+ case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
+ case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
+ /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
+ [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
+
+ We don't reject:
+
+ [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
+
+ here since we get better error messages by leaving it to
+ the qualifier checking routines. */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if ((base_qualifier != AARCH64_OPND_QLF_S_S
+ && base_qualifier != AARCH64_OPND_QLF_S_D)
+ || offset_qualifier != base_qualifier)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ info->qualifier = base_qualifier;
+ goto regoff_addr;
+
case AARCH64_OPND_SYSREG:
if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
== PARSE_FAIL)
{
case ldst_pos:
case ldst_imm9:
+ case ldst_imm10:
case ldst_unscaled:
case ldst_unpriv:
/* Loading/storing the base register is unpredictable if writeback. */
REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
- /* Coprocessor register numbers. */
- REGSET (c, CN), REGSET (C, CN),
+ REGDEF (ip0, 16, R_64), REGDEF (IP0, 16, R_64),
+ REGDEF (ip1, 17, R_64), REGDEF (IP1, 17, R_64),
+ REGDEF (fp, 29, R_64), REGDEF (FP, 29, R_64),
+ REGDEF (lr, 30, R_64), REGDEF (LR, 30, R_64),
/* Floating-point single precision registers. */
REGSET (s, FP_S), REGSET (S, FP_S),
Note - despite the name this initialisation is not done when the frag
is created, but only when its type is assigned. A frag can be created
and used a long time before its type is set, so beware of assuming that
- this initialisationis performed first. */
+ this initialisation is performed first. */
#ifndef OBJ_ELF
void
case AARCH64_OPND_ADDR_SIMM7:
case AARCH64_OPND_ADDR_SIMM9:
case AARCH64_OPND_ADDR_SIMM9_2:
+ case AARCH64_OPND_ADDR_SIMM10:
case AARCH64_OPND_ADDR_UIMM12:
/* Immediate offset in an address. */
insn = get_aarch64_insn (buf);
case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
fixP->fx_r_type = (ilp32_p
? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
- : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
+ : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
S_SET_THREAD_LOCAL (fixP->fx_addsy);
/* Should always be exported to object file, see
aarch64_force_relocation(). */
gas_assert (seg->use_rela_p);
break;
- case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
- case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
case BFD_RELOC_AARCH64_LDST32_LO12:
case BFD_RELOC_AARCH64_LDST64_LO12:
case BFD_RELOC_AARCH64_LDST8_LO12:
- case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
- case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
#ifdef OBJ_ELF
+/* Implement md_after_parse_args.  This is the earliest point at which we
+   need to decide the ABI.  If no -mabi option was specified, the ABI is
+   derived from the configured target triplet.  */
+
+void
+aarch64_after_parse_args (void)
+{
+  size_t len;
+
+  /* An explicit -mabi option takes precedence; nothing to decide.  */
+  if (aarch64_abi != AARCH64_ABI_NONE)
+    return;
+
+  /* DEFAULT_ARCH carries a ":32" extension when configured for ILP32
+     (see gas/configure.tgt).  Test for the suffix itself rather than at
+     a fixed offset so variant architecture names are handled too.  */
+  len = strlen (default_arch);
+  if (len > 3 && strcmp (default_arch + len - 3, ":32") == 0)
+    aarch64_abi = AARCH64_ABI_ILP32;
+  else
+    aarch64_abi = AARCH64_ABI_LP64;
+}
+
const char *
elf64_aarch64_target_format (void)
{
AARCH64_FEATURE_CRC), "Cortex-A72"},
{"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
AARCH64_FEATURE_CRC), "Cortex-A73"},
+ {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
+ AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16),
+ "Cortex-A55"},
+ {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
+ AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16),
+ "Cortex-A75"},
{"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
"Samsung Exynos M1"},
+ {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
+ AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
+ "Qualcomm Falkor"},
{"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
"Qualcomm QDF24XX"},
{"armv8-a", AARCH64_ARCH_V8},
{"armv8.1-a", AARCH64_ARCH_V8_1},
{"armv8.2-a", AARCH64_ARCH_V8_2},
+ {"armv8.3-a", AARCH64_ARCH_V8_3},
{NULL, AARCH64_ARCH_NONE}
};
{"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
AARCH64_ARCH_NONE},
{"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
- AARCH64_ARCH_NONE},
+ AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
{"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
AARCH64_ARCH_NONE},
{"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
AARCH64_ARCH_NONE},
{"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
- AARCH64_ARCH_NONE},
+ AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
{"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
AARCH64_ARCH_NONE},
{"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
{"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
AARCH64_ARCH_NONE},
+ {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
+ AARCH64_FEATURE (AARCH64_FEATURE_F16
+ | AARCH64_FEATURE_SIMD
+ | AARCH64_FEATURE_COMPNUM, 0)},
+ {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
+ AARCH64_FEATURE (AARCH64_FEATURE_F16
+ | AARCH64_FEATURE_SIMD, 0)},
+ {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
+ AARCH64_ARCH_NONE},
+ {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
+ AARCH64_ARCH_NONE},
{NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};