/* tc-i386.c -- Assemble code for the Intel 80386
Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+ 2012
Free Software Foundation, Inc.
This file is part of GAS, the GNU Assembler.
WAIT_PREFIX must be the first prefix since FWAIT really is an
instruction, and so must come before any prefixes.
The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
- REP_PREFIX, LOCK_PREFIX. */
+ REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
#define WAIT_PREFIX 0
#define SEG_PREFIX 1
#define ADDR_PREFIX 2
#define DATA_PREFIX 3
#define REP_PREFIX 4
+#define HLE_PREFIX REP_PREFIX
#define LOCK_PREFIX 5
#define REX_PREFIX 6 /* must come last. */
#define MAX_PREFIXES 7 /* max prefixes per opcode */
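+/* For illustration: the xacquire/xrelease HLE bytes (0xf2/0xf3) reuse the
+   REP_PREFIX slot, so per the preferred order above they are emitted just
+   before any lock prefix.  */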
static void set_intel_syntax (int);
static void set_intel_mnemonic (int);
static void set_allow_index_reg (int);
-static void set_sse_check (int);
+static void set_check (int);
static void set_cpu_arch (int);
#ifdef TE_PE
static void pe_directive_secrel (int);
old_gcc_only,
unsupported_with_intel_mnemonic,
unsupported_syntax,
- unsupported
+ unsupported,
+ invalid_vsib_address,
+ invalid_vector_register_set,
+ unsupported_vector_index_register
};
struct _i386_insn
/* Swap operand in encoding. */
unsigned int swap_operand;
+ /* Prefer 8bit or 32bit displacement in encoding. */
+ enum
+ {
+ disp_encoding_default = 0,
+ disp_encoding_8bit,
+ disp_encoding_32bit
+ } disp_encoding;
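+  /* For illustration: the ".d8" and ".d32" mnemonic suffixes parsed off the
+     mnemonic further down (e.g. "movl.d32 %eax, 4(%ebp)", example chosen only
+     for illustration) select disp_encoding_8bit and disp_encoding_32bit.  */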
+
+ /* Have HLE prefix. */
+ unsigned int have_hle;
+
/* Error message. */
enum i386_error error;
};
|| ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
&& !defined (TE_GNU) \
&& !defined (TE_LINUX) \
- && !defined (TE_NETWARE) \
+ && !defined (TE_NACL) \
+ && !defined (TE_NETWARE) \
&& !defined (TE_FreeBSD) \
+ && !defined (TE_DragonFly) \
&& !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment. If the
pre-processor is disabled, these aren't very useful. The option
static enum flag_code flag_code;
static unsigned int object_64bit;
+static unsigned int disallow_64bit_reloc;
static int use_rela_relocations = 0;
+#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
+ || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
+ || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
+
+/* The ELF ABI to use. */
+enum x86_elf_abi
+{
+ I386_ABI,
+ X86_64_ABI,
+ X86_64_X32_ABI
+};
+
+static enum x86_elf_abi x86_elf_abi = I386_ABI;
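+/* Note: X86_64_X32_ABI is the x32 ILP32 ABI: x86-64 instructions with
+   32-bit addresses in a 32-bit ELF container, which is why 64-bit-only
+   relocations are disallowed for it (see disallow_64bit_reloc above).  */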
+#endif
+
/* The names used to print error messages. */
static const char *flag_code_names[] =
{
/* 1 if pseudo index register, eiz/riz, is allowed. */
static int allow_index_reg = 0;
-static enum
+static enum check_kind
{
- sse_check_none = 0,
- sse_check_warning,
- sse_check_error
+ check_none = 0,
+ check_warning,
+ check_error
}
-sse_check;
+sse_check, operand_check = check_warning;
/* Register prefix used for error message. */
static const char *register_prefix = "%";
CPU_COREI7_FLAGS, 0, 0 },
{ STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
CPU_L1OM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
+ CPU_K1OM_FLAGS, 0, 0 },
{ STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
CPU_K6_FLAGS, 0, 0 },
{ STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
CPU_K8_FLAGS, 0, 0 },
{ STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
CPU_AMDFAM10_FLAGS, 0, 0 },
- { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
+ { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
CPU_BDVER1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
+ CPU_BDVER2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
+ CPU_BDVER3_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
+ CPU_BTVER1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
+ CPU_BTVER2_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
CPU_8087_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
CPU_ANY_SSE_FLAGS, 0, 1 },
{ STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
CPU_AVX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
+ CPU_AVX2_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
CPU_ANY_AVX_FLAGS, 0, 1 },
{ STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
CPU_VMX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
+ CPU_VMFUNC_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
CPU_SMX_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
CPU_RDRND_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
CPU_F16C_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
+ CPU_BMI2_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
CPU_FMA_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
CPU_LWP_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
CPU_MOVBE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
+ CPU_CX16_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
CPU_EPT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
+ CPU_LZCNT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
+ CPU_HLE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
+ CPU_RTM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
+ CPU_INVPCID_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
CPU_CLFLUSH_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
CPU_SSE4A_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
CPU_ABM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
+ CPU_BMI_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
+ CPU_TBM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
+ CPU_ADX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
+ CPU_RDSEED_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
+ CPU_PRFCHW_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
+ CPU_SMAP_FLAGS, 0, 0 },
};
#ifdef I386COFF
{"att_mnemonic", set_intel_mnemonic, 0},
{"allow_index_reg", set_allow_index_reg, 1},
{"disallow_index_reg", set_allow_index_reg, 0},
- {"sse_check", set_sse_check, 0},
+ {"sse_check", set_check, 0},
+ {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
{"largecomm", handle_large_common, 0},
#else
PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
PROCESSOR_GENERIC64, alt_long_patt will be used.
3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
- PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
+ PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
will be used.
When -mtune= isn't used, alt_long_patt will be used if
else
patt = f32_patt;
break;
- case PROCESSOR_PENTIUMPRO:
case PROCESSOR_PENTIUM4:
case PROCESSOR_NOCONA:
case PROCESSOR_CORE:
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
case PROCESSOR_L1OM:
+ case PROCESSOR_K1OM:
case PROCESSOR_GENERIC64:
patt = alt_long_patt;
break;
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
- case PROCESSOR_BDVER1:
+ case PROCESSOR_BD:
+ case PROCESSOR_BT:
patt = alt_short_patt;
break;
case PROCESSOR_I386:
case PROCESSOR_I486:
case PROCESSOR_PENTIUM:
+ case PROCESSOR_PENTIUMPRO:
case PROCESSOR_GENERIC32:
patt = f32_patt;
break;
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
- case PROCESSOR_BDVER1:
+ case PROCESSOR_BD:
+ case PROCESSOR_BT:
case PROCESSOR_GENERIC32:
/* We use cpu_arch_isa_flags to check if we CAN optimize
with nops. */
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
case PROCESSOR_L1OM:
+ case PROCESSOR_K1OM:
if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
patt = alt_long_patt;
else
return 0;
}
+static INLINE unsigned int
+register_number (const reg_entry *r)
+{
+ unsigned int nr = r->reg_num;
+
+ if (r->reg_flags & RegRex)
+ nr += 8;
+
+ return nr;
+}
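+/* For illustration (assumed register-table values): %eax has reg_num 0 and
+   no RegRex, giving 0, while %r8d has reg_num 0 with RegRex set, giving 8.  */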
+
static INLINE unsigned int
mode_from_disp_size (i386_operand_type t)
{
}
static void
-set_sse_check (int dummy ATTRIBUTE_UNUSED)
+set_check (int what)
{
+ enum check_kind *kind;
+ const char *str;
+
+ if (what)
+ {
+ kind = &operand_check;
+ str = "operand";
+ }
+ else
+ {
+ kind = &sse_check;
+ str = "sse";
+ }
+
SKIP_WHITESPACE ();
if (!is_end_of_line[(unsigned char) *input_line_pointer])
int e = get_symbol_end ();
if (strcmp (string, "none") == 0)
- sse_check = sse_check_none;
+ *kind = check_none;
else if (strcmp (string, "warning") == 0)
- sse_check = sse_check_warning;
+ *kind = check_warning;
else if (strcmp (string, "error") == 0)
- sse_check = sse_check_error;
+ *kind = check_error;
else
- as_bad (_("bad argument to sse_check directive."));
+ as_bad (_("bad argument to %s_check directive."), str);
*input_line_pointer = e;
}
else
- as_bad (_("missing argument for sse_check directive"));
+ as_bad (_("missing argument for %s_check directive"), str);
demand_empty_rest_of_line ();
}
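+/* For illustration, the directives handled above accept one of three
+   keywords, e.g. ".sse_check warning" or ".operand_check error"
+   (shown only as examples of the accepted syntax).  */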
|| new_flag.bitfield.cpul1om)
return;
+ /* If we are targeting Intel K1OM, we must enable it. */
+ if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
+ || new_flag.bitfield.cpuk1om)
+ return;
+
as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
else
cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
+ cpu_arch_isa_flags = flags;
}
*input_line_pointer = e;
demand_empty_rest_of_line ();
as_fatal (_("Intel L1OM is 64bit ELF only"));
return bfd_arch_l1om;
}
+ else if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || flag_code != CODE_64BIT)
+ as_fatal (_("Intel K1OM is 64bit ELF only"));
+ return bfd_arch_k1om;
+ }
else
return bfd_arch_i386;
}
unsigned long
-i386_mach ()
+i386_mach (void)
{
- if (!strcmp (default_arch, "x86_64"))
+ if (!strncmp (default_arch, "x86_64", 6))
{
if (cpu_arch_isa == PROCESSOR_L1OM)
{
- if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || default_arch[6] != '\0')
as_fatal (_("Intel L1OM is 64bit ELF only"));
return bfd_mach_l1om;
}
- else
+ else if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || default_arch[6] != '\0')
+ as_fatal (_("Intel K1OM is 64bit ELF only"));
+ return bfd_mach_k1om;
+ }
+ else if (default_arch[6] == '\0')
return bfd_mach_x86_64;
+ else
+ return bfd_mach_x64_32;
}
else if (!strcmp (default_arch, "i386"))
return bfd_mach_i386_i386;
else
- as_fatal (_("Unknown architecture"));
+ as_fatal (_("unknown architecture"));
}
\f
void
-md_begin ()
+md_begin (void)
{
const char *hash_err;
(void *) core_optab);
if (hash_err)
{
- as_fatal (_("Internal Error: Can't hash %s: %s"),
+ as_fatal (_("can't hash %s: %s"),
(optab - 1)->name,
hash_err);
}
{
hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
if (hash_err)
- as_fatal (_("Internal Error: Can't hash %s: %s"),
+ as_fatal (_("can't hash %s: %s"),
regtab->reg_name,
hash_err);
}
if (flag_code == CODE_64BIT)
{
+#if defined (OBJ_COFF) && defined (TE_PE)
+ x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
+ ? 32 : 16);
+#else
x86_dwarf2_return_column = 16;
+#endif
x86_cie_data_alignment = -8;
}
else
break;
}
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ if (other == BFD_RELOC_SIZE32)
+ {
+ if (size == 8)
+ return BFD_RELOC_SIZE64;
+ if (pcrel)
+ as_bad (_("there are no pc-relative size relocations"));
+ }
+#endif
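+  /* For illustration (assumed example): an operand such as
+     "movl $sym@SIZE, %eax" requests a size relocation; in 64-bit code an
+     8-byte field is widened to BFD_RELOC_SIZE64 here and pc-relative
+     forms are rejected.  */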
+
/* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
- if (size == 4 && flag_code != CODE_64BIT)
+ if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
sign = -1;
rel = bfd_reloc_type_lookup (stdoutput, other);
&& fixP->fx_r_type == BFD_RELOC_32_PCREL)
return 0;
- /* adjust_reloc_syms doesn't know about the GOT. */
- if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
+ /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
+ for size relocations. */
+ if (fixP->fx_r_type == BFD_RELOC_SIZE32
+ || fixP->fx_r_type == BFD_RELOC_SIZE64
+ || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
|| fixP->fx_r_type == BFD_RELOC_386_PLT32
|| fixP->fx_r_type == BFD_RELOC_386_GOT32
|| fixP->fx_r_type == BFD_RELOC_386_TLS_GD
/* Check register specifier. */
if (i.vex.register_specifier)
- {
- register_specifier = i.vex.register_specifier->reg_num;
- if ((i.vex.register_specifier->reg_flags & RegRex))
- register_specifier += 8;
- register_specifier = ~register_specifier & 0xf;
- }
+ register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
else
register_specifier = 0xf;
{
expressionS *exp;
- if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
+ if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
+ && i.operands > 0)
{
- /* SSE3 Instructions have the fixed operands with an opcode
- suffix which is coded in the same place as an 8-bit immediate
- field would be. Here we check those operands and remove them
- afterwards. */
+ /* MONITOR/MWAIT as well as SVME instructions have fixed operands
+ with an opcode suffix which is coded in the same place as an
+ 8-bit immediate field would be.
+ Here we check those operands and remove them afterwards. */
unsigned int x;
for (x = 0; x < i.operands; x++)
- if (i.op[x].regs->reg_num != x)
+ if (register_number (i.op[x].regs) != x)
as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
register_prefix, i.op[x].regs->reg_name, x + 1,
i.tm.name);
i.tm.extension_opcode = None;
}
+
+static int
+check_hle (void)
+{
+ switch (i.tm.opcode_modifier.hleprefixok)
+ {
+ default:
+ abort ();
+ case HLEPrefixNone:
+ if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
+ as_bad (_("invalid instruction `%s' after `xacquire'"),
+ i.tm.name);
+ else
+ as_bad (_("invalid instruction `%s' after `xrelease'"),
+ i.tm.name);
+ return 0;
+ case HLEPrefixLock:
+ if (i.prefix[LOCK_PREFIX])
+ return 1;
+ if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
+ as_bad (_("missing `lock' with `xacquire'"));
+ else
+ as_bad (_("missing `lock' with `xrelease'"));
+ return 0;
+ case HLEPrefixAny:
+ return 1;
+ case HLEPrefixRelease:
+ if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
+ {
+ as_bad (_("instruction `%s' after `xacquire' not allowed"),
+ i.tm.name);
+ return 0;
+ }
+ if (i.mem_operands == 0
+ || !operand_type_check (i.types[i.operands - 1], anymem))
+ {
+ as_bad (_("memory destination needed for instruction `%s'"
+ " after `xrelease'"), i.tm.name);
+ return 0;
+ }
+ return 1;
+ }
+}
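+/* For illustration (assumed examples): "xacquire lock incl (%ecx)" passes
+   the HLEPrefixLock check, "xacquire incl (%ecx)" fails it for lack of a
+   lock prefix, and "xrelease movl %eax, (%ecx)" satisfies the
+   HLEPrefixRelease memory-destination requirement.  */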
+
/* This is the guts of the machine-dependent assembler. LINE points to a
machine dependent instruction. This function is supposed to emit
the frags/bytes it assembles to. */
/* Don't optimize displacement for movabs since it only takes 64bit
displacement. */
if (i.disp_operands
+ && i.disp_encoding != disp_encoding_32bit
&& (flag_code != CODE_64BIT
|| strcmp (mnemonic, "movabs") != 0))
optimize_disp ();
if (!(t = match_template ()))
return;
- if (sse_check != sse_check_none
+ if (sse_check != check_none
&& !i.tm.opcode_modifier.noavx
&& (i.tm.cpu_flags.bitfield.cpusse
|| i.tm.cpu_flags.bitfield.cpusse2
|| i.tm.cpu_flags.bitfield.cpusse4_1
|| i.tm.cpu_flags.bitfield.cpusse4_2))
{
- (sse_check == sse_check_warning
+ (sse_check == check_warning
? as_warn
: as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
}
return;
}
+ /* Check if HLE prefix is OK. */
+ if (i.have_hle && !check_hle ())
+ return;
+
/* Check string instruction segment overrides. */
if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
{
case PREFIX_EXIST:
return NULL;
case PREFIX_REP:
- expecting_string_instruction = current_templates->start->name;
+ if (current_templates->start->cpu_flags.bitfield.cpuhle)
+ i.have_hle = 1;
+ else
+ expecting_string_instruction = current_templates->start->name;
break;
default:
break;
if (!current_templates)
{
- /* Check if we should swap operand in encoding. */
+ /* Check if we should swap operand or force 32bit displacement in
+ encoding. */
if (mnem_p - 2 == dot_p && dot_p[1] == 's')
i.swap_operand = 1;
+ else if (mnem_p - 3 == dot_p
+ && dot_p[1] == 'd'
+ && dot_p[2] == '8')
+ i.disp_encoding = disp_encoding_8bit;
+ else if (mnem_p - 4 == dot_p
+ && dot_p[1] == 'd'
+ && dot_p[2] == '3'
+ && dot_p[3] == '2')
+ i.disp_encoding = disp_encoding_32bit;
else
goto check_suffix;
mnem_p = dot_p;
as_warn (_("use .code16 to ensure correct addressing mode"));
}
- /* Check for rep/repne without a string instruction. */
+ /* Check for rep/repne without a string (or other allowed) instruction. */
if (expecting_string_instruction)
{
static templates override;
for (t = current_templates->start; t < current_templates->end; ++t)
- if (t->opcode_modifier.isstring)
+ if (t->opcode_modifier.repprefixok)
break;
if (t >= current_templates->end)
{
return NULL;
}
for (override.start = t; t < current_templates->end; ++t)
- if (!t->opcode_modifier.isstring)
+ if (!t->opcode_modifier.repprefixok)
break;
override.end = t;
current_templates = &override;
}
}
+/* Check if operands are valid for the instruction. */
+
+static int
+check_VecOperands (const insn_template *t)
+{
+ /* Without VSIB byte, we can't have a vector register for index. */
+ if (!t->opcode_modifier.vecsib
+ && i.index_reg
+ && (i.index_reg->reg_type.bitfield.regxmm
+ || i.index_reg->reg_type.bitfield.regymm))
+ {
+ i.error = unsupported_vector_index_register;
+ return 1;
+ }
+
+ /* For VSIB byte, we need a vector register for index, and all vector
+ registers must be distinct. */
+ if (t->opcode_modifier.vecsib)
+ {
+ if (!i.index_reg
+ || !((t->opcode_modifier.vecsib == VecSIB128
+ && i.index_reg->reg_type.bitfield.regxmm)
+ || (t->opcode_modifier.vecsib == VecSIB256
+ && i.index_reg->reg_type.bitfield.regymm)))
+ {
+ i.error = invalid_vsib_address;
+ return 1;
+ }
+
+ gas_assert (i.reg_operands == 2);
+ gas_assert (i.types[0].bitfield.regxmm
+ || i.types[0].bitfield.regymm);
+ gas_assert (i.types[2].bitfield.regxmm
+ || i.types[2].bitfield.regymm);
+
+ if (operand_check == check_none)
+ return 0;
+ if (register_number (i.op[0].regs) != register_number (i.index_reg)
+ && register_number (i.op[2].regs) != register_number (i.index_reg)
+ && register_number (i.op[0].regs) != register_number (i.op[2].regs))
+ return 0;
+ if (operand_check == check_error)
+ {
+ i.error = invalid_vector_register_set;
+ return 1;
+ }
+ as_warn (_("mask, index, and destination registers should be distinct"));
+ }
+
+ return 0;
+}
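+/* For illustration (assumed example): an AVX2 gather such as
+   "vgatherdps %xmm2, (%eax,%xmm1,4), %xmm3" requires the VSIB byte, an
+   xmm index register, and distinct mask, index and destination registers,
+   which is what the checks above enforce.  */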
+
/* Check if operands are valid for the instruction. Update VEX
operand types. */
unsigned int j;
unsigned int found_cpu_match;
unsigned int check_register;
+ enum i386_error specific_error = 0;
#if MAX_OPERANDS != 5
# error "MAX_OPERANDS must be 5."
}
}
- /* We check register size only if size of operands can be
- encoded the canonical way. */
- check_register = t->opcode_modifier.w;
+ /* We check register size if needed. */
+ check_register = t->opcode_modifier.checkregsize;
overlap0 = operand_type_and (i.types[0], operand_types[0]);
switch (t->operands)
{
continue;
}
- /* Check if VEX operands are valid. */
- if (VEX_check_operands (t))
- continue;
+ /* Check if vector and VEX operands are valid. */
+ if (check_VecOperands (t) || VEX_check_operands (t))
+ {
+ specific_error = i.error;
+ continue;
+ }
/* We've found a match; break out of loop. */
break;
{
/* We found no match. */
const char *err_msg;
- switch (i.error)
+ switch (specific_error ? specific_error : i.error)
{
default:
abort ();
err_msg = _("invalid instruction suffix");
break;
case bad_imm4:
- err_msg = _("Imm4 isn't the first operand");
+ err_msg = _("constant doesn't fit in 4 bits");
break;
case old_gcc_only:
err_msg = _("only supported with old gcc");
err_msg = _("unsupported syntax");
break;
case unsupported:
- err_msg = _("unsupported");
+ as_bad (_("unsupported instruction `%s'"),
+ current_templates->start->name);
+ return NULL;
+ case invalid_vsib_address:
+ err_msg = _("invalid VSIB address");
+ break;
+ case invalid_vector_register_set:
+ err_msg = _("mask, index, and destination registers must be distinct");
+ break;
+ case unsupported_vector_index_register:
+ err_msg = _("unsupported vector index register");
break;
}
as_bad (_("%s for `%s'"), err_msg,
if (i.types[op].bitfield.reg8)
continue;
+ /* I/O port address operands are OK too. */
+ if (i.tm.operand_types[op].bitfield.inoutportreg)
+ continue;
+
/* crc32 doesn't generate this warning. */
if (i.tm.base_opcode == 0xf20f38f0)
continue;
if ((i.types[op].bitfield.reg16
|| i.types[op].bitfield.reg32
|| i.types[op].bitfield.reg64)
- && i.op[op].regs->reg_num < 4)
+ && i.op[op].regs->reg_num < 4
+ /* Prohibit these changes in 64bit mode, since the lowering
+ would be more complicated. */
+ && flag_code != CODE_64BIT)
{
- /* Prohibit these changes in the 64bit mode, since the
- lowering is more complicated. */
- if (flag_code == CODE_64BIT
- && !i.tm.operand_types[op].bitfield.inoutportreg)
- {
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
- register_prefix, i.op[op].regs->reg_name,
- i.suffix);
- return 0;
- }
#if REGISTER_WARNINGS
- if (!quiet_warnings
- && !i.tm.operand_types[op].bitfield.inoutportreg)
+ if (!quiet_warnings)
as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
register_prefix,
(i.op[op].regs + (i.types[op].bitfield.reg16
lowering is more complicated. */
if (flag_code == CODE_64BIT)
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
}
else
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
}
else
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
lowering is more complicated. */
if (flag_code == CODE_64BIT)
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
{
/* The first operand is implicit and must be xmm0. */
gas_assert (operand_type_equal (&i.types[0], &regxmm));
- if (i.op[0].regs->reg_num != 0)
+ if (register_number (i.op[0].regs) != 0)
return bad_implicit_operand (1);
if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
gas_assert (i.reg_operands
&& (operand_type_equal (&i.types[0], &regxmm)
|| operand_type_equal (&i.types[0], &regymm)));
- if (i.op[0].regs->reg_num != 0)
+ if (register_number (i.op[0].regs) != 0)
return bad_implicit_operand (i.types[0].bitfield.regxmm);
for (j = 1; j < i.operands; j++)
|| operand_type_equal (&i.tm.operand_types[reg_slot],
&regymm));
exp->X_op = O_constant;
- exp->X_add_number
- = ((i.op[reg_slot].regs->reg_num
- + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
- << 4);
+ exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
}
else
{
|| operand_type_equal (&i.tm.operand_types[reg_slot],
&regymm));
i.op[imm_slot].imms->X_add_number
- |= ((i.op[reg_slot].regs->reg_num
- + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
- << 4);
+ |= register_number (i.op[reg_slot].regs) << 4;
}
gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
{
/* For instructions with VexNDS, the register-only
- source operand must be XMM or YMM register. It is
- encoded in VEX prefix. We need to clear RegMem bit
- before calling operand_type_equal. */
- i386_operand_type op = i.tm.operand_types[dest];
+ source operand must be 32/64bit integer, XMM or
+ YMM register. It is encoded in VEX prefix. We
+ need to clear RegMem bit before calling
+ operand_type_equal. */
+
+ i386_operand_type op;
+ unsigned int vvvv;
+
+ /* Check register-only source operand when two source
+ operands are swapped. */
+ if (!i.tm.operand_types[source].bitfield.baseindex
+ && i.tm.operand_types[dest].bitfield.baseindex)
+ {
+ vvvv = source;
+ source = dest;
+ }
+ else
+ vvvv = dest;
+
+ op = i.tm.operand_types[vvvv];
op.bitfield.regmem = 0;
if ((dest + 1) >= i.operands
- || (!operand_type_equal (&op, &regxmm)
+ || (op.bitfield.reg32 != 1
+ && op.bitfield.reg64 != 1
+ && !operand_type_equal (&op, &regxmm)
&& !operand_type_equal (&op, &regymm)))
abort ();
- i.vex.register_specifier = i.op[dest].regs;
+ i.vex.register_specifier = i.op[vvvv].regs;
dest++;
}
}
break;
gas_assert (op < i.operands);
+ if (i.tm.opcode_modifier.vecsib)
+ {
+ if (i.index_reg->reg_num == RegEiz
+ || i.index_reg->reg_num == RegRiz)
+ abort ();
+
+ i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
+ if (!i.base_reg)
+ {
+ i.sib.base = NO_BASE_REGISTER;
+ i.sib.scale = i.log2_scale_factor;
+ i.types[op].bitfield.disp8 = 0;
+ i.types[op].bitfield.disp16 = 0;
+ i.types[op].bitfield.disp64 = 0;
+ if (flag_code != CODE_64BIT)
+ {
+ /* Must be 32 bit.  */
+ i.types[op].bitfield.disp32 = 1;
+ i.types[op].bitfield.disp32s = 0;
+ }
+ else
+ {
+ i.types[op].bitfield.disp32 = 0;
+ i.types[op].bitfield.disp32s = 1;
+ }
+ }
+ i.sib.index = i.index_reg->reg_num;
+ if ((i.index_reg->reg_flags & RegRex) != 0)
+ i.rex |= REX_X;
+ }
+
default_seg = &ds;
if (i.base_reg == 0)
{
i.rm.mode = 0;
if (!i.disp_operands)
- fake_zero_displacement = 1;
+ {
+ fake_zero_displacement = 1;
+ /* Instructions with VSIB byte need 32bit displacement
+ if there is no base register. */
+ if (i.tm.opcode_modifier.vecsib)
+ i.types[op].bitfield.disp32 = 1;
+ }
if (i.index_reg == 0)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
/* Operand is just <disp> */
if (flag_code == CODE_64BIT)
{
i.types[op] = disp32;
}
}
- else /* !i.base_reg && i.index_reg */
+ else if (!i.tm.opcode_modifier.vecsib)
{
+ /* !i.base_reg && i.index_reg */
if (i.index_reg->reg_num == RegEiz
|| i.index_reg->reg_num == RegRiz)
i.sib.index = NO_INDEX_REGISTER;
else if (i.base_reg->reg_num == RegRip ||
i.base_reg->reg_num == RegEip)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
i.rm.regmem = NO_BASE_REGISTER;
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
}
else if (i.base_reg->reg_type.bitfield.reg16)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
switch (i.base_reg->reg_num)
{
case 3: /* (%bx) */
i.types[op].bitfield.disp32 = 1;
}
- i.rm.regmem = i.base_reg->reg_num;
+ if (!i.tm.opcode_modifier.vecsib)
+ i.rm.regmem = i.base_reg->reg_num;
if ((i.base_reg->reg_flags & RegRex) != 0)
i.rex |= REX_B;
i.sib.base = i.base_reg->reg_num;
/* x86-64 ignores REX prefix bit here to avoid decoder
complications. */
- if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
- {
+ if (!(i.base_reg->reg_flags & RegRex)
+ && (i.base_reg->reg_num == EBP_REG_NUM
+ || i.base_reg->reg_num == ESP_REG_NUM))
default_seg = &ss;
- if (i.disp_operands == 0)
- {
- fake_zero_displacement = 1;
- i.types[op].bitfield.disp8 = 1;
- }
- }
- else if (i.base_reg->reg_num == ESP_REG_NUM)
+ if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
{
- default_seg = &ss;
+ fake_zero_displacement = 1;
+ i.types[op].bitfield.disp8 = 1;
}
i.sib.scale = i.log2_scale_factor;
if (i.index_reg == 0)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
/* <disp>(%esp) becomes two byte modrm with no index
register. We've already stored the code for esp
in i.rm.regmem i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
extra modrm byte. */
i.sib.index = NO_INDEX_REGISTER;
}
- else
+ else if (!i.tm.opcode_modifier.vecsib)
{
if (i.index_reg->reg_num == RegEiz
|| i.index_reg->reg_num == RegRiz)
|| i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
i.rm.mode = 0;
else
- i.rm.mode = mode_from_disp_size (i.types[op]);
+ {
+ if (!fake_zero_displacement
+ && !i.disp_operands
+ && i.disp_encoding)
+ {
+ fake_zero_displacement = 1;
+ if (i.disp_encoding == disp_encoding_8bit)
+ i.types[op].bitfield.disp8 = 1;
+ else
+ i.types[op].bitfield.disp32 = 1;
+ }
+ i.rm.mode = mode_from_disp_size (i.types[op]);
+ }
}
if (fake_zero_displacement)
}
else
{
- vex_reg = op + 1;
- gas_assert (vex_reg < i.operands);
+ /* Check register-only source operand when two source
+ operands are swapped. */
+ if (!i.tm.operand_types[op].bitfield.baseindex
+ && i.tm.operand_types[op + 1].bitfield.baseindex)
+ {
+ vex_reg = op;
+ op += 2;
+ gas_assert (mem == (vex_reg + 1)
+ && op < i.operands);
+ }
+ else
+ {
+ vex_reg = op + 1;
+ gas_assert (vex_reg < i.operands);
+ }
}
}
else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
{
- /* For instructions with VexNDD, there should be
- no memory operand and the register destination
+ /* For instructions with VexNDD, the register destination
is encoded in VEX prefix. */
- gas_assert (i.mem_operands == 0
- && (op + 2) == i.operands);
- vex_reg = op + 1;
+ if (i.mem_operands == 0)
+ {
+ /* There is no memory operand. */
+ gas_assert ((op + 2) == i.operands);
+ vex_reg = op + 1;
+ }
+ else
+ {
+ /* There are only 2 operands. */
+ gas_assert (op < 2 && i.operands == 2);
+ vex_reg = 1;
+ }
}
else
gas_assert (op < i.operands);
if (vex_reg != (unsigned int) ~0)
{
- gas_assert (i.reg_operands == 2);
+ i386_operand_type *type = &i.tm.operand_types[vex_reg];
- if (!operand_type_equal (&i.tm.operand_types[vex_reg],
- &regxmm)
- && !operand_type_equal (&i.tm.operand_types[vex_reg],
- &regymm))
+ if (type->bitfield.reg32 != 1
+ && type->bitfield.reg64 != 1
+ && !operand_type_equal (type, &regxmm)
+ && !operand_type_equal (type, &regymm))
abort ();
i.vex.register_specifier = i.op[vex_reg].regs;
output_branch (void)
{
char *p;
+ int size;
int code16;
int prefix;
relax_substateT subtype;
symbolS *sym;
offsetT off;
- code16 = 0;
- if (flag_code == CODE_16BIT)
- code16 = CODE16;
+ code16 = flag_code == CODE_16BIT ? CODE16 : 0;
+ size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
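+  /* For illustration: with the assumed ".d32" mnemonic suffix,
+     "jmp.d32 target" starts relaxation in the BIG state, so the
+     32-bit-displacement form is kept even when a short jump would do.  */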
prefix = 0;
if (i.prefix[DATA_PREFIX] != 0)
*p = i.tm.base_opcode;
if ((unsigned char) *p == JUMP_PC_RELATIVE)
- subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
+ subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
else if (cpu_arch_flags.bitfield.cpui386)
- subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
+ subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
else
- subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
+ subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
subtype |= code16;
sym = i.op[0].disps->X_add_symbol;
if (i.prefixes != 0 && !intel_syntax)
as_warn (_("skipping prefixes on this instruction"));
- p = frag_more (1 + size);
- *p++ = i.tm.base_opcode;
+ p = frag_more (i.tm.opcode_length + size);
+ switch (i.tm.opcode_length)
+ {
+ case 2:
+ *p++ = i.tm.base_opcode >> 8;
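+ /* Fall through to emit the low opcode byte.  */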
+ case 1:
+ *p++ = i.tm.base_opcode;
+ break;
+ default:
+ abort ();
+ }
fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
unsigned int prefix;
/* Since the VEX prefix contains the implicit prefix, we don't
- need the explicit prefix. */
+ need the explicit prefix. */
if (!i.tm.opcode_modifier.vex)
{
switch (i.tm.opcode_length)
if (*q)
FRAG_APPEND_1_CHAR (*q);
}
-
- if (i.tm.opcode_modifier.vex)
+ else
{
for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
if (*q)
fix_new_exp (frag, off, len, exp, 0, r);
}
-#if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
+/* Export the ABI address size for use by TC_ADDRESS_BYTES for the
+ purpose of the `.dc.a' internal pseudo-op. */
+
+int
+x86_address_bytes (void)
+{
+ if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
+ return 4;
+ return stdoutput->arch_info->bits_per_address / 8;
+}
+
+#if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
+ || defined (LEX_AT)
# define lex_got(reloc, adjust, types) NULL
#else
/* Parse operands of the form
const enum bfd_reloc_code_real rel[2];
const i386_operand_type types64;
} gotrel[] = {
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
+ BFD_RELOC_SIZE32 },
+ OPERAND_TYPE_IMM32_64 },
+#endif
{ STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
BFD_RELOC_X86_64_PLTOFF64 },
OPERAND_TYPE_IMM64 },
char *cp;
unsigned int j;
+#if defined (OBJ_MAYBE_ELF)
if (!IS_ELF)
return NULL;
+#endif
for (cp = input_line_pointer; *cp != '@'; cp++)
if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
char *tmpbuf, *past_reloc;
*rel = gotrel[j].rel[object_64bit];
- if (adjust)
- *adjust = len;
if (types)
{
*types = gotrel[j].types64;
}
- if (GOT_symbol == NULL)
+ if (j != 0 && GOT_symbol == NULL)
GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
/* The length of the first part of our input line. */
/* Replace the relocation token with ' ', so that
errors like foo@GOTOFF1 will be detected. */
tmpbuf[first++] = ' ';
+ else
+ /* Increment length by 1 if the relocation token is
+ removed. */
+ len++;
+ if (adjust)
+ *adjust = len;
+ memcpy (tmpbuf + first, past_reloc, second);
+ tmpbuf[first + second] = '\0';
+ return tmpbuf;
+ }
+
+ as_bad (_("@%s reloc is not supported with %d-bit output format"),
+ gotrel[j].str, 1 << (5 + object_64bit));
+ return NULL;
+ }
+ }
+
+ /* Might be a symbol version string. Don't as_bad here. */
+ return NULL;
+}
+#endif
+
+#ifdef TE_PE
+#ifdef lex_got
+#undef lex_got
+#endif
+/* Parse operands of the form
+ <symbol>@SECREL32+<nnn>
+
+ If we find one, set up the correct relocation in RELOC and copy the
+ input string, minus the `@SECREL32' into a malloc'd buffer for
+ parsing by the calling routine. Return this buffer, and if ADJUST
+ is non-null set it to the length of the string we removed from the
+ input line. Otherwise return NULL.
+
+ This function is copied from the ELF version above adjusted for PE targets. */
+
+static char *
+lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
+ int *adjust ATTRIBUTE_UNUSED,
+ i386_operand_type *types ATTRIBUTE_UNUSED)
+{
+ static const struct
+ {
+ const char *str;
+ int len;
+ const enum bfd_reloc_code_real rel[2];
+ const i386_operand_type types64;
+ }
+ gotrel[] =
+ {
+ { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
+ BFD_RELOC_32_SECREL },
+ OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
+ };
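+  /* For illustration (assumed example): an operand written as
+     "foo@SECREL32" selects BFD_RELOC_32_SECREL via this table.  */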
+
+ char *cp;
+ unsigned j;
+
+ for (cp = input_line_pointer; *cp != '@'; cp++)
+ if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
+ return NULL;
+
+ for (j = 0; j < ARRAY_SIZE (gotrel); j++)
+ {
+ int len = gotrel[j].len;
+
+ if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
+ {
+ if (gotrel[j].rel[object_64bit] != 0)
+ {
+ int first, second;
+ char *tmpbuf, *past_reloc;
+
+ *rel = gotrel[j].rel[object_64bit];
+ if (adjust)
+ *adjust = len;
+
+ if (types)
+ {
+ if (flag_code != CODE_64BIT)
+ {
+ types->bitfield.imm32 = 1;
+ types->bitfield.disp32 = 1;
+ }
+ else
+ *types = gotrel[j].types64;
+ }
+
+ /* The length of the first part of our input line. */
+ first = cp - input_line_pointer;
+
+ /* The second part goes from after the reloc token until
+ (and including) an end_of_line char or comma. */
+ past_reloc = cp + 1 + len;
+ cp = past_reloc;
+ while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
+ ++cp;
+ second = cp + 1 - past_reloc;
+
+ /* Allocate and copy string. The trailing NUL shouldn't
+ be necessary, but be safe. */
+ tmpbuf = (char *) xmalloc (first + second + 2);
+ memcpy (tmpbuf, input_line_pointer, first);
+ if (second != 0 && *past_reloc != ' ')
+ /* Replace the relocation token with ' ', so that
+ errors like foo@SECREL321 will be detected. */
+ tmpbuf[first++] = ' ';
memcpy (tmpbuf + first, past_reloc, second);
tmpbuf[first + second] = '\0';
return tmpbuf;
return NULL;
}
+#endif /* TE_PE */
+
void
x86_cons (expressionS *exp, int size)
{
/* Handle @GOTOFF and the like in an expression. */
char *save;
char *gotfree_input_line;
- int adjust;
+ int adjust = 0;
save = input_line_pointer;
gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
if (intel_syntax)
i386_intel_simplify (exp);
}
-#endif
static void
signed_cons (int size)
#ifdef TE_PE
static void
-pe_directive_secrel (dummy)
- int dummy ATTRIBUTE_UNUSED;
+pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
expressionS exp;
? i.base_reg->reg_type.bitfield.reg32
: i.base_reg->reg_type.bitfield.reg16))
ok = 0;
- else if (i.base_reg->reg_num != expected)
+ else if (register_number (i.base_reg) != expected)
ok = -1;
if (ok < 0)
: (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
? i386_regtab[j].reg_type.bitfield.reg32
: i386_regtab[j].reg_type.bitfield.reg16)
- && i386_regtab[j].reg_num == expected)
+ && register_number (i386_regtab + j) == expected)
break;
gas_assert (j < i386_regtab_size);
as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
|| i.base_reg->reg_num !=
(i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
|| (i.index_reg
+ && !(i.index_reg->reg_type.bitfield.regxmm
+ || i.index_reg->reg_type.bitfield.regymm)
&& (!i.index_reg->reg_type.bitfield.baseindex
|| (i.prefix[ADDR_PREFIX] == 0
&& i.index_reg->reg_num != RegRiz
if ((i.base_reg
&& !i.base_reg->reg_type.bitfield.reg32)
|| (i.index_reg
+ && !i.index_reg->reg_type.bitfield.regxmm
+ && !i.index_reg->reg_type.bitfield.regymm
&& ((!i.index_reg->reg_type.bitfield.reg32
&& i.index_reg->reg_num != RegEiz)
|| !i.index_reg->reg_type.bitfield.baseindex)))
}
else if (*base_string == REGISTER_PREFIX)
{
+ end_op = strchr (base_string, ',');
+ if (end_op)
+ *end_op = '\0';
as_bad (_("bad register name `%s'"), base_string);
return 0;
}
}
else if (*base_string == REGISTER_PREFIX)
{
+ end_op = strchr (base_string, ',');
+ if (end_op)
+ *end_op = '\0';
as_bad (_("bad register name `%s'"), base_string);
return 0;
}
return 1; /* Normal return. */
}
\f
+/* Calculate the maximum variable size (i.e., excluding fr_fix)
+ that an rs_machine_dependent frag may reach. */
+
+unsigned int
+i386_frag_max_var (fragS *frag)
+{
+ /* The only relaxable frags are for jumps.
+ Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
+ gas_assert (frag->fr_type == rs_machine_dependent);
+ return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
+}
+
/* md_estimate_size_before_relax()
Called just before relax() for rs_machine_dependent frags. The x86
returned value. */
int
-md_estimate_size_before_relax (fragP, segment)
- fragS *fragP;
- segT segment;
+md_estimate_size_before_relax (fragS *fragP, segT segment)
{
/* We've already got fragP->fr_subtype right; all we have to do is
check for un-relaxable symbols. On an ELF system, we can't relax
Caller will turn frag into a ".space 0". */
void
-md_convert_frag (abfd, sec, fragP)
- bfd *abfd ATTRIBUTE_UNUSED;
- segT sec ATTRIBUTE_UNUSED;
- fragS *fragP;
+md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
+ fragS *fragP)
{
unsigned char *opcode;
unsigned char *where_to_put_displacement = NULL;
fragP->fr_fix += extension;
}
\f
-/* Apply a fixup (fixS) to segment data, once it has been determined
+/* Apply a fixup (fixP) to segment data, once it has been determined
by our caller that we have all the info we need to fix it up.
+ Parameter valP is the pointer to the value of the bits.
+
On the 386, immediates, displacements, and data pointers are all in
the same (little-endian) format, so we don't need to care about which
we are handling. */
void
-md_apply_fix (fixP, valP, seg)
- /* The fix we're to put in. */
- fixS *fixP;
- /* Pointer to the value of the bits. */
- valueT *valP;
- /* Segment fix is from. */
- segT seg ATTRIBUTE_UNUSED;
+md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
valueT value = *valP;
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
-#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
+#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
+#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
+#define OPTION_X32 (OPTION_MD_BASE + 14)
struct option md_longopts[] =
{
{"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
- || defined (TE_PE) || defined (TE_PEP))
+ || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
{"64", no_argument, NULL, OPTION_64},
+#endif
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ {"x32", no_argument, NULL, OPTION_X32},
#endif
{"divide", no_argument, NULL, OPTION_DIVIDE},
{"march", required_argument, NULL, OPTION_MARCH},
{"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
{"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
{"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
+ {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
{"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
{NULL, no_argument, NULL, 0}
};
break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
- || defined (TE_PE) || defined (TE_PEP))
+ || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
case OPTION_64:
{
const char **list, **l;
if (CONST_STRNEQ (*l, "elf64-x86-64")
|| strcmp (*l, "coff-x86-64") == 0
|| strcmp (*l, "pe-x86-64") == 0
- || strcmp (*l, "pei-x86-64") == 0)
+ || strcmp (*l, "pei-x86-64") == 0
+ || strcmp (*l, "mach-o-x86-64") == 0)
{
default_arch = "x86_64";
break;
}
if (*l == NULL)
- as_fatal (_("No compiled in support for x86_64"));
+ as_fatal (_("no compiled in support for x86_64"));
free (list);
}
break;
#endif
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ case OPTION_X32:
+ if (IS_ELF)
+ {
+ const char **list, **l;
+
+ list = bfd_target_list ();
+ for (l = list; *l != NULL; l++)
+ if (CONST_STRNEQ (*l, "elf32-x86-64"))
+ {
+ default_arch = "x86_64:32";
+ break;
+ }
+ if (*l == NULL)
+ as_fatal (_("no compiled in support for 32bit x86_64"));
+ free (list);
+ }
+ else
+ as_fatal (_("32bit x86_64 is only supported for ELF"));
+ break;
+#endif
+
case OPTION_32:
default_arch = "i386";
break;
do
{
if (*arch == '.')
- as_fatal (_("Invalid -march= option: `%s'"), arg);
+ as_fatal (_("invalid -march= option: `%s'"), arg);
next = strchr (arch, '+');
if (next)
*next++ = '\0';
else
cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
+ cpu_arch_isa_flags = flags;
}
break;
}
}
if (j >= ARRAY_SIZE (cpu_arch))
- as_fatal (_("Invalid -march= option: `%s'"), arg);
+ as_fatal (_("invalid -march= option: `%s'"), arg);
arch = next;
}
case OPTION_MTUNE:
if (*arg == '.')
- as_fatal (_("Invalid -mtune= option: `%s'"), arg);
+ as_fatal (_("invalid -mtune= option: `%s'"), arg);
for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
if (strcmp (arg, cpu_arch [j].name) == 0)
}
}
if (j >= ARRAY_SIZE (cpu_arch))
- as_fatal (_("Invalid -mtune= option: `%s'"), arg);
+ as_fatal (_("invalid -mtune= option: `%s'"), arg);
break;
case OPTION_MMNEMONIC:
else if (strcasecmp (arg, "intel") == 0)
intel_mnemonic = 1;
else
- as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
+ as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
break;
case OPTION_MSYNTAX:
else if (strcasecmp (arg, "intel") == 0)
intel_syntax = 1;
else
- as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
+ as_fatal (_("invalid -msyntax= option: `%s'"), arg);
break;
case OPTION_MINDEX_REG:
case OPTION_MSSE_CHECK:
if (strcasecmp (arg, "error") == 0)
- sse_check = sse_check_error;
+ sse_check = check_error;
else if (strcasecmp (arg, "warning") == 0)
- sse_check = sse_check_warning;
+ sse_check = check_warning;
else if (strcasecmp (arg, "none") == 0)
- sse_check = sse_check_none;
+ sse_check = check_none;
else
- as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
+ as_fatal (_("invalid -msse-check= option: `%s'"), arg);
+ break;
+
+ case OPTION_MOPERAND_CHECK:
+ if (strcasecmp (arg, "error") == 0)
+ operand_check = check_error;
+ else if (strcasecmp (arg, "warning") == 0)
+ operand_check = check_warning;
+ else if (strcasecmp (arg, "none") == 0)
+ operand_check = check_none;
+ else
+ as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
break;
case OPTION_MAVXSCALAR:
else if (strcasecmp (arg, "256") == 0)
avxscalar = vex256;
else
- as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
+ as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
break;
default:
fprintf (stream, "%s\n", message);
p = start;
left = size - (start - message) - len - 2;
-
+
gas_assert (left >= 0);
p = mempcpy (p, name, len);
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
|| defined (TE_PE) || defined (TE_PEP))
fprintf (stream, _("\
- --32/--64 generate 32bit/64bit code\n"));
+ --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
fprintf (stream, _("\
-msse-check=[none|error|warning]\n\
check SSE instructions\n"));
fprintf (stream, _("\
+ -moperand-check=[none|error|warning]\n\
+ check operand combinations for validity\n"));
+ fprintf (stream, _("\
-mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
length\n"));
fprintf (stream, _("\
const char *
i386_target_format (void)
{
- if (!strcmp (default_arch, "x86_64"))
- update_code_flag (CODE_64BIT, 1);
+ if (!strncmp (default_arch, "x86_64", 6))
+ {
+ update_code_flag (CODE_64BIT, 1);
+ if (default_arch[6] == '\0')
+ x86_elf_abi = X86_64_ABI;
+ else
+ x86_elf_abi = X86_64_X32_ABI;
+ }
else if (!strcmp (default_arch, "i386"))
update_code_flag (CODE_32BIT, 1);
else
- as_fatal (_("Unknown architecture"));
+ as_fatal (_("unknown architecture"));
if (cpu_flags_all_zero (&cpu_arch_isa_flags))
cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
case bfd_target_elf_flavour:
{
- if (flag_code == CODE_64BIT)
+ const char *format;
+
+ switch (x86_elf_abi)
{
+ default:
+ format = ELF_TARGET_FORMAT;
+ break;
+ case X86_64_ABI:
+ use_rela_relocations = 1;
object_64bit = 1;
+ format = ELF_TARGET_FORMAT64;
+ break;
+ case X86_64_X32_ABI:
use_rela_relocations = 1;
+ object_64bit = 1;
+ disallow_64bit_reloc = 1;
+ format = ELF_TARGET_FORMAT32;
+ break;
}
if (cpu_arch_isa == PROCESSOR_L1OM)
{
- if (flag_code != CODE_64BIT)
+ if (x86_elf_abi != X86_64_ABI)
as_fatal (_("Intel L1OM is 64bit only"));
return ELF_TARGET_L1OM_FORMAT;
}
+ if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (x86_elf_abi != X86_64_ABI)
+ as_fatal (_("Intel K1OM is 64bit only"));
+ return ELF_TARGET_K1OM_FORMAT;
+ }
else
- return (flag_code == CODE_64BIT
- ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
+ return format;
}
#endif
#if defined (OBJ_MACH_O)
case bfd_target_mach_o_flavour:
- return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
+ if (flag_code == CODE_64BIT)
+ {
+ use_rela_relocations = 1;
+ object_64bit = 1;
+ return "mach-o-x86-64";
+ }
+ else
+ return "mach-o-i386";
#endif
default:
abort ();
#endif
\f
symbolS *
-md_undefined_symbol (name)
- char *name;
+md_undefined_symbol (char *name)
{
if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
&& name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
/* Round up a section size to the appropriate boundary. */
valueT
-md_section_align (segment, size)
- segT segment ATTRIBUTE_UNUSED;
- valueT size;
+md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
}
arelent *
-tc_gen_reloc (section, fixp)
- asection *section ATTRIBUTE_UNUSED;
- fixS *fixp;
+tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
arelent *rel;
bfd_reloc_code_real_type code;
switch (fixp->fx_r_type)
{
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ case BFD_RELOC_SIZE32:
+ case BFD_RELOC_SIZE64:
+ if (S_IS_DEFINED (fixp->fx_addsy)
+ && !S_IS_EXTERNAL (fixp->fx_addsy))
+ {
+ /* Resolve size relocation against local symbol to size of
+ the symbol plus addend. */
+ valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
+ if (fixp->fx_r_type == BFD_RELOC_SIZE32
+ && !fits_in_unsigned_long (value))
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _("symbol size computation overflow"));
+ fixp->fx_addsy = NULL;
+ fixp->fx_subsy = NULL;
+ md_apply_fix (fixp, (valueT *) &value, NULL);
+ return NULL;
+ }
+#endif
+
case BFD_RELOC_X86_64_PLT32:
case BFD_RELOC_X86_64_GOT32:
case BFD_RELOC_X86_64_GOTPCREL:
/* Use the rela in 64bit mode. */
else
{
+ if (disallow_64bit_reloc)
+ switch (code)
+ {
+ case BFD_RELOC_X86_64_DTPOFF64:
+ case BFD_RELOC_X86_64_TPOFF64:
+ case BFD_RELOC_64_PCREL:
+ case BFD_RELOC_X86_64_GOTOFF64:
+ case BFD_RELOC_X86_64_GOT64:
+ case BFD_RELOC_X86_64_GOTPCREL64:
+ case BFD_RELOC_X86_64_GOTPC64:
+ case BFD_RELOC_X86_64_GOTPLT64:
+ case BFD_RELOC_X86_64_PLTOFF64:
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _("cannot represent relocation type %s in x32 mode"),
+ bfd_get_reloc_code_name (code));
+ break;
+ default:
+ break;
+ }
+
if (!fixp->fx_pcrel)
rel->addend = fixp->fx_offset;
else
cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
+int
+x86_dwarf2_addr_size (void)
+{
+#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+ if (x86_elf_abi == X86_64_X32_ABI)
+ return 4;
+#endif
+ return bfd_arch_bits_per_address (stdoutput) / 8;
+}
+
int
i386_elf_section_type (const char *str, size_t len)
{
if (letter == 'l')
return SHF_X86_64_LARGE;
- *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
}
else
- *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
return -1;
}